# MIToS.jl (MIT license): https://github.com/diegozea/MIToS.jl.git
# Jorge Fernández de Cossío Díaz ( @cossio ) wrote the kabsch, rmsd and center! functions.
"""
`kabsch(A::AbstractMatrix{Float64}, B::AbstractMatrix{Float64})`
This function takes two sets of points, `A` (reference) and `B`, as NxD matrices, where D
is the dimension and N is the number of points.
It assumes that the centroids of `A` and `B` are at the origin of coordinates.
You can call `center!` on each matrix before calling `kabsch` to center the matrices
at `(0.0, 0.0, 0.0)`.
Rotates `B` so that `rmsd(A,B)` is minimized.
Returns the rotation matrix. You should do `B * RotationMatrix` to get the rotated B.
"""
function kabsch(A::AbstractMatrix{Float64}, B::AbstractMatrix{Float64})
@assert size(A) == size(B)
M::AbstractMatrix{Float64} = B' * A
χ = Matrix{Float64}(I, size(M, 1), size(M, 2))
χ[end, end] = sign(det(M))
u::AbstractMatrix{Float64}, σ::Vector{Float64}, v::AbstractMatrix{Float64} = svd(M)
return u * χ * v'
end
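# Minimal usage sketch (not part of the original file; random 10x3 point sets used only
# for illustration) showing how `center!`, `kabsch` and `rmsd` fit together:
#
#     # P = rand(10, 3); Q = rand(10, 3)
#     # center!(P); center!(Q)            # put both centroids at the origin
#     # R = kabsch(P, Q)                  # optimal rotation to apply to Q
#     # rmsd(P, Q * R) <= rmsd(P, Q)      # the rotation never increases the RMSD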
"""
`center!(A::AbstractMatrix{Float64})`
Takes a set of points `A` as an NxD matrix (N: number of points, D: dimension).
Translates `A` in place so that its centroid is at the origin of coordinates.
"""
function center!(A::AbstractMatrix{Float64})
for i = 1:size(A)[2]
A[:, i] .-= mean(A[:, i])
end
end
"""
`rmsd(A::AbstractMatrix{Float64}, B::AbstractMatrix{Float64})`
Return RMSD between two sets of points `A` and `B`, given as NxD matrices
(N: number of points, D: dimension).
"""
function rmsd(A::AbstractMatrix{Float64}, B::AbstractMatrix{Float64})
@assert size(A) == size(B)
N, D = size(A)
s::Float64 = 0.0
for i = 1:N, j = 1:D
s += (B[i, j]::Float64 - A[i, j]::Float64)^2
end
return sqrt(s / N)
end
"""
`rmsd(A::AbstractVector{PDBResidue}, B::AbstractVector{PDBResidue}; superimposed::Bool=false)`
Returns the Cα RMSD value between two PDB structures: `A` and `B`.
If the structures are already superimposed,
use `superimposed=true` to avoid a new superimposition (`superimposed` is `false` by default).
"""
function rmsd(
A::AbstractVector{PDBResidue},
B::AbstractVector{PDBResidue};
superimposed::Bool = false,
)
if superimposed
rmsd(CAmatrix(A), CAmatrix(B))
else
superimpose(A, B)[end]::Float64
end
end
# Kabsch for Vector{PDBResidues}
# ==============================
"""
Returns the Cα with best occupancy in the `PDBResidue`.
If the `PDBResidue` has no Cα, `missing` is returned.
"""
function getCA(res::PDBResidue)
if length(res) == 0
@warn """There are no atoms in residue
$(res.id)"""
return missing
end
CAs = findatoms(res, "CA")
if length(CAs) == 0
@warn """There is no alpha carbon in residue
$(res.id)"""
missing
else
CAindex = selectbestoccupancy(res, CAs)
res.atoms[CAindex]
end
end
"""
Returns a matrix with the x, y and z coordinates of the Cα with best occupancy for each
`PDBResidue` of the ATOM group. If a residue doesn't have a Cα, its Cα coordinates are NaNs.
"""
function CAmatrix(residues::AbstractVector{PDBResidue})
len = length(residues)
CAlist = Array{Float64}(undef, 3 * len)
j = 0
r = 0
@inbounds for i = 1:len
res = residues[i]
if (res.id.group == "ATOM") && (length(res) > 0)
r += 1
CAs = findatoms(res, "CA")
if length(CAs) != 0
CAindex = selectbestoccupancy(res, CAs)
coord = res.atoms[CAindex].coordinates
CAlist[j+=1] = coord.x
CAlist[j+=1] = coord.y
CAlist[j+=1] = coord.z
else
CAlist[j+=1] = NaN
CAlist[j+=1] = NaN
CAlist[j+=1] = NaN
end
end
end
reshape(resize!(CAlist, j), (3, r))'
end
"""
Returns a matrix with the x, y, z coordinates of each atom in each `PDBResidue`
"""
function coordinatesmatrix(res::PDBResidue)
atoms = res.atoms
len = length(atoms)
mat = Array{Float64}(undef, 3, len)
for i = 1:len
coord = atoms[i].coordinates
mat[1, i] = coord.x
mat[2, i] = coord.y
mat[3, i] = coord.z
end
mat'
end
function coordinatesmatrix(residues::AbstractVector{PDBResidue})
reduce(vcat, map(coordinatesmatrix, residues))
end
"""
Returns a `Matrix{Float64}` with the centered coordinates of all the atoms in `residues`.
An optional positional argument `CA` (default: `true`) defines if only Cα carbons should
be used to center the matrix.
"""
function centeredcoordinates(residues::AbstractVector{PDBResidue}, CA::Bool = true)
coordinates = PDB.coordinatesmatrix(residues)
meancoord = CA ? mean(PDB.CAmatrix(residues), dims = 1) : mean(coordinates, dims = 1)
coordinates .- meancoord
end
"""
Returns a new `Vector{PDBResidue}` with the `PDBResidue`s having centered coordinates.
An optional positional argument `CA` (default: `true`) defines if only Cα carbons should
be used to center the matrix.
"""
function centeredresidues(residues::AbstractVector{PDBResidue}, CA::Bool = true)
coordinates = centeredcoordinates(residues, CA)
change_coordinates(residues, coordinates)
end
"""
`change_coordinates(atom::PDBAtom, coordinates::Coordinates)`
Returns a new `PDBAtom` with the given `coordinates`.
"""
function change_coordinates(atom::PDBAtom, coordinates::Coordinates)
PDBAtom(
coordinates,
identity(atom.atom),
identity(atom.element),
copy(atom.occupancy),
identity(atom.B),
identity(atom.alt_id),
identity(atom.charge),
)
end
"""
`change_coordinates(residue::PDBResidue, coordinates::AbstractMatrix{Float64}, offset::Int=1)`
Returns a new `PDBResidue` with the (x,y,z) coordinates taken from an `AbstractMatrix{Float64}`.
You can give an `offset` indicating in which matrix row the (x,y,z) coordinates
of the residue start.
"""
function change_coordinates(
residue::PDBResidue,
coordinates::AbstractMatrix{Float64},
offset::Int = 1,
)
centeredatoms = map(residue.atoms) do atom
atoms = change_coordinates(atom, Coordinates(vec(coordinates[offset, :])))
offset += 1
return atoms
end
PDBResidue(residue.id, centeredatoms)
end
"""
`change_coordinates(residues::AbstractVector{PDBResidue}, coordinates::AbstractMatrix{Float64})`
Returns a new `Vector{PDBResidue}` with the (x,y,z) coordinates taken from a `Matrix{Float64}`.
"""
function change_coordinates(
residues::AbstractVector{PDBResidue},
coordinates::AbstractMatrix{Float64},
)
nres = length(residues)
updated = Array{PDBResidue}(undef, nres)
j = 1
for i = 1:nres
residue = residues[i]
updated[i] = change_coordinates(residue, coordinates, j)
j += length(residue)
end
updated
end
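# Minimal usage sketch (hypothetical `residues::Vector{PDBResidue}`): the coordinates
# matrix must have one row per atom, in the same order in which the atoms appear when
# iterating the residues; `coordinatesmatrix(residues)` produces exactly that layout.
#
#     # xyz   = coordinatesmatrix(residues) .+ 1.0   # translate every atom by (1,1,1)
#     # moved = change_coordinates(residues, xyz)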
"""
Returns a new `PDBAtom` with `B` as its B-factor.
"""
function _change_B(atom::PDBAtom, B::String)
PDBAtom(
copy(atom.coordinates),
deepcopy(atom.atom),
deepcopy(atom.element),
copy(atom.occupancy),
B,
deepcopy(atom.alt_id),
deepcopy(atom.charge),
)
end
_iscentered(x::Float64, y::Float64, z::Float64) =
(abs(x) <= 1e-13) && (abs(y) <= 1e-13) && (abs(z) <= 1e-13)
_iscentered(meanCα::AbstractVector{Float64}) = _iscentered(meanCα[1], meanCα[2], meanCα[3])
_iscentered(CA::AbstractMatrix{Float64}) = _iscentered(vec(mean(CA, dims = 1)))
"""
Return the matching CA matrices after deleting the rows/residues where the CA
is missing in at least one structure.
"""
function _get_matched_Cαs(
A::AbstractVector{PDBResidue},
B::AbstractVector{PDBResidue},
::Nothing,
)
length_A = length(A)
if length_A != length(B)
throw(ArgumentError("PDBResidue vectors should have the same length."))
end
ACα = PDB.CAmatrix(A)
BCα = PDB.CAmatrix(B)
without_Cα = isnan.(ACα[:, 1]) .| isnan.(BCα[:, 1])
if any(without_Cα)
n_without_ca = sum(without_Cα)
if length_A - n_without_ca == 0
            throw(ArgumentError("There are no alpha-carbons to align."))
end
@warn string(
"Using ",
length_A - n_without_ca,
" residues for RMSD calculation because there are ",
n_without_ca,
" residues without CA: ",
findall(without_Cα),
)
with_Cα = .!without_Cα
return ACα[with_Cα, :], BCα[with_Cα, :]
end
ACα, BCα
end
function _get_matched_Cαs(
A::AbstractVector{PDBResidue},
B::AbstractVector{PDBResidue},
matches,
)
if Base.IteratorSize(typeof(matches)) == Base.SizeUnknown()
Asel, Bsel = PDBResidue[], PDBResidue[]
for (i, j) in matches
push!(Asel, A[i])
push!(Bsel, B[j])
end
return _get_matched_Cαs(Asel, Bsel, nothing)
end
Asel = Vector{PDBResidue}(undef, length(matches))
Bsel = similar(Asel)
for (k, (i, j)) in enumerate(matches)
Asel[k] = A[i]
Bsel[k] = B[j]
end
return _get_matched_Cαs(Asel, Bsel, nothing)
end
"""
Asuper, Bsuper, RMSD = superimpose(A, B, matches=nothing)
This function takes `A::AbstractVector{PDBResidue}` (reference) and
`B::AbstractVector{PDBResidue}`. Translates `A` and `B` to the origin of coordinates,
and rotates `B` so that `rmsd(A,B)` is minimized with the Kabsch algorithm
(using only their α carbons).
Returns the rotated and translated versions of `A` and `B`, and the RMSD value.
Optionally provide `matches` which iterates over matched index pairs in `A` and `B`,
e.g., `matches = [(3, 5), (4, 6), ...]`. The alignment will be constructed
using just the matching residues.
"""
function superimpose(
A::AbstractVector{PDBResidue},
B::AbstractVector{PDBResidue},
matches = nothing,
)
ACα, BCα = _get_matched_Cαs(A, B, matches)
Bxyz = PDB.coordinatesmatrix(B)
meanACα = vec(mean(ACα, dims = 1))
meanBCα = vec(mean(BCα, dims = 1))
if !_iscentered(meanBCα)
@inbounds BCα[:, 1] .-= meanBCα[1]
@inbounds BCα[:, 2] .-= meanBCα[2]
@inbounds BCα[:, 3] .-= meanBCα[3]
@inbounds Bxyz[:, 1] .-= meanBCα[1]
@inbounds Bxyz[:, 2] .-= meanBCα[2]
@inbounds Bxyz[:, 3] .-= meanBCα[3]
end
if !_iscentered(meanACα)
@inbounds ACα[:, 1] .-= meanACα[1]
@inbounds ACα[:, 2] .-= meanACα[2]
@inbounds ACα[:, 3] .-= meanACα[3]
Axyz = PDB.coordinatesmatrix(A)
@inbounds Axyz[:, 1] .-= meanACα[1]
@inbounds Axyz[:, 2] .-= meanACα[2]
@inbounds Axyz[:, 3] .-= meanACα[3]
RotationMatrix = PDB.kabsch(ACα, BCα)
return (
change_coordinates(A, Axyz),
change_coordinates(B, Bxyz * RotationMatrix),
PDB.rmsd(ACα, BCα * RotationMatrix),
)
else
RotationMatrix = PDB.kabsch(ACα, BCα)
return (
A,
change_coordinates(B, Bxyz * RotationMatrix),
PDB.rmsd(ACα, BCα * RotationMatrix),
)
end
end
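# Minimal usage sketch (hypothetical, already-parsed structures `resA` and `resB`, both
# `Vector{PDBResidue}` of the same length):
#
#     # Asup, Bsup, value = superimpose(resA, resB)
#     # value ≈ rmsd(Asup, Bsup, superimposed=true)   # holds when every residue has a Cα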
# RMSF: Root Mean-Square-average distance (Fluctuation)
# -----------------------------------------------------
"""
This looks for errors in the input to rmsf methods
"""
function _rmsf_test(vector)
n = length(vector)
if n < 2
throw(ArgumentError("You need at least two matrices/structures"))
end
sizes = unique(Tuple{Int,Int}[size(s) for s in vector])
if length(sizes) > 1
throw(ArgumentError("Matrices/Structures must have the same number of rows/atoms"))
end
if sizes[1][2] != 3
throw(ArgumentError("Matrices should have 3 columns (x, y, z)"))
end
end
"""
Calculates the average/mean position of each atom in a set of structures.
The function takes a vector (`AbstractVector`) of vectors (`AbstractVector{PDBResidue}`)
or matrices (`AbstractMatrix{Float64}`) as its first argument. As a second (optional) argument this
function can take an `AbstractVector{Float64}` of matrix/structure weights to return a
weighted mean. When an `AbstractVector{PDBResidue}` is used, if the keyword argument `calpha` is
`false` the mean is calculated over all the atoms. By default only alpha carbons are used
(default: `calpha=true`).
"""
function mean_coordinates(vec::AbstractVector{T}) where {T<:AbstractMatrix{Float64}}
_rmsf_test(vec)
n = length(vec)
reduce(+, vec) ./ n
end
function mean_coordinates(
vec::AbstractVector{T},
matrixweights::AbstractVector{Float64},
) where {T<:AbstractMatrix{Float64}}
_rmsf_test(vec)
@assert length(vec) == length(matrixweights) "The number of matrix weights must be equal to the number of matrices."
n = sum(matrixweights)
reduce(+, (vec .* matrixweights)) ./ n
end
function mean_coordinates(
vec::AbstractVector{T};
calpha::Bool = true,
) where {T<:AbstractVector{PDBResidue}}
mean_coordinates(map(calpha ? CAmatrix : coordinatesmatrix, vec))
end
function mean_coordinates(
vec::AbstractVector{T},
args...;
calpha::Bool = true,
) where {T<:AbstractVector{PDBResidue}}
mean_coordinates(map(calpha ? CAmatrix : coordinatesmatrix, vec), args...)
end
"""
Calculates the RMSF (Root Mean-Square-Fluctuation) between an atom and its average
position in a set of structures. The function takes a vector (`AbstractVector`) of
vectors (`AbstractVector{PDBResidue}`) or matrices (`AbstractMatrix{Float64}`) as its first
argument. As a second (optional) argument this function can take an `AbstractVector{Float64}`
of matrix/structure weights to return the root weighted mean-square-fluctuation around
the weighted mean structure. When an `AbstractVector{PDBResidue}` is used, if the keyword argument
`calpha` is `false` the RMSF is calculated over all the atoms. By default only alpha
carbons are used (default: `calpha=true`).
"""
function rmsf(vector::AbstractVector{T}) where {T<:AbstractMatrix{Float64}}
m = mean_coordinates(vector)
# MIToS RMSF is calculated as in Eq. 6 from:
# Kuzmanic, Antonija, and Bojan Zagrovic.
# "Determination of ensemble-average pairwise root mean-square deviation from experimental B-factors."
# Biophysical journal 98.5 (2010): 861-871.
vec(sqrt.(mean(map(mat -> mapslices(x -> sum(abs2, x), mat .- m, dims = 2), vector))))
end
function rmsf(
vector::AbstractVector{T},
matrixweights::AbstractVector{Float64},
) where {T<:AbstractMatrix{Float64}}
m = mean_coordinates(vector, matrixweights)
d = map(mat -> mapslices(x -> sum(abs2, x), mat .- m, dims = 2), vector)
vec(sqrt.(sum(d .* matrixweights) / sum(matrixweights)))
end
function rmsf(
vec::AbstractVector{T};
calpha::Bool = true,
) where {T<:AbstractVector{PDBResidue}}
rmsf(map(calpha ? CAmatrix : coordinatesmatrix, vec))
end
function rmsf(
vec::AbstractVector{T},
matrixweights::AbstractVector{Float64};
calpha::Bool = true,
) where {T<:AbstractVector{PDBResidue}}
rmsf(map(calpha ? CAmatrix : coordinatesmatrix, vec), matrixweights)
end
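# Minimal usage sketch (hypothetical ensemble `models`, a vector whose elements are
# `Vector{PDBResidue}` structures sharing the same α-carbons; weights are made up):
#
#     # avg   = mean_coordinates(models)            # Nx3 matrix with the mean Cα positions
#     # fluct = rmsf(models)                        # one fluctuation value per Cα
#     # wfluc = rmsf(models, [0.5, 0.3, 0.2])       # weighted variant (three models assumed)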
# ==============================================================================
struct MMCIFFile <: FileFormat end
_clean_string(s::String) = replace(s, "." => "", "?" => "")
function _parse_mmcif_to_pdbresidues(mmcif_dict::MMCIFDict, label::Bool)
# Choose the correct prefix based on the label argument
prefix = label ? "_atom_site.label" : "_atom_site.auth"
chain_attr = string(prefix, "_asym_id")
comp_id_attr = string(prefix, "_comp_id")
atom_id_attr = string(prefix, "_atom_id")
# Extract relevant entries from the MMCIFDict
auth_asym_ids = mmcif_dict[chain_attr] # Chain
auth_seq_ids = mmcif_dict["_atom_site.auth_seq_id"] # Residue number
    label_seq_ids = mmcif_dict["_atom_site.label_seq_id"] # Residue number (PDBe)
auth_comp_ids = mmcif_dict[comp_id_attr] # Residue name (PDB)
atom_names = mmcif_dict[atom_id_attr] # Atom name, e.g. "CA"
cartn_x = mmcif_dict["_atom_site.Cartn_x"] # x
cartn_y = mmcif_dict["_atom_site.Cartn_y"] # y
cartn_z = mmcif_dict["_atom_site.Cartn_z"] # z
occupancies = mmcif_dict["_atom_site.occupancy"] # Occupancy
bfactors = mmcif_dict["_atom_site.B_iso_or_equiv"] # B-factor
elements = mmcif_dict["_atom_site.type_symbol"] # Element, e.g. "C"
group_pdb = mmcif_dict["_atom_site.group_PDB"] # Group type, "ATOM" or "HETATM"
pdb_model = mmcif_dict["_atom_site.pdbx_PDB_model_num"] # Model number
alt_ids = mmcif_dict["_atom_site.label_alt_id"] # Alternative location ID
formal_charges = mmcif_dict["_atom_site.pdbx_formal_charge"] # Formal charge
ins_codes = mmcif_dict["_atom_site.pdbx_PDB_ins_code"] # Insertion codes
residues = PDBResidue[]
current_residue_id = ""
current_residue = PDBResidue(PDBResidueIdentifier("", "", "", "", "", ""), PDBAtom[])
for i = 1:length(auth_seq_ids)
pdb_number = string(auth_seq_ids[i], _clean_string(ins_codes[i]))
pdbe_number = _clean_string(label_seq_ids[i])
residue_id = PDBResidueIdentifier(
pdbe_number,
pdb_number,
auth_comp_ids[i],
group_pdb[i],
pdb_model[i],
auth_asym_ids[i],
)
if current_residue_id != string(residue_id)
if !isempty(current_residue.atoms)
push!(residues, current_residue)
end
current_residue = PDBResidue(residue_id, Vector{PDBAtom}())
current_residue_id = string(residue_id)
end
atom = PDBAtom(
Coordinates(
parse(Float64, cartn_x[i]),
parse(Float64, cartn_y[i]),
parse(Float64, cartn_z[i]),
),
atom_names[i],
elements[i],
parse(Float64, occupancies[i]),
bfactors[i],
_clean_string(alt_ids[i]),
_clean_string(formal_charges[i]),
)
push!(current_residue.atoms, atom)
end
if !isempty(current_residue.atoms)
push!(residues, current_residue)
end
residues
end
"""
`parse_file(io, ::Type{MMCIFFile}; chain=All, model=All, group=All, atomname=All, onlyheavy=false, label=true, occupancyfilter=false)`
Parses an mmCIF file and returns a list of `PDBResidue`s. Setting `chain`, `model`, `group`,
`atomname` and `onlyheavy` values can be used to select a subset of residues. Group can be
`"ATOM"` or `"HETATM"`. If those keyword arguments are not set, all residues are returned.
If the keyword argument `label` (default: `true`) is `false`, the **auth_** attributes will be used instead
of the **label_** attributes for `chain`, `atom`, and residue `name` fields. The **auth_**
attributes are alternatives provided by an author in order to match the
identification/values used in the publication that describes the structure. If the
keyword argument `occupancyfilter` (default: `false`) is `true`, only the atoms
with the best occupancy are returned.
"""
function Utils.parse_file(
io::Union{IO,String},
::Type{MMCIFFile};
chain::Union{String,Type{All}} = All,
model::Union{String,Type{All}} = All,
group::Union{String,Type{All}} = All,
atomname::Union{String,Type{All}} = All,
onlyheavy::Bool = false,
label::Bool = true,
occupancyfilter::Bool = false,
)
mmcif_dict = MMCIFDict(io)
residues = select_residues(
_parse_mmcif_to_pdbresidues(mmcif_dict, label),
model = model,
chain = chain,
group = group,
)
for res in residues
filter!(a -> _is(a.atom, atomname), res.atoms)
if occupancyfilter
res.atoms = bestoccupancy(res.atoms)
end
if onlyheavy
filter!(a -> a.element != "H", res.atoms)
end
end
filter!(res -> !isempty(res.atoms), residues)
end
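# Minimal usage sketch (hypothetical file name; a `String` argument is assumed to be a
# file path accepted by `MMCIFDict`):
#
#     # residues = parse_file("structure.cif", MMCIFFile, chain="A", group="ATOM")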
function _resnumber(number)
num = replace(number, r"[A-Za-z]" => "")
isempty(num) ? "." : num
end
function _inscode(res::PDBResidue)
m = match(r"[A-Za-z]$", res.id.number)
return m === nothing ? "?" : m.match
end
function _pdbresidues_to_mmcifdict(
residues::Vector{PDBResidue};
label::Bool = false,
molecular_structures::Bool = false,
)
# Initialize MMCIFDict with the necessary fields
mmcif_dict = MMCIFDict()
# Initialize fields as empty arrays
if molecular_structures || !label
mmcif_dict["_atom_site.auth_asym_id"] = String[]
mmcif_dict["_atom_site.auth_comp_id"] = String[]
mmcif_dict["_atom_site.auth_atom_id"] = String[]
end
if label
mmcif_dict["_atom_site.label_asym_id"] = String[]
mmcif_dict["_atom_site.label_comp_id"] = String[]
mmcif_dict["_atom_site.label_atom_id"] = String[]
end
mmcif_dict["_atom_site.id"] = String[]
mmcif_dict["_atom_site.auth_seq_id"] = String[]
mmcif_dict["_atom_site.label_seq_id"] = String[]
mmcif_dict["_atom_site.Cartn_x"] = String[]
mmcif_dict["_atom_site.Cartn_y"] = String[]
mmcif_dict["_atom_site.Cartn_z"] = String[]
mmcif_dict["_atom_site.occupancy"] = String[]
mmcif_dict["_atom_site.B_iso_or_equiv"] = String[]
mmcif_dict["_atom_site.type_symbol"] = String[]
mmcif_dict["_atom_site.group_PDB"] = String[]
mmcif_dict["_atom_site.pdbx_PDB_model_num"] = String[]
mmcif_dict["_atom_site.pdbx_PDB_ins_code"] = String[]
mmcif_dict["_atom_site.label_alt_id"] = String[]
mmcif_dict["_atom_site.pdbx_formal_charge"] = String[]
atom_id_counter = 1
for res in residues
for atom in res.atoms
if molecular_structures || !label
push!(mmcif_dict["_atom_site.auth_asym_id"], res.id.chain)
push!(mmcif_dict["_atom_site.auth_comp_id"], res.id.name)
push!(mmcif_dict["_atom_site.auth_atom_id"], atom.atom)
end
if label
push!(mmcif_dict["_atom_site.label_asym_id"], res.id.chain)
push!(mmcif_dict["_atom_site.label_comp_id"], res.id.name)
push!(mmcif_dict["_atom_site.label_atom_id"], atom.atom)
end
push!(mmcif_dict["_atom_site.id"], string(atom_id_counter))
push!(mmcif_dict["_atom_site.auth_seq_id"], _resnumber(res.id.number))
push!(mmcif_dict["_atom_site.label_seq_id"], _resnumber(res.id.PDBe_number))
push!(mmcif_dict["_atom_site.Cartn_x"], string(atom.coordinates.x))
push!(mmcif_dict["_atom_site.Cartn_y"], string(atom.coordinates.y))
push!(mmcif_dict["_atom_site.Cartn_z"], string(atom.coordinates.z))
push!(mmcif_dict["_atom_site.occupancy"], string(atom.occupancy))
push!(mmcif_dict["_atom_site.B_iso_or_equiv"], atom.B)
push!(mmcif_dict["_atom_site.type_symbol"], atom.element)
push!(mmcif_dict["_atom_site.group_PDB"], res.id.group)
push!(mmcif_dict["_atom_site.pdbx_PDB_model_num"], res.id.model)
push!(mmcif_dict["_atom_site.pdbx_PDB_ins_code"], _inscode(res))
push!(
mmcif_dict["_atom_site.label_alt_id"],
isempty(atom.alt_id) ? "." : atom.alt_id,
)
push!(
mmcif_dict["_atom_site.pdbx_formal_charge"],
isempty(atom.charge) ? "?" : atom.charge,
)
atom_id_counter += 1
end
end
return mmcif_dict
end
function Utils.print_file(
io::IO,
residues::AbstractVector{PDBResidue},
format::Type{MMCIFFile};
label::Bool = false,
)
mmcif_dict = _pdbresidues_to_mmcifdict(residues, label = label)
writemmcif(io, mmcif_dict)
end
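# Minimal usage sketch (hypothetical residue vector and output path): writing residues
# back as mmCIF.
#
#     # open("output.cif", "w") do io
#     #     print_file(io, residues, MMCIFFile)
#     # end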
# ==============================================================================
"""
The module `PDB` defines types and methods to work with protein structures inside Julia.
It is useful to link structural and sequence information, and it is
needed to measure the predictive performance of mutual information scores at protein contact prediction.
**Features**
- Read and parse PDB and PDBML files
- Calculate distance and contacts between atoms or residues
- Determine interaction between residues
```julia
using MIToS.PDB
```
"""
module PDB
import LightXML
using RecipesBase # Plots for PDB Residues
using AutoHashEquals
using StaticArrays
using OrderedCollections
using PairwiseListMatrices
using NamedArrays
using LinearAlgebra
using Statistics # mean
using MIToS.Utils
using Format
using JSON3
using Downloads
using Logging
using BioStructures
export # PDBResidues
PDBResidueIdentifier,
Coordinates,
PDBAtom,
PDBResidue,
squared_distance,
distance,
contact,
isresidue,
isatom,
select_residues,
residues,
@residues,
residuesdict,
@residuesdict,
select_atoms,
atoms,
@atoms,
findheavy,
findatoms,
findCB,
selectbestoccupancy,
bestoccupancy,
residuepairsmatrix,
proximitymean,
# AtomsData
covalentradius,
vanderwaalsradius,
check_atoms_for_interactions,
# Interaction
ishydrophobic,
isaromatic,
iscationic,
isanionic,
ishbonddonor,
ishbondacceptor,
hydrogenbond,
vanderwaals,
vanderwaalsclash,
covalent,
disulphide,
aromaticsulphur,
pication,
aromatic,
ionic,
hydrophobic,
# PDBParser
PDBFile,
# MMCIF
MMCIFFile,
# PDBMLParser
PDBML,
downloadpdb,
downloadpdbheader,
getpdbdescription,
# Kabsch
kabsch,
center!,
rmsd,
getCA,
CAmatrix,
coordinatesmatrix,
change_coordinates,
centeredcoordinates,
centeredresidues,
superimpose,
mean_coordinates,
rmsf,
# MIToS.Utils
All,
read_file,
parse_file,
write_file,
print_file,
# Sequences
is_aminoacid,
modelled_sequences,
# AlphaFoldDB
query_alphafolddb,
download_alphafold_structure,
# Imported from BioStructures
MolecularStructure,
# Imported from Base (and exported for docs)
any,
parse,
angle
include("PDBResidues.jl")
include("Sequences.jl")
include("AtomsData.jl")
include("Interaction.jl")
include("PDBParser.jl")
include("MMCIF.jl")
include("PDBMLParser.jl")
include("BioStructures.jl")
include("Kabsch.jl")
include("Plots.jl")
include("AlphaFoldDB.jl")
end
# ==============================================================================
"""
`PDBML <: FileFormat`
Protein Data Bank Markup Language (PDBML), a representation of PDB data in XML format.
"""
struct PDBML <: FileFormat end
function _get_text(elem, name, default = nothing)
sub = LightXML.find_element(elem, name)
if sub !== nothing
return (LightXML.content(sub))
end
if default === nothing
throw(ErrorException("There is not $name for $elem"))
else
default
end
end
_get_ins_code(elem) = _get_text(elem, "pdbx_PDB_ins_code", "")
function _get_atom_iterator(document::LightXML.XMLDocument)
pdbroot = LightXML.root(document)
LightXML.child_elements(
LightXML.get_elements_by_tagname(pdbroot, "atom_siteCategory")[1],
)
end
"""
Used for parsing a PDB file into `Vector{PDBResidue}`
"""
function _generate_residues(
residue_dict::OrderedDict{PDBResidueIdentifier,Vector{PDBAtom}},
occupancyfilter::Bool = false,
)
if occupancyfilter
return (PDBResidue[PDBResidue(k, bestoccupancy(v)) for (k, v) in residue_dict])
else
return (PDBResidue[PDBResidue(k, v) for (k, v) in residue_dict])
end
end
"""
`parse_file(pdbml, ::Type{PDBML}; chain=All, model=All, group=All, atomname=All, onlyheavy=false, label=true, occupancyfilter=false)`
Reads a `LightXML.XMLDocument` representing a pdb file.
Returns a list of `PDBResidue`s (see `MIToS.PDB.PDBResidues`).
Setting the `chain`, `model`, `group`, `atomname` and `onlyheavy` values can be used to select
a subset of all residues. If not set, all residues are returned. If the keyword
argument `label` (default: `true`) is `false`, the **auth_** attributes will be used instead
of the **label_** attributes for `chain`, `atom` and residue `name` fields. The **auth_**
attributes are alternatives provided by an author in order to match the
identification/values used in the publication that describes the structure. If the
keyword argument `occupancyfilter` (default: `false`) is `true`, only the atoms with the
best occupancy are returned.
"""
function Utils.parse_file(
pdbml::LightXML.XMLDocument,
::Type{PDBML};
chain::Union{String,Type{All}} = All,
model::Union{String,Type{All}} = All,
group::Union{String,Type{All}} = All,
atomname::Union{String,Type{All}} = All,
onlyheavy::Bool = false,
label::Bool = true,
occupancyfilter::Bool = false,
)
residues = Vector{PDBResidue}()
prefix = label ? "label" : "auth"
chain_attribute = string(prefix, "_asym_id")
atom_attribute = string(prefix, "_atom_id")
comp_attribute = string(prefix, "_comp_id")
residue_id = PDBResidueIdentifier("", "", "", "", "", "")
atoms = _get_atom_iterator(pdbml)
for atom in atoms
atom_name = _get_text(atom, atom_attribute)
if !_is(atom_name, atomname)
continue
end
element = _get_text(atom, "type_symbol")
if onlyheavy && element == "H"
continue
end
atom_group = _get_text(atom, "group_PDB")
if !_is(atom_group, group)
continue
end
atom_chain = _get_text(atom, chain_attribute)
if !_is(atom_chain, chain)
continue
end
atom_model = _get_text(atom, "pdbx_PDB_model_num")
if !_is(atom_model, model)
continue
end
PDBe_number = _get_text(atom, "label_seq_id")
# Residue_No _atom_site.auth_seq_id
# Ins_Code _atom_site.pdbx_PDB_ins_code
PDB_number = string(_get_text(atom, "auth_seq_id"), _get_ins_code(atom))
name = _get_text(atom, comp_attribute)
if (residue_id.PDBe_number != PDBe_number) ||
(residue_id.number != PDB_number) ||
(residue_id.name != name) ||
(residue_id.chain != atom_chain) ||
(residue_id.group != atom_group) ||
(residue_id.model != atom_model)
n_res = length(residues)
if occupancyfilter && n_res > 0
residues[n_res].atoms = bestoccupancy(residues[n_res].atoms)
end
residue_id = PDBResidueIdentifier(
PDBe_number,
PDB_number,
name,
atom_group,
atom_model,
atom_chain,
)
push!(residues, PDBResidue(residue_id, Vector{PDBAtom}()))
end
x = parse(Float64, _get_text(atom, "Cartn_x"))
y = parse(Float64, _get_text(atom, "Cartn_y"))
z = parse(Float64, _get_text(atom, "Cartn_z"))
occupancy = parse(Float64, _get_text(atom, "occupancy"))
B = _get_text(atom, "B_iso_or_equiv")
alt_id = _get_text(atom, "label_alt_id")
charge = _get_text(atom, "pdbx_formal_charge", "")
push!(
residues[end].atoms,
PDBAtom(Coordinates(x, y, z), atom_name, element, occupancy, B, alt_id, charge),
)
end
if occupancyfilter
residues[end].atoms = bestoccupancy(residues[end].atoms)
end
residues
end
# Download PDB
# ============
function _inputnameforgzip(outfile)
if endswith(outfile, ".gz")
return (outfile)
end
string(outfile, ".gz")
end
_file_extension(format::Type{MMCIFFile}) = ".cif.gz"
_file_extension(format::Type{PDBML}) = ".xml.gz"
_file_extension(format::Type{PDBFile}) = ".pdb.gz"
"""
downloadpdb(pdbcode::String; format::Type{T} = MMCIFFile, filename, baseurl, kargs...)
It downloads a gzipped PDB file from the PDB database.
It requires a four character `pdbcode`.
Its default `format` is `MMCIFFile` (mmCIF) and it uses the `baseurl`
"http://www.rcsb.org/pdb/files/".
`filename` is the path/name of the output file.
This function calls `MIToS.Utils.download_file` that calls `Downloads.download`. So, you
can use keyword arguments, such as `headers`, from that function.
"""
function downloadpdb(
pdbcode::String;
format::Type{T} = MMCIFFile,
filename::String = uppercase(pdbcode) * _file_extension(format),
baseurl::String = "http://www.rcsb.org/pdb/files/",
kargs...,
) where {T<:FileFormat}
if check_pdbcode(pdbcode)
pdbfilename = uppercase(pdbcode) * _file_extension(format)
filename = _inputnameforgzip(filename)
sepchar = endswith(baseurl, "/") ? "" : "/"
download_file(string(baseurl, sepchar, pdbfilename), filename; kargs...)
else
throw(ErrorException("$pdbcode is not a correct PDB code"))
end
filename
end
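# Minimal usage sketch (the PDB code is just an example; files are written to the working
# directory unless `filename` is given):
#
#     # cif_gz = downloadpdb("1IVO")                    # 1IVO.cif.gz (mmCIF, the default)
#     # pdb_gz = downloadpdb("1IVO", format=PDBFile)    # 1IVO.pdb.gz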
# RESTful PDB interface
# =====================
"""
_escape_url_query(query::String)::String
This function uses percent-encoding to escape the characters that are not allowed in a URL.
"""
function _escape_url_query(query::String)::String
# Characters that do not need to be percent-encoded
unreserved =
Set{Char}("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~")
encoded_url = IOBuffer()
for byte in codeunits(query)
char = Char(byte)
if char in unreserved
print(encoded_url, char)
else
print(encoded_url, '%')
print(encoded_url, uppercase(string(Int(char), base = 16, pad = 2)))
end
end
String(take!(encoded_url))
end
function _graphql_query(pdbcode::String)
"""
{
entry(entry_id: "$pdbcode") {
entry {
id
}
rcsb_entry_info {
experimental_method
assembly_count
resolution_combined
}
rcsb_accession_info {
initial_release_date
}
polymer_entities {
rcsb_polymer_entity_container_identifiers {
entity_id
auth_asym_ids
}
entity_poly {
rcsb_entity_polymer_type
}
}
}
}
""" |> _escape_url_query
end
function _pdbheader(pdbcode::String; kargs...)
pdbcode = uppercase(pdbcode)
if check_pdbcode(pdbcode)
with_logger(ConsoleLogger(stderr, Logging.Warn)) do
body = IOBuffer()
Downloads.request(
"https://data.rcsb.org/graphql?query=$(_graphql_query(pdbcode))";
method = "GET",
output = body,
kargs...,
)
String(take!(body))
end
else
throw(ErrorException("$pdbcode is not a correct PDB code"))
end
end
"""
It downloads a JSON file containing the PDB header information.
"""
function downloadpdbheader(pdbcode::String; filename::String = tempname(), kargs...)
open(filename, "w") do fh
write(fh, _pdbheader(pdbcode; kargs...))
end
filename
end
"""
Access general information about a PDB entry (e.g., Header information) using the
GraphQL interface of the PDB database. It parses the JSON answer into a `JSON3.Object` that
can be used as a dictionary.
"""
function getpdbdescription(pdbcode::String; kargs...)
JSON3.read(_pdbheader(pdbcode; kargs...))["data"]["entry"]
end
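# Minimal usage sketch (the PDB code is just an example; the available fields follow the
# GraphQL query defined above):
#
#     # desc = getpdbdescription("1IVO")
#     # desc["rcsb_entry_info"]["experimental_method"]
#     # desc["rcsb_accession_info"]["initial_release_date"]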
# ==============================================================================
"""
`PDBFile <: FileFormat`
Protein Data Bank (PDB) format.
It provides a standard representation for macromolecular
structure data derived from X-ray diffraction and NMR studies.
"""
struct PDBFile <: FileFormat end
function _parse_residueidentifier(line::String, atom_chain, line_id, actual_model)
# 23 - 26 Integer Residue sequence number.
# 27 AChar Code for insertion of residues.
PDB_number = String(strip(SubString(line, 23, 27), ' '))
name = String(strip(SubString(line, 18, 20), ' '))
PDBResidueIdentifier("", PDB_number, name, line_id, actual_model, atom_chain)
end
function _parse_pdbatom(line::String, atom_name, element)
x = parse(Float64, SubString(line, 31, 38))
y = parse(Float64, SubString(line, 39, 46))
z = parse(Float64, SubString(line, 47, 54))
occupancy = parse(Float64, SubString(line, 55, 60))
B = String(strip(SubString(line, 61, 66), ' '))
alt_id = strip(String(SubString(line, 17, 17)))
if 80 ≤ ncodeunits(line)
charge = String(strip(SubString(line, 79, 80)))
else
charge = ""
end
PDBAtom(Coordinates(x, y, z), atom_name, element, occupancy, B, alt_id, charge)
end
"""
`parse_file(io, ::Type{PDBFile}; chain=All, model=All, group=All, atomname=All, onlyheavy=false, occupancyfilter=false)`
Reads a text file of a PDB entry.
Returns a list of `PDBResidue`s (see `MIToS.PDB.PDBResidues`).
Setting the `chain`, `model`, `group`, `atomname` and `onlyheavy` values
can be used to select a subset of all residues. Group can be `"ATOM"`
or `"HETATM"`. If not set, all residues are returned.
If the keyword argument `occupancyfilter` (default: `false`) is `true`,
only the atoms with the best occupancy are returned.
"""
function Utils.parse_file(
io::Union{IO,String},
::Type{PDBFile};
chain::Union{String,Type{All}} = All,
model::Union{String,Type{All}} = All,
group::Union{String,Type{All}} = All,
atomname::Union{String,Type{All}} = All,
onlyheavy::Bool = false,
occupancyfilter::Bool = false,
)
residues = Vector{PDBResidue}()
model_counter = 0
actual_model = "1"
is_model = _is(actual_model, model)
previous_used_line = ""
residue_id = PDBResidueIdentifier("", "", "", "", "", "")
for line::String in lineiterator(io)
line_id = match(r"^\S{0,6}", line).match
if line_id == "MODEL"
model_counter += 1
actual_model = string(model_counter)
is_model = _is(actual_model, model)
end
if (group === All && (line_id == "ATOM" || line_id == "HETATM")) ||
(line_id == group)
atom_chain = string(line[22])
atom_name = String(strip(SubString(line, 13, 16), ' '))
# PDB files generated by Foldseek have only CA atoms and no element identifier
# because they use only the first 66 columns of the file. We prefer using
# `ncodeunits` over `length` since it's faster and `line` consists solely of
# ASCII characters.
element = if 78 ≤ ncodeunits(line)
String(strip(SubString(line, 77, 78), ' '))
else
"" # missing element identifier
end
if is_model &&
_is(atom_chain, chain) &&
_is(atom_name, atomname) &&
(!onlyheavy || element != "H")
if (previous_used_line == "") ||
(residue_id.group != line_id) ||
(residue_id.model != actual_model) ||
(SubString(previous_used_line, 18, 27) != SubString(line, 18, 27))
n_res = length(residues)
if occupancyfilter && n_res > 0
residues[n_res].atoms = bestoccupancy(residues[n_res].atoms)
end
residue_id =
_parse_residueidentifier(line, atom_chain, line_id, actual_model)
push!(residues, PDBResidue(residue_id, Vector{PDBAtom}()))
end
atom_data = _parse_pdbatom(line, atom_name, element)
push!(residues[end].atoms, atom_data)
previous_used_line = line
end
end
end
if occupancyfilter
residues[end].atoms = bestoccupancy(residues[end].atoms)
end
residues
end
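# Minimal usage sketch (hypothetical file name): read only the α-carbons of chain A.
# `read_file` (exported by the package) is assumed here to open the file and forward the
# keyword arguments to this `parse_file` method.
#
#     # ca_chain_a = read_file("structure.pdb", PDBFile, chain="A", group="ATOM", atomname="CA")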
# Print PDB
# =========
# ATOM & HETATM
# COLUMNS DATA TYPE CONTENTS
# --------------------------------------------------------------------------------
# 1 - 6 Record name "ATOM "
# 7 - 11 Integer Atom serial number.
# 13 - 16 Atom Atom name.
# 17 Character Alternate location indicator.
# 18 - 20 Residue name Residue name.
# 22 Character Chain identifier.
# 23 - 26 Integer Residue sequence number.
# 27 AChar Code for insertion of residues.
# 31 - 38 Real(8.3) Orthogonal coordinates for X in Angstroms.
# 39 - 46 Real(8.3) Orthogonal coordinates for Y in Angstroms.
# 47 - 54 Real(8.3) Orthogonal coordinates for Z in Angstroms.
# 55 - 60 Real(6.2) Occupancy.
# 61 - 66 Real(6.2) Temperature factor (Default = 0.0).
# 73 - 76 LString(4) Segment identifier, left-justified.
# 77 - 78 LString(2) Element symbol, right-justified.
# 79 - 80 LString(2) Charge on the atom.
# Example:
# 1 2 3 4 5 6 7 8
# 12345678901234567890123456789012345678901234567890123456789012345678901234567890
# ATOM 145 N VAL A 25 32.433 16.336 57.540 1.00 11.92 A1 N
# ATOM 146 CA VAL A 25 31.132 16.439 58.160 1.00 11.85 A1 C
# ATOM 147 C VAL A 25 30.447 15.105 58.363 1.00 12.34 A1 C
# ATOM 148 O VAL A 25 29.520 15.059 59.174 1.00 15.65 A1 O
# ATOM 149 CB AVAL A 25 30.385 17.437 57.230 0.28 13.88 A1 C
# ATOM 150 CB BVAL A 25 30.166 17.399 57.373 0.72 15.41 A1 C
# ATOM 151 CG1AVAL A 25 28.870 17.401 57.336 0.28 12.64 A1 C
# ATOM 152 CG1BVAL A 25 30.805 18.788 57.449 0.72 15.11 A1 C
# ATOM 153 CG2AVAL A 25 30.835 18.826 57.661 0.28 13.58 A1 C
# ATOM 154 CG2BVAL A 25 29.909 16.996 55.922 0.72 13.25 A1 C
#
# HETATM 1357 MG MG 168 4.669 34.118 19.123 1.00 3.16 MG2+
# HETATM 3835 FE HEM 1 17.140 3.115 15.066 1.00 14.14 FE3+
const _Format_PDB_ATOM = FormatExpr(
# 1 2 3 4 5 6 7 8
# 12345678901234567890123456789012345678901234567890123456789012345678901234567890
# > <> < > <|> < |> <| > <> <> <> <> < > <><><
"{:<6}{:>5d} {:<4}{:>1}{:>3} {:>1}{:>4}{:>1} {:>8.3f}{:>8.3f}{:>8.3f}{:>6.2f}{:>6} {:<4}{:>2}{:>2}\n",
)
# Models are numbered sequentially beginning with 1.
# Each MODEL must have a corresponding ENDMDL record.
# COLUMNS DATA TYPE CONTENTS
# --------------------------------------------------------------------------------
# 1 - 6 Record name "MODEL "
# 11 - 14 Integer Model serial number
# Example:
# 1 2 3 4 5 6 7 8
# 12345678901234567890123456789012345678901234567890123456789012345678901234567890
# MODEL 1
# ATOM 1 N ALA 1 11.104 6.134 -6.504 1.00 0.00 N
# ATOM 294 2HG GLU 18 -13.630 -3.769 0.160 1.00 0.00 H
# TER 295 GLU 18
# ENDMDL
const _Format_PDB_MODEL = FormatExpr(
# 1
# 12345678901234
# MODEL 1
"MODEL {:>4}\n",
)
# TER
# Indicates the end of a list of ATOM/HETATM records for a chain
# The TER records occur in the coordinate section of the entry, and indicate
# the last residue presented for each polypeptide and/or nucleic acid chain for
# which there are coordinates.
# The TER record has the same residue name, chain identifier, sequence number
# and insertion code as the terminal residue. The serial number of the TER
# record is one number greater than the serial number of the ATOM/HETATM
# preceding the TER.
# The residue name appearing on the TER record must be the same as the residue name
# of the immediately preceding ATOM or non-water HETATM record.
# COLUMNS DATA TYPE CONTENTS
# --------------------------------------------------------------------------------
# 1 - 6 Record name "TER "
# 7 - 11 Integer Serial number
# 18 - 20 Residue name Residue name
# 22 Character Chain identifier
# 23 - 26 Integer Residue sequence number
# 27 AChar Insertion code
# Example:
# 1 2 3 4 5 6 7 8
# 12345678901234567890123456789012345678901234567890123456789012345678901234567890
# ATOM 4150 H ALA A 431 8.674 16.036 12.858 1.00 0.00 H
# TER 4151 ALA A 431
# HETATM 1415 O2 BLE P 1 13.775 30.147 14.862 1.09 20.95 O
# TER 1416 BLE P 1
const _Format_PDB_TER = FormatExpr(
# TER 4151 ALA A 431
# 1 2 3 4 5 6 7 8
# 12345678901234567890123456789012345678901234567890123456789012345678901234567890
"TER {:>5d} {:>3} {:>1}{:>4}{:>1}\n",
)
function _get_residue_number(res::PDBResidue)
number = match(r"(-?\d+)(\D?)", res.id.number)
if number === nothing
throw(ErrorException("Invalid residue number: $(res.id.number)"))
end
number
end
function Utils.print_file(
io::IO,
res::PDBResidue,
format::Type{PDBFile},
atom_index::Int,
serial_number::Int,
)
number = _get_residue_number(res)
atomname = res.atoms[atom_index].atom
printfmt(
io,
_Format_PDB_ATOM,
res.id.group,
serial_number,
length(atomname) <= 3 ? string(" ", atomname) : atomname, # It works with NACCESS
res.atoms[atom_index].alt_id,
res.id.name,
res.id.chain,
number[1],
number[2],
res.atoms[atom_index].coordinates.x,
res.atoms[atom_index].coordinates.y,
res.atoms[atom_index].coordinates.z,
res.atoms[atom_index].occupancy,
res.atoms[atom_index].B,
" ",
res.atoms[atom_index].element,
res.atoms[atom_index].charge,
)
serial_number + 1
end
function Utils.print_file(io::IO, res::PDBResidue, format::Type{PDBFile}, start::Int = 1)
next = start
for i in eachindex(res.atoms)
next = print_file(io, res, format, i, next)
end
nothing
end
function Utils.print_file(
io::IO,
reslist::AbstractVector{PDBResidue},
format::Type{PDBFile},
start::Int = 1,
)
next = start
use_model = length(unique(map(res -> res.id.model, reslist))) > 1
if use_model
model = "START"
end
for resindex in eachindex(reslist)
res = reslist[resindex]
# MODEL
if use_model
if model != res.id.model
if model != "START"
println(io, "ENDMDL")
end
printfmt(io, _Format_PDB_MODEL, res.id.model)
end
model = res.id.model
end
# TER
# MIToS only prints TER for the ATOM group if the chain changes.
# Some modified residues are annotated as HETATM in the middle of the ATOM chain:
# TER can not be printed from ATOM to HETATM if the chain doesn’t change.
if resindex > 1
previous_res = reslist[resindex-1]
if (previous_res.id.group == "ATOM") && (previous_res.id.chain != res.id.chain)
number = _get_residue_number(previous_res)
printfmt(
io,
_Format_PDB_TER,
next,
previous_res.id.name,
previous_res.id.chain,
number[1],
number[2],
)
next += 1
end
end
# ATOM/HETATM
for i in eachindex(res.atoms)
next = print_file(io, res, format, i, next)
end
end
if use_model
println(io, "ENDMDL")
end
println(io, "END")
nothing
end
@doc """
`print_file(io, res, format::Type{PDBFile})`
`print_file(res, format::Type{PDBFile})`
Print a `PDBResidue` or a vector of `PDBResidue`s in PDB format.
""" print_file
# ==============================================================================
# PDB Types
# =========
"""
A `PDBResidueIdentifier` object contains the information needed to identity PDB residues.
It has the following fields that you can access at any moment for query purposes:
- `PDBe_number` : It's only used when a PDBML file is read (PDBe number as a string).
- `number` : PDB residue number, it includes insertion codes, e.g. `"34A"`.
- `name` : Three letter residue name in PDB, e.g. `"LYS"`.
- `group` : It can be `"ATOM"` or `"HETATM"`.
- `model` : The model number as a string, e.g. `"1"`.
- `chain` : The chain as a string, e.g. `"A"`.
"""
@auto_hash_equals struct PDBResidueIdentifier
PDBe_number::String # PDBe
number::String # PDB
name::String
group::String
model::String
chain::String
end
"""
A `Coordinates` object is a fixed size vector with the coordinates x,y,z.
"""
@auto_hash_equals struct Coordinates <: FieldVector{3,Float64}
x::Float64
y::Float64
z::Float64
function Coordinates(a::NTuple{3,Real})
new(a[1], a[2], a[3])
end
Coordinates(x, y, z) = new(x, y, z)
end
"""
A `PDBAtom` object contains the information from a PDB atom, without information of the
residue. It has the following fields that you can access at any moment for query purposes:
- `coordinates` : x,y,z coordinates, e.g. `Coordinates(109.641,73.162,42.7)`.
- `atom` : Atom name, e.g. `"CA"`.
- `element` : Element type of the atom, e.g. `"C"`.
- `occupancy` : A float number with the occupancy, e.g. `1.0`.
- `B` : B factor as a string, e.g. `"23.60"`.
- `alt_id` : Alternative location ID, e.g. `"A"`.
- `charge` : Charge of the atom, e.g. `"0"`.
"""
@auto_hash_equals struct PDBAtom
coordinates::Coordinates
atom::String
element::String
occupancy::Float64
B::String
alt_id::String
charge::String
end
"""
A `PDBResidue` object contains all the information about a PDB residue. It has the
following fields that you can access at any moment for query purposes:
- `id` : A `PDBResidueIdentifier` object.
- `atoms` : A vector of `PDBAtom`s.
"""
@auto_hash_equals mutable struct PDBResidue
id::PDBResidueIdentifier
atoms::Vector{PDBAtom}
end
Base.length(res::PDBResidue) = length(res.atoms)
# Copy
# ====
# copy is not defined for String objects, using deepcopy instead
for f in (:copy, :deepcopy)
@eval Base.$(f)(res::PDBResidueIdentifier) = PDBResidueIdentifier(
deepcopy(res.PDBe_number),
deepcopy(res.number),
deepcopy(res.name),
deepcopy(res.group),
deepcopy(res.model),
deepcopy(res.chain),
)
@eval Base.$(f)(res::Coordinates) = Coordinates($(f)(res.x), $(f)(res.y), $(f)(res.z))
@eval Base.$(f)(res::PDBAtom) = PDBAtom(
$(f)(res.coordinates),
deepcopy(res.atom),
deepcopy(res.element),
$(f)(res.occupancy),
deepcopy(res.B),
deepcopy(res.alt_id),
deepcopy(res.charge),
)
@eval Base.$(f)(res::PDBResidue) = PDBResidue($(f)(res.id), $(f)(res.atoms))
end
# Coordinates
# ===========
Base.vec(a::Coordinates) = Float64[a.x, a.y, a.z]
# Distances and geometry
# ----------------------
@inline function squared_distance(a::Coordinates, b::Coordinates)
(a.x - b.x)^2 + (a.y - b.y)^2 + (a.z - b.z)^2
end
"""
It calculates the squared euclidean distance, i.e. it avoids computing the `sqrt`.
"""
squared_distance(a::PDBAtom, b::PDBAtom) = squared_distance(a.coordinates, b.coordinates)
"""
It calculates the euclidean distance.
"""
distance(a::Coordinates, b::Coordinates) = sqrt(squared_distance(a, b))
distance(a::PDBAtom, b::PDBAtom) = distance(a.coordinates, b.coordinates)
function _squared_limit_contact(a::Coordinates, b::Coordinates, limit::AbstractFloat)
squared_distance(a, b) <= limit
end
function _squared_limit_contact(a::PDBAtom, b::PDBAtom, limit::AbstractFloat)
_squared_limit_contact(a.coordinates, b.coordinates, limit)
end
"""
`contact(a::Coordinates, b::Coordinates, limit::AbstractFloat)`
It returns true if the distance is less than or equal to the limit.
It doesn't call `sqrt` because it does `squared_distance(a,b) <= limit^2`.
"""
function contact(a::Coordinates, b::Coordinates, limit::AbstractFloat)
_squared_limit_contact(a, b, limit^2)
end
function contact(a::PDBAtom, b::PDBAtom, limit::Float64)
contact(a.coordinates, b.coordinates, limit)
end
"""
`angle(a::Coordinates, b::Coordinates, c::Coordinates)`
Angle (in degrees) at `b` between `a-b` and `b-c`
"""
function Base.angle(a::Coordinates, b::Coordinates, c::Coordinates)
A = b - a
B = b - c
norms = (norm(A) * norm(B))
if norms != 0
return (acosd(dot(A, B) / norms))
else
return (0.0)
end
end
function Base.angle(a::PDBAtom, b::PDBAtom, c::PDBAtom)
angle(a.coordinates, b.coordinates, c.coordinates)
end
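# Minimal usage sketch (arbitrary points): `Coordinates` is a 3-element `FieldVector`, so
# the geometric helpers work directly on constructed values.
#
#     # a = Coordinates(0.0, 0.0, 0.0)
#     # b = Coordinates(1.0, 0.0, 0.0)
#     # c = Coordinates(1.0, 1.0, 0.0)
#     # distance(a, b)       # 1.0
#     # contact(a, c, 1.5)   # true, the distance is sqrt(2)
#     # angle(a, b, c)       # 90.0 (degrees, measured at b)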
LinearAlgebra.cross(a::PDBAtom, b::PDBAtom) = cross(a.coordinates, b.coordinates)
# Find Residues/Atoms
# ===================
"""
ResidueQueryTypes = Union{String,Type{All},Regex,Function}
This type is used to indicate the type of the keyword arguments of functions that filter
residues or atoms, such as [`isresidue`](@ref), [`residues`](@ref), [`residuesdict`](@ref)
and [`atoms`](@ref).
"""
const ResidueQueryTypes = Union{String,Type{All},Regex,Function}
@inline _is(element::String, all::Type{All}) = true
@inline _is(element::String, value::String) = element == value
@inline _is(element::String, regex::Regex) = occursin(regex, element)
@inline _is(element::String, f::Function) = f(element)
# isresidue
# ---------
function isresidue(id::PDBResidueIdentifier, model, chain, group, residue)
Base.depwarn(
"isresidue using positional arguments is deprecated in favor of keyword arguments: isresidue(id; model, chain, group, residue)",
:isresidue,
force = true,
)
isresidue(id, model = model, chain = chain, group = group, residue = residue)
end
function isresidue(res::PDBResidue, model, chain, group, residue)
Base.depwarn(
"isresidue using positional arguments is deprecated in favor of keyword arguments: isresidue(res; model, chain, group, residue)",
:isresidue,
force = true,
)
isresidue(res.id, model = model, chain = chain, group = group, residue = residue)
end
function isresidue(
id::PDBResidueIdentifier;
model::ResidueQueryTypes = All,
chain::ResidueQueryTypes = All,
group::ResidueQueryTypes = All,
residue::ResidueQueryTypes = All,
)
_is(id.model, model) &&
_is(id.chain, chain) &&
_is(id.group, group) &&
_is(id.number, residue)
end
"""
isresidue(res; model=All, chain=All, group=All, residue=All)
This function tests if a `PDBResidue` has the indicated `model`, `chain`, `group`
and `residue` names/numbers. You can use the type `All` (default value) to avoid
filtering that level.
"""
function isresidue(
res::PDBResidue;
model::ResidueQueryTypes = All,
chain::ResidueQueryTypes = All,
group::ResidueQueryTypes = All,
residue::ResidueQueryTypes = All,
)
isresidue(res.id, model = model, chain = chain, group = group, residue = residue)
end
"""
It tests if the atom has the indicated atom name.
"""
isatom(atom::PDBAtom, name) = _is(atom.atom, name)
# select_residues
# ---------------
"""
select_residues(residue_list; model=All, chain=All, group=All, residue=All)
This function returns a new vector with the selected subset of residues from a list of
residues. You can use the keyword arguments `model`, `chain`, `group` and `residue` to
select the residues. You can use the type `All` (default value) to avoid filtering at
a particular level.
"""
function select_residues(
residue_list::AbstractArray{PDBResidue,N};
model::ResidueQueryTypes = All,
chain::ResidueQueryTypes = All,
group::ResidueQueryTypes = All,
residue::ResidueQueryTypes = All,
) where {N}
filter(
res ->
isresidue(res, model = model, chain = chain, group = group, residue = residue),
residue_list,
)
end
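# Minimal usage sketch (hypothetical `res_list::Vector{PDBResidue}`):
#
#     # chain_a  = select_residues(res_list, model="1", chain="A", group="ATOM")
#     # hetatoms = select_residues(res_list, group="HETATM")
#     # res_10   = select_residues(chain_a, residue="10")   # exact PDB residue number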
"""
The `residues` function for `AbstractArray{PDBResidue,N}` is **deprecated**. Use the
`select_residues` function instead. So, `residues(residue_list, model, chain, group, residue)`
becomes `select_residues(residue_list; model=model, chain=chain, group=group, residue=residue)`.
"""
function residues(
residue_list::AbstractArray{PDBResidue,N},
model,
chain,
group,
residue,
) where {N}
Base.depwarn(
"residues is deprecated in favor of select_residues(residue_list; model, chain, group, residue)",
:residues,
force = true,
)
select_residues(
residue_list;
model = model,
chain = chain,
group = group,
residue = residue,
)
end
"""
`@residues ... model ... chain ... group ... residue ...`
These return a new vector with the selected subset of residues from a list of residues. You
can use the type `All` to avoid filtering that option.
**DEPRECATED:** This macro is deprecated. Use the [`select_residues`](@ref) function instead.
"""
macro residues(
residue_list,
model::Symbol,
m,
chain::Symbol,
c,
group::Symbol,
g,
residue::Symbol,
r,
)
if model == :model && chain == :chain && group == :group && residue == :residue
Base.depwarn(
"Using the @residues macro is deprecated in favor of the select_residues function: select_residues(residue_list; model, chain, group, residue)",
Symbol("@residues"),
force = true,
)
return :(select_residues(
$(esc(residue_list));
model = $(esc(m)),
chain = $(esc(c)),
group = $(esc(g)),
residue = $(esc(r)),
))
else
throw(
ArgumentError(
"The signature is @residues ___ model ___ chain ___ group ___ residue ___",
),
)
end
end
# residuesdict
# ------------
"""
residuesdict(residue_list; model=All, chain=All, group=All, residue=All)
This function returns a dictionary (using PDB residue numbers as keys) with the selected
subset of residues. The residues are selected using the keyword arguments `model`, `chain`,
`group` and `residue`. You can use the type `All` (default value) to avoid filtering at
a particular level.
"""
function residuesdict(
residue_list::AbstractArray{PDBResidue,N};
model::ResidueQueryTypes = All,
chain::ResidueQueryTypes = All,
group::ResidueQueryTypes = All,
residue::ResidueQueryTypes = All,
) where {N}
dict = sizehint!(OrderedDict{String,PDBResidue}(), length(residue_list))
for res in residue_list
if isresidue(res, model = model, chain = chain, group = group, residue = residue)
dict[res.id.number] = res
end
end
dict
end
# Deprecation warning for the positional arguments version
function residuesdict(
residue_list::AbstractArray{PDBResidue,N},
model,
chain,
group,
residue,
) where {N}
Base.depwarn(
"residuesdict using positional arguments is deprecated in favor of keyword arguments: residuesdict(residue_list; model, chain, group, residue)",
:residuesdict,
force = true,
)
residuesdict(
residue_list;
model = model,
chain = chain,
group = group,
residue = residue,
)
end
"""
`@residuesdict ... model ... chain ... group ... residue ...`
This macro returns a dictionary (using PDB residue numbers as keys) with the selected
subset of residues from a list of residues. You can use the type `All` to avoid filtering
that option.
**DEPRECATED:** This macro is deprecated. Use the [`residuesdict`](@ref) function instead.
"""
macro residuesdict(
residue_list,
model::Symbol,
m,
chain::Symbol,
c,
group::Symbol,
g,
residue::Symbol,
r,
)
if model == :model && chain == :chain && group == :group && residue == :residue
Base.depwarn(
"Using @residuesdict macro is deprecated in favor of residuesdict function with keyword arguments: residuesdict(residue_list; model, chain, group, residue)",
Symbol("@residuesdict"),
force = true,
)
return :(residuesdict(
$(esc(residue_list));
model = $(esc(m)),
chain = $(esc(c)),
group = $(esc(g)),
residue = $(esc(r)),
))
else
throw(
ArgumentError(
"The signature is @residuesdict ___ model ___ chain ___ group ___ residue ___",
),
)
end
end
# select_atoms
# ------------
"""
select_atoms(residue_list; model=All, chain=All, group=All, residue=All, atom=All, alt_id=All, charge=All)
This function returns a vector of `PDBAtom`s with the selected subset of atoms from a list
of residues. The atoms are selected using the keyword arguments `model`, `chain`, `group`,
`residue`, `atom`, `alt_id`, and `charge`. You can use the type `All` (default value) to avoid
filtering at a particular level.
"""
function select_atoms(
residue_list::AbstractArray{PDBResidue,N};
model::ResidueQueryTypes = All,
chain::ResidueQueryTypes = All,
group::ResidueQueryTypes = All,
residue::ResidueQueryTypes = All,
atom::ResidueQueryTypes = All,
alt_id::ResidueQueryTypes = All,
charge::ResidueQueryTypes = All,
) where {N}
atom_list = PDBAtom[]
@inbounds for r in residue_list
if isresidue(r, model = model, chain = chain, group = group, residue = residue)
for a in r.atoms
if isatom(a, atom) && _is(a.alt_id, alt_id) && _is(a.charge, charge)
push!(atom_list, a)
end
end
end
end
atom_list
end
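# Minimal usage sketch (hypothetical `res_list::Vector{PDBResidue}`):
#
#     # cas     = select_atoms(res_list, chain="A", group="ATOM", atom="CA")
#     # oxygens = select_atoms(res_list, atom=r"^O")   # atom names starting with O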
# Deprecation warning for the positional arguments version
function atoms(
residue_list::AbstractArray{PDBResidue,N},
model,
chain,
group,
residue,
atom,
) where {N}
Base.depwarn(
"atoms is deprecated in favor of select_atoms(residue_list; model, chain, group, residue, atom)",
:atoms,
force = true,
)
select_atoms(
residue_list;
model = model,
chain = chain,
group = group,
residue = residue,
atom = atom,
)
end
"""
`@atoms ... model ... chain ... group ... residue ... atom ...`
These return a vector of `PDBAtom`s with the selected subset of atoms from a list of
residues. You can use the type `All` to avoid filtering that option.
**DEPRECATED:** This macro is deprecated. Use the [`select_atoms`](@ref) function instead.
"""
macro atoms(
residue_list,
model::Symbol,
m,
chain::Symbol,
c,
group::Symbol,
g,
residue::Symbol,
r,
atom::Symbol,
a,
)
if model == :model &&
chain == :chain &&
group == :group &&
residue == :residue &&
atom == :atom
Base.depwarn(
"Using the @atoms macro is deprecated in favor of the select_atoms function: select_atoms(residue_list; model, chain, group, residue, atom)",
Symbol("@atoms"),
force = true,
)
return :(select_atoms(
$(esc(residue_list));
model = $(esc(m)),
chain = $(esc(c)),
group = $(esc(g)),
residue = $(esc(r)),
atom = $(esc(a)),
))
else
throw(
ArgumentError(
"The signature is @atoms ___ model ___ chain ___ group ___ residue ___ atom ___",
),
)
end
end
# Special find...
# ===============
# This _find implementation should be faster than Base.find for small vectors (like atom
# lists) because it avoids calling push! and the extra allocation isn't a problem:
# https://discourse.julialang.org/t/avoid-calling-push-in-base-find/1336
function _find(f::Function, vector::Vector{T}) where {T}
N = length(vector)
indices = Array{Int}(undef, N)
j = 0
@inbounds for i = 1:N
if f(vector[i])
j += 1
indices[j] = i
end
end
resize!(indices, j)
end
"""
Returns a list with the indices of the heavy atoms (all atoms except hydrogen) in
the `PDBResidue`
"""
function findheavy(atoms::Vector{PDBAtom})
_find(atom -> atom.element != "H", atoms)
end
findheavy(res::PDBResidue) = findheavy(res.atoms)
"""
`findatoms(res::PDBResidue, atom::String)`
Returns an index vector of the atoms with the given `atom` name.
"""
function findatoms(atoms::Vector{PDBAtom}, atom::String)
_find(a -> a.atom == atom, atoms)
end
findatoms(res::PDBResidue, atom::String) = findatoms(res.atoms, atom)
"""
Returns a vector of indices for `CB` (`CA` for `GLY`)
"""
function findCB(res::PDBResidue)
atom = res.id.name == "GLY" ? "CA" : "CB"
findatoms(res, atom)
end
# occupancy
# =========
"""
Takes a `PDBResidue` and a `Vector` of atom indices.
Returns the index (from the given `Vector`) of the atom with the maximum occupancy.
"""
function selectbestoccupancy(atoms::Vector{PDBAtom}, indices::Vector{Int})
Ni = length(indices)
@assert Ni != 0 "There are no atom indices"
if Ni == 1
return (indices[1])
end
Na = length(atoms)
@assert Ni ≤ Na "There are more atom indices ($Ni) than atoms in the Residue ($Na)"
indice = 0
occupancy = -Inf
for i in indices
actual_occupancy = atoms[i].occupancy
if actual_occupancy > occupancy
occupancy = actual_occupancy
indice = i
end
end
return (indice)
end
selectbestoccupancy(res::PDBResidue, indices) = selectbestoccupancy(res.atoms, indices)
"""
Takes a `Vector` of `PDBAtom`s and returns a `Vector` of the `PDBAtom`s with best occupancy.
"""
function bestoccupancy(atoms::Vector{PDBAtom})::Vector{PDBAtom}
N = length(atoms)
if N == 0
@warn("There are no atoms.")
return (atoms)
elseif N == 1
return (atoms)
else
atomdict = sizehint!(OrderedDict{String,PDBAtom}(), N)
for atom in atoms
name = atom.atom
if haskey(atomdict, name)
if atom.occupancy > atomdict[name].occupancy
atomdict[name] = atom
end
else
atomdict[name] = atom
end
end
return (collect(values(atomdict)))
end
end
function bestoccupancy(res::PDBResidue) # TO DO: Test it!
new_res = copy(res)
new_res.atoms = bestoccupancy(res.atoms)
new_res
end
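# Usage sketch (kept as a comment so it does not run at load time; the file name
# is hypothetical): `bestoccupancy` keeps a single copy of each atom, selecting
# the alternative location with the highest occupancy.
#
#   residues = read_file("my_structure.pdb", PDBFile)
#   atoms_without_altloc = bestoccupancy(residues[1].atoms)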
# More Distances
# ==============
function _update_squared_distance(a_atoms, b_atoms, i, j, dist)
actual_dist = squared_distance(a_atoms[i], b_atoms[j])
if actual_dist < dist
return (actual_dist)
else
return (dist)
end
end
"""
`squared_distance(A::PDBResidue, B::PDBResidue; criteria::String="All")`
Returns the squared distance between the residues `A` and `B`.
The available `criteria` are: `Heavy`, `All`, `CA`, `CB` (`CA` for `GLY`)
"""
function squared_distance(A::PDBResidue, B::PDBResidue; criteria::String = "All")
a = A.atoms
b = B.atoms
dist = Inf
if criteria == "All"
Na = length(a)
Nb = length(b)
@inbounds for i = 1:Na
for j = 1:Nb
dist = _update_squared_distance(a, b, i, j, dist)
end
end
elseif criteria == "Heavy"
indices_a = findheavy(a)
indices_b = findheavy(b)
if length(indices_a) != 0 && length(indices_b) != 0
for i in indices_a
for j in indices_b
dist = _update_squared_distance(a, b, i, j, dist)
end
end
end
elseif criteria == "CA"
indices_a = findatoms(a, "CA")
indices_b = findatoms(b, "CA")
if length(indices_a) != 0 && length(indices_b) != 0
for i in indices_a
for j in indices_b
dist = _update_squared_distance(a, b, i, j, dist)
end
end
end
elseif criteria == "CB"
indices_a = findCB(A) # findCB needs residues instead of atoms
indices_b = findCB(B)
if length(indices_a) != 0 && length(indices_b) != 0
for i in indices_a
for j in indices_b
dist = _update_squared_distance(a, b, i, j, dist)
end
end
end
end
dist
end
function distance(A::PDBResidue, B::PDBResidue; criteria::String = "All")
sqrt(squared_distance(A, B, criteria = criteria))
end
"""
`any(f::Function, a::PDBResidue, b::PDBResidue)`
Tests whether the function `f` returns `true` for any pair of atoms between the residues `a` and `b`.
"""
Base.any(f::Function, a::PDBResidue, b::PDBResidue) = any(f, a.atoms, b.atoms)
function Base.any(f::Function, a_atoms::Vector{PDBAtom}, b_atoms::Vector{PDBAtom})
@inbounds for a in a_atoms
for b in b_atoms
if f(a, b)
return (true)
end
end
end
return (false)
end
"""
`contact(A::PDBResidue, B::PDBResidue, limit::AbstractFloat; criteria::String="All")`
Returns `true` if the residues `A` and `B` are at contact distance (`limit`).
The available distance `criteria` are: `Heavy`, `All`, `CA`, `CB` (`CA` for `GLY`)
"""
function contact(
A::PDBResidue,
B::PDBResidue,
limit::AbstractFloat;
criteria::String = "All",
)
squared_limit = limit^2
a = A.atoms
b = B.atoms
if criteria == "All"
Na = length(a)
Nb = length(b)
@inbounds for i = 1:Na
ai = a[i]
for j = 1:Nb
if _squared_limit_contact(ai, b[j], squared_limit)
return (true)
end
end
end
elseif criteria == "Heavy"
indices_a = findheavy(a)
indices_b = findheavy(b)
if length(indices_a) != 0 && length(indices_b) != 0
@inbounds for i in indices_a
ai = a[i]
for j in indices_b
if _squared_limit_contact(ai, b[j], squared_limit)
return (true)
end
end
end
end
elseif criteria == "CA"
indices_a = findatoms(a, "CA")
indices_b = findatoms(b, "CA")
if length(indices_a) != 0 && length(indices_b) != 0
@inbounds for i in indices_a
ai = a[i]
for j in indices_b
if _squared_limit_contact(ai, b[j], squared_limit)
return (true)
end
end
end
end
elseif criteria == "CB"
indices_a = findCB(A) # findCB needs residues instead of atoms
indices_b = findCB(B)
if length(indices_a) != 0 && length(indices_b) != 0
@inbounds for i in indices_a
ai = a[i]
for j in indices_b
if _squared_limit_contact(ai, b[j], squared_limit)
return (true)
end
end
end
end
end
false
end
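# Usage sketch (kept as a comment; `res_i` and `res_j` are assumed to be
# `PDBResidue`s taken from a parsed structure):
#
#   distance(res_i, res_j, criteria = "CB")           # Cβ-Cβ distance (Cα for GLY)
#   contact(res_i, res_j, 6.05, criteria = "Heavy")   # true if any heavy atoms are within 6.05 Å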
# Vectorize
# =========
# PLM
# ---
"""
It creates a `NamedArray` containing a `PairwiseListMatrix` where each element
(column, row) is identified with a `PDBResidue` from the input vector. You can indicate
the value type of the matrix (`Float64` by default), whether the list should store the
diagonal values (`Val{false}` by default) and the value used for the diagonal (`NaN` by default).
"""
function residuepairsmatrix(
residue_list::Vector{PDBResidue},
::Type{T},
::Type{Val{diagonal}},
diagonalvalue::T,
) where {T,diagonal}
plm = PairwiseListMatrix(T, length(residue_list), diagonal, diagonalvalue)
resnames = [
string(res.id.model, '_', res.id.chain, '_', res.id.group, '_', res.id.number)
for res in residue_list
]
nplm = setlabels(plm, resnames)
setdimnames!(nplm, ["Res1", "Res2"])
nplm::NamedArray{
T,
2,
PairwiseListMatrix{T,diagonal,Vector{T}},
NTuple{2,OrderedDict{String,Int}},
}
end
function residuepairsmatrix(residue_list::Vector{PDBResidue})
residuepairsmatrix(residue_list, Float64, Val{false}, NaN)
end
# Contacts and distances
# ----------------------
function squared_distance(residues::Vector{PDBResidue}; criteria::String = "All")
nplm = residuepairsmatrix(residues, Float64, Val{false}, 0.0)
plm = getarray(nplm)
@iterateupper plm false begin
list[k] = squared_distance(residues[i], residues[j], criteria = criteria)
end
nplm
end
"""
`contact(residues::Vector{PDBResidue}, limit::AbstractFloat; criteria::String="All")`
If `contact` takes a `Vector{PDBResidue}`, it returns a matrix with all the pairwise
comparisons (contact map).
"""
function contact(
residues::Vector{PDBResidue},
limit::AbstractFloat;
criteria::String = "All",
)
nplm = residuepairsmatrix(residues, Bool, Val{false}, true)
plm = getarray(nplm)
@iterateupper plm false begin
list[k] = contact(residues[i], residues[j], limit, criteria = criteria)
end
nplm
end
"""
`distance(residues::Vector{PDBResidue}; criteria::String="All")`
If `distance` takes a `Vector{PDBResidue}`, it returns a `PairwiseListMatrix{Float64, false}`
with all the pairwise comparisons (distance matrix).
"""
function distance(residues::Vector{PDBResidue}; criteria::String = "All")
nplm = residuepairsmatrix(residues, Float64, Val{false}, 0.0)
plm = getarray(nplm)
@iterateupper plm false begin
list[k] = distance(residues[i], residues[j], criteria = criteria)
end
nplm
end
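# Usage sketch (kept as a comment; `residues` is assumed to be a
# `Vector{PDBResidue}`): both functions return a labelled `NamedArray` wrapping
# a `PairwiseListMatrix`.
#
#   dmap = distance(residues, criteria = "Heavy")      # pairwise distance matrix
#   cmap = contact(residues, 6.05, criteria = "Heavy") # contact map at 6.05 Å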
# Proximity average
# -----------------
"""
`proximitymean` calculates the proximity mean/average for each residue as the average
score (from a `scores` list) of all the residues within a certain physical distance to a
given amino acid. The score of that residue is not included in the mean unless you set
`include` to `true`. The default values are 6.05 for the distance threshold/`limit` and
`"Heavy"` for the `criteria` keyword argument. This function can be used to calculate pMI
(proximity mutual information) and pC (proximity conservation) as in *Buslje et al. 2010*.
# References
- [Marino Buslje, Cristina, et al. "Networks of high mutual information define the
structural proximity of catalytic sites: implications for catalytic residue
identification." PLoS computational biology 6.11 (2010):
e1000978.](@cite marino2010networks)
"""
function proximitymean(
residues::Vector{PDBResidue},
scores::AbstractVector{T},
limit::T = 6.05;
criteria::String = "Heavy",
include::Bool = false,
) where {T<:AbstractFloat}
N = length(residues)
@assert N == length(scores) "Vectors must have the same length."
count = zeros(Int, N)
sum = zeros(T, N)
offset = include ? 0 : 1
@inbounds for i = 1:(N-offset)
res_i = residues[i]
for j = (i+offset):N
if include && (i == j)
count[i] += 1
sum[i] += scores[i]
elseif contact(res_i, residues[j], limit, criteria = criteria)
count[i] += 1
count[j] += 1
sum[i] += scores[j]
sum[j] += scores[i]
end
end
end
sum ./ count
end
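# Usage sketch (kept as a comment; assumes `residues` and a score vector `mi` of
# the same length, e.g. per-column mutual information values):
#
#   pmi = proximitymean(residues, mi)                     # default: 6.05 Å limit, "Heavy" criteria
#   pmi_inc = proximitymean(residues, mi, include = true) # also counts the residue's own score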
# For Aromatic
# ============
function _get_plane(residue::PDBResidue)
name = residue.id.name
planes = Vector{PDBAtom}[]
if name != "TRP"
plane = PDBAtom[]
for atom in residue.atoms
if (name, atom.atom) in PDB._aromatic
push!(plane, atom)
end
end
push!(planes, plane)
else
plane1 = PDBAtom[]
plane2 = PDBAtom[]
for atom in residue.atoms
if atom.atom in Set{String}(["CE2", "CD2", "CZ2", "CZ3", "CH2", "CE3"])
push!(plane1, atom)
end
if atom.atom in Set{String}(["CG", "CD1", "NE1", "CE2", "CD2"])
push!(plane2, atom)
end
end
push!(planes, plane1)
push!(planes, plane2)
end
planes
end
function _centre(planes::Vector{Vector{PDBAtom}})
subset = Vector{PDBAtom}[bestoccupancy(atoms) for atoms in planes]
polyg =
Vector{Coordinates}[Coordinates[a.coordinates for a in atoms] for atoms in subset]
Coordinates[sum(points) ./ Float64(length(points)) for points in polyg]
end
# function _simple_normal_and_centre(atoms::Vector{PDBAtom})
# atoms = bestoccupancy(atoms)
# points = Coordinates[ a.coordinates for a in atoms ]
# (cross(points[2] - points[1], points[3] - points[1]), sum(points)./length(points))
# end
# Show PDB* objects (using Format)
# ====================================
const _Format_ResidueID = FormatExpr("{:>15} {:>15} {:>15} {:>15} {:>15} {:>15}\n")
const _Format_ATOM = FormatExpr("{:>50} {:>15} {:>15} {:>15} {:>15} {:>15} {:>15}\n")
const _Format_ResidueID_Res = FormatExpr("\t\t{:>15} {:>15} {:>15} {:>15} {:>15} {:>15}\n")
const _Format_ATOM_Res =
FormatExpr("\t\t{:<10} {:>50} {:>15} {:>15} {:>15} {:>15} {:>15} {:>15}\n")
function Base.show(io::IO, id::PDBResidueIdentifier)
printfmt(
io,
_Format_ResidueID,
"PDBe_number",
"number",
"name",
"group",
"model",
"chain",
)
printfmt(
io,
_Format_ResidueID,
string('"', id.PDBe_number, '"'),
string('"', id.number, '"'),
string('"', id.name, '"'),
string('"', id.group, '"'),
string('"', id.model, '"'),
string('"', id.chain, '"'),
)
end
function Base.show(io::IO, atom::PDBAtom)
printfmt(
io,
_Format_ATOM,
"coordinates",
"atom",
"element",
"occupancy",
"B",
"alt_id",
"charge",
)
printfmt(
io,
_Format_ATOM,
atom.coordinates,
string('"', atom.atom, '"'),
string('"', atom.element, '"'),
atom.occupancy,
string('"', atom.B, '"'),
string('"', atom.alt_id, '"'),
string('"', atom.charge, '"'),
)
end
function Base.show(io::IO, res::PDBResidue)
println(io, "PDBResidue:\n\tid::PDBResidueIdentifier")
printfmt(
io,
_Format_ResidueID_Res,
"PDBe_number",
"number",
"name",
"group",
"model",
"chain",
)
printfmt(
io,
_Format_ResidueID_Res,
string('"', res.id.PDBe_number, '"'),
string('"', res.id.number, '"'),
string('"', res.id.name, '"'),
string('"', res.id.group, '"'),
string('"', res.id.model, '"'),
string('"', res.id.chain, '"'),
)
len = length(res)
println(io, "\tatoms::Vector{PDBAtom}\tlength: ", len)
for i = 1:len
printfmt(
io,
_Format_ATOM_Res,
"",
"coordinates",
"atom",
"element",
"occupancy",
"B",
"alt_id",
"charge",
)
printfmt(
io,
_Format_ATOM_Res,
string(i, ":"),
res.atoms[i].coordinates,
string('"', res.atoms[i].atom, '"'),
string('"', res.atoms[i].element, '"'),
res.atoms[i].occupancy,
string('"', res.atoms[i].B, '"'),
string('"', res.atoms[i].alt_id, '"'),
string('"', res.atoms[i].charge, '"'),
)
end
end
# Plot coordinates of the C alpha with best occupancy
@recipe function plot(residues::AbstractVector{PDBResidue})
ca = CAmatrix(residues)
chains = [r.id.chain for r in residues if r.id.group == "ATOM"]
group --> chains
ca[:, 1], ca[:, 2], ca[:, 3]
end
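# Usage sketch (kept as a comment; it assumes a Plots.jl backend is loaded):
#
#   using Plots
#   plot(residues) # traces the Cα coordinates, grouped by chain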
# Functions to extract the sequences from PDB structures.
# =======================================================
"""
is_aminoacid(residue::PDBResidue)
is_aminoacid(residue_id::PDBResidueIdentifier)
This function returns `true` if the PDB residue is an amino acid residue. It checks if the
residue's three-letter name exists in the `MIToS.Utils.THREE2ONE` dictionary, and
returns `false` otherwise.
"""
is_aminoacid(residue::PDBResidue) = is_aminoacid(residue.id)
is_aminoacid(residue_id::PDBResidueIdentifier) = _is_aminoacid(residue_id.name)
_is_aminoacid(residue_name::String) = haskey(THREE2ONE, residue_name)
function _add_sequence!(chains, key, buf)
seq = String(take!(buf))
if !isempty(seq) # do not add empty sequences, for example, if a chain is not selected
if haskey(chains, key)
chains[key] *= seq
else
chains[key] = seq
end
end
end
"""
modelled_sequences(residue_list::AbstractArray{PDBResidue,N};
model::Union{String,Type{All}}=All, chain::Union{String,Type{All}}=All,
group::Union{String,Type{All}}=All) where N
This function returns an `OrderedDict` where each key is a named tuple (containing the
model and chain identifiers), and each value is the protein sequence corresponding to
the modelled residues in those chains. Therefore, the obtained sequences do not contain
missing residues. All modelled residues are included by default, but those that don't
satisfy specified criteria based on the `model`, `chain`, or `group` keyword arguments
are excluded. One-letter residue names are obtained from the `MIToS.Utils.THREE2ONE`
dictionary for all residue names that return `true` for `is_aminoacid`.
"""
function modelled_sequences(
residue_list::AbstractArray{PDBResidue,N};
model::Union{String,Type{All}} = All,
chain::Union{String,Type{All}} = All,
group::Union{String,Type{All}} = All,
) where {N}
chains = OrderedDict{NamedTuple{(:model, :chain),Tuple{String,String}},String}()
buf = IOBuffer()
first_residue = first(residue_list)
key = (model = first_residue.id.model, chain = first_residue.id.chain)
for res in residue_list
if !_is(res.id.model, model) ||
!_is(res.id.chain, chain) ||
!_is(res.id.group, group) ||
!is_aminoacid(res)
continue
end
current_key = (model = res.id.model, chain = res.id.chain)
if current_key != key
_add_sequence!(chains, key, buf)
key = current_key
end
write(buf, THREE2ONE[res.id.name])
end
_add_sequence!(chains, key, buf)
chains
end
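# Usage sketch (kept as a comment; `residues` is assumed to be a parsed structure):
#
#   seqs = modelled_sequences(residues, chain = "A")
#   for ((model, chain), seq) in seqs
#       println(model, '\t', chain, '\t', seq)
#   end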
# Download Pfam
# =============
"""
It downloads a gzipped Stockholm alignment from InterPro for the Pfam family with the
given `pfamcode`.
By default, it downloads the `full` Pfam alignment. You can use the `alignment` keyword
argument to download the `seed` or the `uniprot` alignment instead. For example,
`downloadpfam("PF00069")` will download the **full alignment** for the
*PF00069 Pfam family*, while `downloadpfam("PF00069", alignment="seed")` will download the
**seed alignment** of the family.
The extension of the downloaded file is `.stockholm.gz` by default; you can change it
using the `filename` keyword argument, but the `.gz` at the end is mandatory.
"""
function downloadpfam(
pfamcode::String;
filename::String = "$pfamcode.stockholm.gz",
alignment::String = "full",
kargs...,
)
if alignment != "full" && alignment != "seed" && alignment != "uniprot"
throw(ErrorException("alignment must be \"full\", \"seed\" or \"uniprot\""))
end
endswith(filename, ".gz") || error("filename must end in .gz")
if occursin(r"^PF\d{5}$"i, pfamcode)
download_file(
"https://www.ebi.ac.uk/interpro/wwwapi/entry/pfam/$pfamcode/?annotation=alignment:$alignment&download",
filename;
kargs...,
)
else
throw(ErrorException("$pfamcode is not a correct Pfam code"))
end
end
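# Usage sketch (kept as a comment; it needs network access and PF09645 is only
# an illustrative Pfam accession):
#
#   full_file = downloadpfam("PF09645") # PF09645.stockholm.gz (full alignment)
#   seed_file = downloadpfam("PF09645", alignment = "seed", filename = "PF09645_seed.stockholm.gz")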
# PDB ids from Pfam sequence annotations
# ======================================
const _regex_PDB_from_GS = r"PDB;\s+(\w+)\s+(\w);\s+\w+-\w+;" # i.e.: "PDB; 2VQC A; 4-73;\n"
"""
Generates from a Pfam `msa` a `Dict{String, Vector{Tuple{String,String}}}`.
Keys are sequence IDs and each value is a list of tuples containing PDB code and chain.
```julia
julia> getseq2pdb(msa)
Dict{String,Array{Tuple{String,String},1}} with 1 entry:
"F112_SSV1/3-112" => [("2VQC","A")]
```
"""
function getseq2pdb(msa::AnnotatedMultipleSequenceAlignment)
dict = Dict{String,Vector{Tuple{String,String}}}()
for (k, v) in getannotsequence(msa)
id, annot = k
# i.e.: "#=GS F112_SSV1/3-112 DR PDB; 2VQC A; 4-73;\n"
if annot == "DR" && occursin(_regex_PDB_from_GS, v)
for m in eachmatch(_regex_PDB_from_GS, v)
if haskey(dict, id)
push!(dict[id], (m.captures[1], m.captures[2]))
else
dict[id] = Tuple{String,String}[(m.captures[1], m.captures[2])]
end
end
end
end
sizehint!(dict, length(dict))
end
# Mapping PDB/Pfam
# ================
"""
`msacolumn2pdbresidue(msa, seqid, pdbid, chain, pfamid, siftsfile; strict=false, checkpdbname=false, missings=true)`
This function returns an `OrderedDict{Int,String}` with **MSA column numbers on the input file**
as keys and PDB residue numbers (`""` for missings) as values. The mapping is performed
using SIFTS. This function needs correct *ColMap* and *SeqMap* annotations. It checks the
correspondence of the residues between the MSA sequence and SIFTS
(it throws a warning if there are differences). Missing residues are included if the
keyword argument `missings` is `true` (default: `true`). If the keyword argument `strict`
is `true` (default: `false`), it throws an Error, instead of a Warning, when residues don't
match. If the keyword argument `checkpdbname` is `true` (default: `false`), it throws an Error
if the three-letter name of the PDB residue doesn't match the MSA residue. If you are working with
a **downloaded Pfam MSA without modifications**, you should read it with `read_file` using
`generatemapping=true` and `useidcoordinates=true`. If you don't indicate the path to the
`siftsfile` used in the mapping, this function downloads the SIFTS file in the current
folder. If you don't indicate the Pfam accession number (`pfamid`), this function tries to
read the *AC* file annotation.
"""
function msacolumn2pdbresidue(
msa::AnnotatedMultipleSequenceAlignment,
seqid::String,
pdbid::String,
chain::String,
pfamid::String,
siftsfile::String;
strict::Bool = false,
checkpdbname::Bool = false,
missings::Bool = true,
)
siftsres = read_file(siftsfile, SIFTSXML, chain = chain, missings = missings)
up2res = OrderedDict{String,Tuple{String,String,Char}}()
for res in siftsres
if !ismissing(res.Pfam) && res.Pfam.id == uppercase(pfamid)
pfnum = res.Pfam.number
if pfnum == ""
continue
end
pfname = res.Pfam.name
if !ismissing(res.PDB) && (res.PDB.id == lowercase(pdbid)) && !res.missing
up2res[pfnum] =
checkpdbname ? (pfname, res.PDB.number, three2residue(res.PDB.name)) :
(pfname, res.PDB.number, '-')
else
up2res[pfnum] =
checkpdbname ?
(pfname, "", ismissing(res.PDB) ? "" : three2residue(res.PDB.name)) :
(pfname, "", '-')
end
end
end
seq = Char[x for x in vec(getsequence(msa, seqid))]
seqmap = getsequencemapping(msa, seqid)
colmap = getcolumnmapping(msa)
N = ncolumns(msa)
m = OrderedDict{Int,String}()
sizehint!(m, N)
for i = 1:N
up_number = string(seqmap[i])
if up_number != "0"
up_res, pdb_resnum, pdb_res = get(up2res, up_number, ("", "", '-'))
if string(seq[i]) == up_res
m[colmap[i]] = pdb_resnum
else
msg = string(
pfamid,
" ",
seqid,
" ",
pdbid,
" ",
chain,
" : MSA sequence residue at ",
i,
" (",
seq[i],
") != SIFTS residue (UniProt/Pfam: ",
up_res,
", PDB: ",
pdb_resnum,
")",
)
strict ? throw(ErrorException(msg)) : @warn(msg)
end
if (checkpdbname && (seq[i] != pdb_res))
msg = string(
pfamid,
" ",
seqid,
" ",
pdbid,
" ",
chain,
" : MSA sequence residue at ",
i,
" (",
seq[i],
") != PDB residue at ",
pdb_resnum,
" (",
pdb_res,
")",
)
throw(ErrorException(msg))
end
end
end
m
end
function msacolumn2pdbresidue(
msa::AnnotatedMultipleSequenceAlignment,
seqid::String,
pdbid::String,
chain::String,
pfamid::String;
kargs...,
)
    msacolumn2pdbresidue(msa, seqid, pdbid, chain, pfamid, downloadsifts(pdbid); kargs...)
end
function msacolumn2pdbresidue(
msa::AnnotatedMultipleSequenceAlignment,
seqid::String,
pdbid::String,
chain::String;
kargs...,
)
msacolumn2pdbresidue(
msa,
seqid,
pdbid,
chain,
        String(split(getannotfile(msa, "AC"), '.')[1]);
        kargs...,
)
end
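# Usage sketch of the typical Pfam/PDB mapping workflow (kept as a comment; it
# needs network access, and the identifiers, taken from the getseq2pdb docstring
# example, are only illustrative):
#
#   msa = read_file(downloadpfam("PF09645"), Stockholm,
#                   generatemapping = true, useidcoordinates = true)
#   col2res = msacolumn2pdbresidue(msa, "F112_SSV1/3-112", "2VQC", "A")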
"""
Returns a `BitVector` where there is a `true` for each column with a PDB residue.
"""
function hasresidues(
msa::AnnotatedMultipleSequenceAlignment,
column2residues::AbstractDict{Int,String},
)
colmap = getcolumnmapping(msa)
ncol = length(colmap)
mask = falses(ncol)
for i = 1:ncol
if get(column2residues, colmap[i], "") != ""
mask[i] = true
end
end
mask
end
# PDB residues for each column
# ============================
"""
This function takes an `AnnotatedMultipleSequenceAlignment` with correct *ColMap*
annotations and two dicts:
1. The first is an `OrderedDict{String,PDBResidue}` from PDB residue number to `PDBResidue`.
2. The second is a `Dict{Int,String}` from MSA column number **on the input file** to PDB residue number.
`msaresidues` returns an `OrderedDict{Int,PDBResidue}` from input column number (ColMap)
to `PDBResidue`. Residues on inserts are not included.
"""
function msaresidues(
msa::AnnotatedMultipleSequenceAlignment,
residues::AbstractDict{String,PDBResidue},
column2residues::AbstractDict{Int,String},
)
colmap = getcolumnmapping(msa)
msares = sizehint!(OrderedDict{Int,PDBResidue}(), length(colmap))
for col in colmap
resnum = get(column2residues, col, "")
if resnum != ""
if haskey(residues, resnum)
msares[col] = residues[resnum]
else
@warn(
"MSA column $col : The residue number $resnum isn't in the residues Dict."
)
end
end
end
sizehint!(msares, length(msares))
end
# Contact Map
# ===========
"""
This function takes an `AnnotatedMultipleSequenceAlignment` with correct *ColMap*
annotations and two dicts:
1. The first is an `OrderedDict{String,PDBResidue}` from PDB residue number to `PDBResidue`.
2. The second is a `Dict{Int,String}` from **MSA column number on the input file** to PDB residue number.
`msacontacts` returns a `PairwiseListMatrix{Float64,false}` of `0.0` and `1.0` where `1.0`
indicates a residue contact. Contacts are defined as an inter-residue distance less than or
equal to `distance_limit` (default `6.05`) angstroms between any pair of heavy atoms. `NaN`
indicates a missing value.
"""
function msacontacts(
msa::AnnotatedMultipleSequenceAlignment,
residues::AbstractDict{String,PDBResidue},
column2residues::AbstractDict{Int,String},
distance_limit::Float64 = 6.05,
)
colmap = getcolumnmapping(msa)
contacts = columnpairsmatrix(msa)
plm = getarray(contacts)
@inbounds @iterateupper plm false begin
resi = get(column2residues, colmap[i], "")
resj = get(column2residues, colmap[j], "")
if resi != "" && resj != "" && haskey(residues, resi) && haskey(residues, resj)
list[k] = Float64(contact(residues[resi], residues[resj], distance_limit))
else
list[k] = NaN
end
end
contacts
end
# AUC (contact prediction)
# ========================
"""
This function takes a `msacontacts` matrix or its list of contacts `contact_list` with 1.0 for
true contacts and 0.0 for non-contacts (NaN or other numbers for missing values).
Returns two `BitVector`s, the first with `true`s where `contact_list` is 1.0 and the second
with `true`s where `contact_list` is 0.0. They are useful for AUC calculations.
"""
function getcontactmasks(contact_list::Vector{T}) where {T<:AbstractFloat}
N = length(contact_list)
true_contacts = falses(N)
false_contacts = falses(N)
@inbounds for i = 1:N
value = contact_list[i]
if value == 1.0
true_contacts[i] = true
elseif value == 0.0
false_contacts[i] = true
end
# If value is NaN, It keeps the false value
end
true_contacts, false_contacts
end
function getcontactmasks(plm::PairwiseListMatrix{T,false,VT}) where {T<:AbstractFloat,VT}
getcontactmasks(getlist(plm))
end
function getcontactmasks(
nplm::NamedArray{T,2,PairwiseListMatrix{T,false,TV},DN},
) where {T,TV,DN}
getcontactmasks(getarray(nplm))
end
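# Usage sketch (kept as a comment; `cmap` is assumed to be the output of
# `msacontacts`): the masks label which column pairs are true contacts and which
# are confirmed non-contacts (NaN pairs fall in neither), e.g. to label
# predictions when computing a ROC/AUC for contact prediction.
#
#   true_mask, false_mask = getcontactmasks(cmap)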
"""
The `Pfam` module defines functions to measure the protein contact prediction performance of information measures between column pairs of a Pfam MSA.
**Features**
- Read and download Pfam MSAs
- Obtain PDB information from alignment annotations
- Map between sequence/alignment residues/columns and PDB structures
- Measure of AUC (ROC curve) for contact prediction of MI scores
```julia
using MIToS.Pfam
```
"""
module Pfam
using MIToS.Utils
using MIToS.MSA
using MIToS.SIFTS
using MIToS.PDB
using MIToS.Information
using PairwiseListMatrices
using NamedArrays
using OrderedCollections
export # Download
downloadpfam,
# PDB
Stockholm,
getseq2pdb,
msacolumn2pdbresidue,
hasresidues,
msacontacts,
msaresidues,
getcontactmasks,
# Utils
read_file,
parse_file,
write_file,
print_file
include("Download.jl")
include("PDB.jl")
end
abstract type DataBase end
"""
`dbPDBe` stores the residue `number` and `name` in PDBe as strings.
"""
@auto_hash_equals struct dbPDBe <: DataBase
number::String # Cross referenced residue number
name::String # Cross referenced residue name
end
"""
`dbInterPro` stores the residue `id`, `number`, `name` and `evidence` in InterPro as strings.
"""
@auto_hash_equals struct dbInterPro <: DataBase
id::String
number::String # Cross referenced residue number
name::String # Cross referenced residue name
evidence::String
end
"""
`dbEnsembl` stores the residue (gene) accession `id`, the `transcript`,
`translation` and `exon` ids in Ensembl as strings, together with the residue
`number` and `name` using the UniProt coordinates.
"""
@auto_hash_equals struct dbEnsembl <: DataBase
id::String # (gene) accession id
number::String # Cross referenced residue number
name::String # Cross referenced residue name
transcript::String
translation::String
exon::String
end
for ref_type in [:dbUniProt, :dbPfam, :dbNCBI]
@eval begin
@auto_hash_equals struct $(ref_type) <: DataBase
id::String # The cross reference database identifier
number::String # Cross referenced residue number
name::String # Cross referenced residue name
end
end
end
@doc """
`dbUniProt` stores the residue `id`, `number` and `name` in UniProt as strings.
""" dbUniProt
@doc """
`dbPfam` stores the residue `id`, `number` and `name` in Pfam as strings.
""" dbPfam
@doc """
`dbNCBI` stores the residue `id`, `number` and `name` in NCBI as strings.
""" dbNCBI
for ref_type in [:dbPDB, :dbCATH, :dbSCOP, :dbSCOP2, :dbSCOP2B]
@eval begin
@auto_hash_equals struct $(ref_type) <: DataBase
id::String
number::String
name::String
chain::String
end
end
end
@doc """
`dbPDB` stores the residue `id`, `number`, `name` and `chain` in PDB as strings.
""" dbPDB
@doc """
`dbCATH` stores the residue `id`, `number`, `name` and `chain` in CATH as
strings.
""" dbCATH
@doc """
`dbSCOP` stores the residue `id`, `number`, `name` and `chain` in SCOP as
strings.
""" dbSCOP
@doc """
`dbSCOP2` stores the residue `id`, `number`, `name` and `chain` in SCOP2 as
strings.
""" dbSCOP2
@doc """
`dbSCOP2B` stores the residue `id`, `number`, `name` and `chain` in SCOP2B as
strings. *SCOP2B* is an expansion of *SCOP2* domain annotations at the superfamily
level to every *PDB* entry with the same *UniProt* accession having at least 80% *SCOP2*
domain coverage.
""" dbSCOP2B
"""
Returns "" if the attribute is missing
"""
function _get_attribute(elem::LightXML.XMLElement, attr::String)
text = LightXML.attribute(elem, attr)
if text === nothing || text == "None"
return ("")
else
return (text)
end
end
"""
Returns `missing` if the attribute is missing
"""
function _get_nullable_attribute(
elem::LightXML.XMLElement,
attr::String,
)::Union{String,Missing}
text = LightXML.attribute(elem, attr)
(text === nothing || text == "None") ? missing : text
end
for ref_type in [:dbPDB, :dbCATH, :dbSCOP, :dbSCOP2, :dbSCOP2B]
@eval begin
function $(ref_type)(map::LightXML.XMLElement)
$(ref_type)(
_get_attribute(map, "dbAccessionId"),
_get_attribute(map, "dbResNum"),
_get_attribute(map, "dbResName"),
_get_attribute(map, "dbChainId"),
)
end
end
end
for ref_type in [:dbUniProt, :dbPfam, :dbNCBI]
@eval begin
function $(ref_type)(map::LightXML.XMLElement)
$(ref_type)(
_get_attribute(map, "dbAccessionId"),
_get_attribute(map, "dbResNum"),
_get_attribute(map, "dbResName"),
)
end
end
end
function dbEnsembl(map::LightXML.XMLElement)
dbEnsembl(
_get_attribute(map, "dbAccessionId"),
_get_attribute(map, "dbResNum"),
_get_attribute(map, "dbResName"),
_get_attribute(map, "dbTranscriptId"),
_get_attribute(map, "dbTranslationId"),
_get_attribute(map, "dbExonId"),
)
end
function dbInterPro(map::LightXML.XMLElement)
dbInterPro(
_get_attribute(map, "dbAccessionId"),
_get_attribute(map, "dbResNum"),
_get_attribute(map, "dbResName"),
_get_attribute(map, "dbEvidence"),
)
end
function dbPDBe(map::LightXML.XMLElement)
dbPDBe(_get_attribute(map, "dbResNum"), _get_attribute(map, "dbResName"))
end
"""
A `SIFTSResidue` object stores the SIFTS residue level mapping for a residue. It has the
following fields that you can access at any moment for query purposes:
- `PDBe` : A `dbPDBe` object, it's present in all the `SIFTSResidue`s.
- `UniProt` : A `dbUniProt` object or `missing`.
- `Pfam` : A `dbPfam` object or `missing`.
- `NCBI` : A `dbNCBI` object or `missing`.
- `InterPro` : An array of `dbInterPro` objects.
- `PDB` : A `dbPDB` object or `missing`.
- `SCOP` : A `dbSCOP` object or `missing`.
- `SCOP2` : An array of `dbSCOP2` objects.
- `SCOP2B` : A `dbSCOP2B` object or `missing`.
- `CATH` : A `dbCATH` object or `missing`.
- `Ensembl` : An array of `dbEnsembl` objects.
- `missing` : It's `true` if the residue is missing, i.e. not observed, in the structure.
- `sscode` : A string with the secondary structure code of the residue.
- `ssname` : A string with the secondary structure name of the residue.
"""
@auto_hash_equals struct SIFTSResidue
PDBe::dbPDBe
# crossRefDb
UniProt::Union{dbUniProt,Missing}
Pfam::Union{dbPfam,Missing}
NCBI::Union{dbNCBI,Missing}
InterPro::Array{dbInterPro,1}
PDB::Union{dbPDB,Missing}
SCOP::Union{dbSCOP,Missing}
SCOP2::Array{dbSCOP2,1}
SCOP2B::Union{dbSCOP2B,Missing}
CATH::Union{dbCATH,Missing}
Ensembl::Array{dbEnsembl,1}
# residueDetail
missing::Bool # XML: <residueDetail dbSource="PDBe" property="Annotation" ...
sscode::String # XML: <residueDetail dbSource="PDBe" property="codeSecondaryStructure"...
ssname::String # XML: <residueDetail dbSource="PDBe" property="nameSecondaryStructure"...
end
# Getters
# -------
@inline _name(::Type{dbPDBe}) = "PDBe"
@inline _name(::Type{dbUniProt}) = "UniProt"
@inline _name(::Type{dbPfam}) = "Pfam"
@inline _name(::Type{dbNCBI}) = "NCBI"
@inline _name(::Type{dbInterPro}) = "InterPro"
@inline _name(::Type{dbPDB}) = "PDB"
@inline _name(::Type{dbSCOP}) = "SCOP"
@inline _name(::Type{dbSCOP2}) = "SCOP2"
@inline _name(::Type{dbSCOP2B}) = "SCOP2B"
@inline _name(::Type{dbCATH}) = "CATH"
@inline _name(::Type{dbEnsembl}) = "Ensembl"
@inline Base.get(res::SIFTSResidue, db::Type{dbPDBe}) = res.PDBe
@inline Base.get(res::SIFTSResidue, db::Type{dbUniProt}) = res.UniProt
@inline Base.get(res::SIFTSResidue, db::Type{dbPfam}) = res.Pfam
@inline Base.get(res::SIFTSResidue, db::Type{dbNCBI}) = res.NCBI
@inline Base.get(res::SIFTSResidue, db::Type{dbInterPro}) = res.InterPro
@inline Base.get(res::SIFTSResidue, db::Type{dbPDB}) = res.PDB
@inline Base.get(res::SIFTSResidue, db::Type{dbSCOP}) = res.SCOP
@inline Base.get(res::SIFTSResidue, db::Type{dbSCOP2}) = res.SCOP2
@inline Base.get(res::SIFTSResidue, db::Type{dbSCOP2B}) = res.SCOP2B
@inline Base.get(res::SIFTSResidue, db::Type{dbCATH}) = res.CATH
@inline Base.get(res::SIFTSResidue, db::Type{dbEnsembl}) = res.Ensembl
function Base.get(
res::SIFTSResidue,
db::Type{T},
field::Symbol,
default::Union{String,Missing} = missing,
) where {T<:Union{dbUniProt,dbPfam,dbNCBI,dbPDB,dbSCOP,dbSCOP2B,dbCATH}}
database = get(res, db)
ismissing(database) ? default : getfield(database, field)
end
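# Usage sketch (kept as a comment; `res` is assumed to be a `SIFTSResidue`):
#
#   get(res, dbPDB)                  # the whole dbPDB annotation, or `missing`
#   get(res, dbUniProt, :number, "") # the UniProt residue number, or "" if absent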
# Print
# -----
function Base.show(io::IO, res::SIFTSResidue)
if res.missing
println(io, "SIFTSResidue (missing)")
else
println(
io,
"SIFTSResidue with secondary structure code (sscode): \"",
res.sscode,
"\" and name (ssname): \"",
res.ssname,
"\"",
)
end
println(io, " PDBe:")
println(io, " number: ", res.PDBe.number)
println(io, " name: ", res.PDBe.name)
for dbname in [:UniProt, :Pfam, :NCBI, :PDB, :SCOP, :SCOP2B, :CATH]
dbfield = getfield(res, dbname)
if !ismissing(dbfield)
println(io, " ", dbname, " :")
for f in fieldnames(typeof(dbfield))
println(io, " ", f, ": ", getfield(dbfield, f))
end
end
end
length(res.SCOP2) > 0 && println(io, " SCOP2: ", res.SCOP2)
length(res.InterPro) > 0 && println(io, " InterPro: ", res.InterPro)
length(res.Ensembl) > 0 && println(io, " Ensembl: ", res.Ensembl)
end
# Creation
# --------
function SIFTSResidue(
residue::LightXML.XMLElement,
missing_residue::Bool,
sscode::String,
ssname::String,
)
PDBe = dbPDBe(residue)
UniProt = missing
Pfam = missing
NCBI = missing
InterPro = dbInterPro[]
PDB = missing
SCOP = missing
SCOP2 = dbSCOP2[]
SCOP2B = missing
CATH = missing
Ensembl = dbEnsembl[]
for crossref in LightXML.get_elements_by_tagname(residue, "crossRefDb")
db = LightXML.attribute(crossref, "dbSource")
if db == "UniProt"
UniProt = dbUniProt(crossref)
elseif db == "Pfam"
Pfam = dbPfam(crossref)
elseif db == "NCBI"
NCBI = dbNCBI(crossref)
elseif db == "InterPro"
push!(InterPro, dbInterPro(crossref))
elseif db == "PDB"
PDB = dbPDB(crossref)
elseif db == "SCOP"
SCOP = dbSCOP(crossref)
elseif db == "SCOP2"
push!(SCOP2, dbSCOP2(crossref))
elseif db == "SCOP2B"
SCOP2B = dbSCOP2B(crossref)
elseif db == "CATH"
CATH = dbCATH(crossref)
elseif db == "Ensembl"
push!(Ensembl, dbEnsembl(crossref))
else
@warn(string(db, " is not in the MIToS' DataBases."))
end
end
SIFTSResidue(
PDBe,
UniProt,
Pfam,
NCBI,
InterPro,
PDB,
SCOP,
SCOP2,
SCOP2B,
CATH,
Ensembl,
missing_residue,
sscode,
ssname,
)
end
function SIFTSResidue(residue::LightXML.XMLElement)
SIFTSResidue(residue, _get_details(residue)...)
end
# Mapping Functions
# =================
_is_All(::Any) = false
_is_All(::Type{All}) = true
"""
Parses a SIFTS XML file and returns an `OrderedDict` between residue numbers of
two `DataBase`s with the given identifiers. A `chain` can be specified
(`All` by default). If `missings` is `true` (default), all the residues are
used, even if they don't have coordinates in the PDB file.
"""
function siftsmapping(
filename::String,
db_from::Type{F},
id_from::String,
db_to::Type{T},
id_to::String;
chain::Union{Type{All},String} = All,
missings::Bool = true,
) where {F,T}
mapping = OrderedDict{String,String}()
xdoc = LightXML.parse_file(filename)
try
for entity in _get_entities(xdoc)
segments = _get_segments(entity)
for segment in segments
residues = _get_residues(segment)
for residue in residues
in_chain = _is_All(chain)
key_data =
_name(db_from) == "PDBe" ? LightXML.attribute(residue, "dbResNum") :
missing
value_data =
_name(db_to) == "PDBe" ? LightXML.attribute(residue, "dbResNum") :
missing
if missings || !_is_missing(residue)
crossref = LightXML.get_elements_by_tagname(residue, "crossRefDb")
for ref in crossref
source = LightXML.attribute(ref, "dbSource")
if source == _name(db_from) &&
LightXML.attribute(ref, "dbAccessionId") == id_from
key_data = _get_nullable_attribute(ref, "dbResNum")
end
if source == _name(db_to) &&
LightXML.attribute(ref, "dbAccessionId") == id_to
value_data = _get_nullable_attribute(ref, "dbResNum")
end
if !in_chain && source == "PDB" # XML: <crossRefDb dbSource="PDB" ... dbChainId="E"/>
in_chain = LightXML.attribute(ref, "dbChainId") == chain
end
end
if !ismissing(key_data) && !ismissing(value_data) && in_chain
key = key_data
if haskey(mapping, key)
@warn string(
"$key is already in the mapping with the value ",
mapping[key],
". The value is replaced by ",
value_data,
)
end
mapping[key] = value_data
end
end
end
end
end
finally
LightXML.free(xdoc)
end
sizehint!(mapping, length(mapping))
end
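# Usage sketch (kept as a comment; the SIFTS file and the accessions are
# illustrative and should be replaced by your own):
#
#   pdb2uniprot =
#       siftsmapping("1ivo.xml.gz", dbPDB, "1ivo", dbUniProt, "P00533", chain = "A")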
"""
`parse_file(document::LightXML.XMLDocument, ::Type{SIFTSXML}; chain=All, missings::Bool=true)`
Returns a `Vector{SIFTSResidue}` parsed from a `SIFTSXML` file.
By default, parses all the `chain`s and includes missing residues.
"""
function Utils.parse_file(
document::LightXML.XMLDocument,
::Type{SIFTSXML};
chain::Union{Type{All},String} = All,
missings::Bool = true,
)
vector = SIFTSResidue[]
for entity in _get_entities(document)
for segment in _get_segments(entity)
residues = _get_residues(segment)
for residue in residues
missing_residue, sscode, ssname = _get_details(residue)
if missings || !missing_residue
sifts_res = SIFTSResidue(residue, missing_residue, sscode, ssname)
if _is_All(chain) ||
(!ismissing(sifts_res.PDB) && sifts_res.PDB.chain == chain)
push!(vector, sifts_res)
end
end
end
end
end
vector
end
function Utils.parse_file(fh::Union{IO,AbstractString}, ::Type{SIFTSXML}; kwargs...)
throw(
ArgumentError("The SIFTS XML file should have the .xml or the .xml.gz extension."),
)
end
# Find SIFTSResidue
# -----------------
for F in (:findall, :filter!, :filter)
@eval begin
function Base.$(F)(
f::Function,
list::AbstractVector{SIFTSResidue},
db::Type{T},
) where {T<:DataBase}
$(F)(list) do res
database = get(res, db)
if !ismissing(database)
f(database)
end
end
end
end
end
"""
The `SIFTS` module of MIToS allows obtaining the
residue-level mapping between databases stored in the SIFTS XML files.
It makes it easy to assign PDB residues to UniProt/Pfam positions.
Given that pairwise alignments can lead to misleading associations between residues in both sequences,
SIFTS offers a more reliable mapping between sequence and structure residue numbers.
**Features**
- Download and parse SIFTS XML files
- Store residue-level mapping in Julia
- Easy generation of `OrderedDict`s between residues numbers
```julia
using MIToS.SIFTS
```
"""
module SIFTS
import LightXML
using AutoHashEquals
using OrderedCollections
using MIToS.Utils
export DataBase,
dbPDBe,
dbInterPro,
dbUniProt,
dbPfam,
dbNCBI,
dbPDB,
dbCATH,
dbSCOP,
dbSCOP2,
dbSCOP2B,
dbEnsembl,
SIFTSResidue,
downloadsifts,
siftsmapping,
SIFTSXML,
# Mitos.Utils
All,
read_file,
parse_file,
# Imported from Base (and exported for docs)
parse
include("XMLParser.jl")
include("ResidueMapping.jl")
end
struct SIFTSXML <: FileFormat end
# Download SIFTS
# ==============
"""
downloadsifts(pdbcode::String; filename::String, source::String="https")
Download the gzipped SIFTS XML file for the provided `pdbcode`.
The downloaded file will have the default extension `.xml.gz`.
While you can change the `filename`, it must include the `.xml.gz` ending.
The `source` keyword argument is set to `"https"` by default.
Alternatively, you can choose `"ftp"` as the `source`, which will retrieve the file from
the EBI FTP server at ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/.
However, please note that using `"https"` is highly recommended.
This option will download the file from the
EBI PDBe server at https://www.ebi.ac.uk/pdbe/files/sifts/.
"""
function downloadsifts(
pdbcode::String;
filename::String = "$(lowercase(pdbcode)).xml.gz",
source::String = "https",
)
@assert endswith(filename, ".xml.gz") "filename must end with .xml.gz"
@assert source == "ftp" || source == "https" "source must be ftp or https"
if check_pdbcode(pdbcode)
url = if source == "ftp"
string(
"ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/split_xml/",
lowercase(pdbcode[2:3]),
"/",
lowercase(pdbcode),
".xml.gz",
)
else
string("https://www.ebi.ac.uk/pdbe/files/sifts/", lowercase(pdbcode), ".xml.gz")
end
download_file(url, filename)
else
throw(ErrorException("$pdbcode is not a correct PDB"))
end
filename
end
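# Usage sketch (kept as a comment; it needs network access):
#
#   sifts_file = downloadsifts("1IVO") # writes 1ivo.xml.gz in the working directory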
# Internal Parser Functions
# =========================
"""
Gets the entities of a SIFTS XML. In some cases, each entity is a PDB chain.
WARNING: Sometimes there are more chains than entities!
```
<entry dbSource="PDBe" ...
...
<entity type="protein" entityId="A">
...
</entity>
<entity type="protein" entityId="B">
...
</entity>
</entry>
```
"""
function _get_entities(sifts)
siftsroot = LightXML.root(sifts)
LightXML.get_elements_by_tagname(siftsroot, "entity")
end
"""
Gets an array of the segments, the continuous regions of an entity.
Chimeras and expression tags generate more than one segment, for example.
"""
_get_segments(entity) = LightXML.get_elements_by_tagname(entity, "segment")
"""
Returns an iterator over the residues in the `listResidue` element.
```
<listResidue>
<residue>
...
</residue>
...
</listResidue>
```
"""
function _get_residues(segment)
LightXML.child_elements(
select_element(
LightXML.get_elements_by_tagname(segment, "listResidue"),
"listResidue",
),
)
end
"""
Returns `true` if the residue was annotated as *Not_Observed*.
"""
function _is_missing(residue)
details = LightXML.get_elements_by_tagname(residue, "residueDetail")
for det in details
# XML: <residueDetail dbSource="PDBe" property="Annotation">Not_Observed</residueDetail>
if LightXML.attribute(det, "property") == "Annotation" &&
LightXML.content(det) == "Not_Observed"
return (true)
end
end
false
end
function _get_details(residue)::Tuple{Bool,String,String}
details = LightXML.get_elements_by_tagname(residue, "residueDetail")
missing_residue = false
sscode = " "
ssname = " "
for det in details
detail_property = LightXML.attribute(det, "property")
# XML: <residueDetail dbSource="PDBe" property="Annotation">Not_Observed</residueDetail>
if detail_property == "Annotation" && LightXML.content(det) == "Not_Observed"
missing_residue = true
break
# XML: <residueDetail dbSource="PDBe" property="codeSecondaryStructure"...
elseif detail_property == "codeSecondaryStructure"
sscode = LightXML.content(det)
# XML: <residueDetail dbSource="PDBe" property="nameSecondaryStructure"...
elseif detail_property == "nameSecondaryStructure"
ssname = LightXML.content(det)
end
end
(missing_residue, sscode, ssname)
end
"""
All is used instead of MIToS 1.0 "all" or "*", because it's possible to dispatch on it.
"""
struct All end
_get_function_name(str::String)::String = split(str, '.')[end]
"""
This function performs the same operation as
`something(findnext(r"[ \t]+", line, last(last_spaces)+1), 0:-1)` but it is faster.
"""
function _find_next_space_or_tab(line, start_pos::Int)
for i = start_pos:lastindex(line)
char = line[i]
if char == ' ' || char == '\t'
start_index = i
end_index = start_index
while end_index <= lastindex(line) &&
(line[end_index] == ' ' || line[end_index] == '\t')
end_index = nextind(line, end_index)
end
return start_index:(prevind(line, end_index))
end
end
return 0:-1
end
"""
`get_n_words(line::String, n::Int)`
It returns a `Vector{String}` with the first `n` (possible) words/fields (delimited
by space or tab). If there are more than `n` words, the last word
returned contains the final words and the delimiters. The length of the
returned vector is `n` or less (if the number of words is less than `n`).
This is used for parsing the Stockholm format.
```jldoctest
julia> using MIToS.Utils
julia> get_n_words("#=GR O31698/18-71 SS CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH", 3)
3-element Vector{String}:
"#=GR"
"O31698/18-71"
"SS CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH"
```
"""
function get_n_words(line::String, n::Int)
if isempty(line)
return String[]
end
words = Array{String}(undef, n)
N = 1
last_spaces = 0:0
while true
if N == n
@inbounds words[N] = line[(last(last_spaces)+1):end]
break
end
spaces = _find_next_space_or_tab(line, last(last_spaces) + 1)
if first(spaces) == 0
@inbounds words[N] = line[(last(last_spaces)+1):end]
break
end
@inbounds words[N] = line[(last(last_spaces)+1):(first(spaces)-1)]
last_spaces = spaces
N += 1
end
if N != n
resize!(words, N)
end
words
end
"""
`hascoordinates(id)`
It returns `true` if `id`/sequence name has the format: **UniProt/start-end**
(e.g. O83071/192-246).
"""
function hascoordinates(id)
occursin(r"^\w+/\d+-\d+$", id)
end
"""
Selects the first element of the vector. This is useful for unpacking one-element vectors.
It shows a warning if there are more elements. `element_name` is *element* by default,
but the name can be changed using the second argument.
"""
function select_element(vector::Array{T,1}, element_name::String = "element") where {T}
len = length(vector)
if len == 0
throw(ErrorException("There is not $element_name"))
elseif len != 1
@warn("There are more than one ($len) $element_name using the first.")
end
@inbounds return (vector[1])
end
"""
Returns a vector with the `part` ("upper" or "lower") of the square matrix `mat`.
The `diagonal` is not included by default.
"""
function matrix2list(
mat::AbstractMatrix{T};
part = "upper",
diagonal::Bool = false,
) where {T}
nrow, ncol = size(mat)
if nrow != ncol
throw(ErrorException("Should be a square matrix"))
end
if diagonal
d = 0
N = div((ncol * ncol) + ncol, 2)
else
d = 1
N = div((ncol * ncol) - ncol, 2)
end
list = Array{T}(undef, N)
k = 1
if part == "upper"
for i = 1:(ncol-d)
for j = (i+d):ncol
list[k] = mat[i, j]
k += 1
end
end
elseif part == "lower"
for j = 1:(ncol-d)
for i = (j+d):ncol
list[k] = mat[i, j]
k += 1
end
end
else
throw(ErrorException("part should be \"upper\" or \"lower\""))
end
list
end
"""
Returns a square symmetric matrix from the vector `vec`. `side` is the number of
rows/columns. The `diagonal` is not included by default, set to `true` if there are
diagonal elements in the list.
"""
function list2matrix(vec::AbstractVector{T}, side::Int; diagonal::Bool = false) where {T}
d = diagonal ? 0 : 1
mat = zeros(T, side, side)
k = 1
for i = 1:(side-d)
for j = (i+d):side
value = vec[k]
mat[i, j] = value
mat[j, i] = value
k += 1
end
end
mat
end
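# Usage sketch: for a symmetric matrix with a zero diagonal, `matrix2list` and
# `list2matrix` are inverse operations (with the default `diagonal = false`).
#
#   mat = [0.0 1.0 2.0; 1.0 0.0 3.0; 2.0 3.0 0.0]
#   vec = matrix2list(mat)     # [1.0, 2.0, 3.0]
#   list2matrix(vec, 3) == mat # true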
"""
It checks if a PDB code has the correct format.
"""
check_pdbcode(pdbcode::String) = occursin(r"^\w{4}$", pdbcode)
"""
Getter for the `array` field of `NamedArray`s
"""
getarray(x::NamedArray) = x.array
import Base: read
"""
`FileFormat` is used to define specialized `parse_file` (called by `read_file`) and
`print_file` (called by `write_file`) methods for different file formats.
"""
abstract type FileFormat end
"""
This function raises an error if a GZip file doesn't start with the GZip magic number (0x1f8b, accepted in either byte order).
"""
function _check_gzip_file(filename)
if endswith(filename, ".gz")
open(filename, "r") do fh
magic = read(fh, UInt16)
# 0x1f8b is the magic number for GZip files
# However, some files use 0x8b1f.
# For example, the file test/data/18gs.xml.gz uses 0x8b1f.
if magic != 0x1f8b && magic != 0x8b1f
throw(ErrorException("$filename is not a GZip file!"))
end
end
end
filename
end
function _download_file(url::AbstractString, filename::AbstractString; kargs...)
with_logger(ConsoleLogger(stderr, Logging.Warn)) do
Downloads.download(url, filename; kargs...)
end
_check_gzip_file(filename)
end
"""
`download_file` uses **Downloads.jl** to download files from the web. It takes the file
url as the first argument and, optionally, a path to save it.
Keyword arguments are directly passed to `Downloads.download`.
```jldoctest
julia> using MIToS.Utils
julia> download_file("https://www.uniprot.org/uniprot/P69905.fasta", "seq.fasta")
"seq.fasta"
```
"""
function download_file(url::AbstractString, filename::AbstractString; kargs...)
retry(_download_file, delays = ExponentialBackOff(n = 5))(url, filename; kargs...)
end
function download_file(url::AbstractString; kargs...)
name = tempname()
if endswith(url, ".gz")
name *= ".gz"
end
download_file(url, name; kargs...)
end
"""
Create an iterable object that will yield each line from a stream **or string**.
"""
lineiterator(string::String) = eachline(IOBuffer(string))
lineiterator(stream::IO) = eachline(stream)
"""
Returns the `filename`.
Throws an `ErrorException` if the file doesn't exist, or shows a warning if the file is empty.
"""
function check_file(filename)
if !isfile(filename)
throw(ErrorException(string(filename, " doesn't exist!")))
elseif filesize(filename) == 0
@warn string(filename, " is empty!")
end
filename
end
"""
Returns `true` if the file exists and isn't empty.
"""
isnotemptyfile(filename) = isfile(filename) && filesize(filename) > 0
# for using with download, since filename doesn't have file extension
function _read(
completename::AbstractString,
filename::AbstractString,
format::Type{T},
args...;
kargs...,
) where {T<:FileFormat}
check_file(filename)
if endswith(completename, ".xml.gz") || endswith(completename, ".xml")
document = LightXML.parse_file(filename)
try
parse_file(document, T, args...; kargs...)
finally
LightXML.free(document)
end
else
fh = open(filename, "r")
try
fh = endswith(completename, ".gz") ? GzipDecompressorStream(fh) : fh
parse_file(fh, T, args...; kargs...)
finally
close(fh)
end
end
end
"""
`read_file(pathname, FileFormat [, Type [, … ] ] ) -> Type`
This function opens the file at `pathname` and calls `parse_file(io, ...)` for
the given `FileFormat` and `Type` on it. If the `pathname` is an HTTP or FTP URL,
the file is downloaded with `download_file` to a temporary file.
Gzipped files should end with `.gz`.
"""
function read_file(
completename::AbstractString,
format::Type{T},
args...;
kargs...,
) where {T<:FileFormat}
if startswith(completename, "http://") ||
startswith(completename, "https://") ||
startswith(completename, "ftp://")
filename =
download_file(completename, headers = Dict("Accept-Encoding" => "identity"))
try
_read(completename, filename, T, args...; kargs...)
finally
rm(filename)
end
else
# completename and filename are the same
_read(completename, completename, T, args...; kargs...)
end
end
function read(
name::AbstractString,
format::Type{T},
args...;
kargs...,
) where {T<:FileFormat}
Base.depwarn(
"Using read with $format is deprecated, use read_file instead.",
:read,
force = true,
)
read_file(name, format, args...; kargs...)
end
# parse_file
# ----------
function Base.parse(
io::Union{IO,AbstractString},
format::Type{T},
args...;
kargs...,
) where {T<:FileFormat}
Base.depwarn(
"Using parse with $format is deprecated, use parse_file instead.",
:parse,
force = true,
)
parse_file(io, format, args...; kargs...)
end
# A placeholder to define the function name so that other modules can add their own
# definition of parse_file for their own `FileFormat`s
function parse_file end
# Three letters (for PDB and MSA)
# ===============================
# download("ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif","components.cif")
#
# open("components.cif","r") do fh
# m3 = nothing
# m1 = nothing
# peptide = false
# for line in eachline(fh)
# if startswith(line, "_chem_comp")
# if m3 == nothing
# m3 = match(r"^_chem_comp.three_letter_code\s+(\w{3})\s*$",line) # It avoids: DA, DT, ...
# end
# if m1 == nothing
# m1 = match(r"^_chem_comp.one_letter_code\s+(\w{1})\s*$",line) # It avoids: ?
# end
# if startswith(line,"_chem_comp.type")
# peptide = occursin(r"peptide"i,line) # It avoids 0DA, 2DA, etc...
# end
# if m1 != nothing && m3 != nothing && parent != nothing && peptide
# res = collect(m1[1])[1]
# if (Residue(res) != XAA) # It avoids non standar residues, i.e. PYL
# println("\"$(m3[1])\" => '$(Char(Residue(res)))',")
# end
# m3 = nothing
# m1 = nothing
# peptide = false
# end
# else
# m3 = nothing
# m1 = nothing
# peptide = false
# end
# end
# end
"""
`THREE2ONE` is a dictionary that maps three-letter amino acid residue codes (`String`)
to their corresponding one-letter codes (`Char`). The dictionary is generated by
parsing the `components.cif` file from the Protein Data Bank.
```jldoctest
julia> using MIToS.Utils
julia> one_letter_code = THREE2ONE["ALA"]
'A': ASCII/Unicode U+0041 (category Lu: Letter, uppercase)
```
"""
const THREE2ONE = Dict{String,Char}(
"00C" => 'C',
"02K" => 'A',
"02L" => 'N',
"03Y" => 'C',
"07O" => 'C',
"08P" => 'C',
"0A0" => 'D',
"0A1" => 'Y',
"0A2" => 'K',
"0A8" => 'C',
"0AA" => 'V',
"0AB" => 'V',
"0AC" => 'G',
"0AF" => 'W',
"0AG" => 'L',
"0AH" => 'S',
"0AK" => 'D',
"0BN" => 'F',
"0CS" => 'A',
"0E5" => 'T',
"0EA" => 'Y',
"0FL" => 'A',
"0NC" => 'A',
"0WZ" => 'Y',
"0Y8" => 'P',
"143" => 'C',
"1OP" => 'Y',
"1PA" => 'F',
"1PI" => 'A',
"1TQ" => 'W',
"1TY" => 'Y',
"1X6" => 'S',
"200" => 'F',
"23F" => 'F',
"26B" => 'T',
"2AG" => 'A',
"2CO" => 'C',
"2FM" => 'M',
"2HF" => 'H',
"2KK" => 'K',
"2KP" => 'K',
"2LU" => 'L',
"2ML" => 'L',
"2MR" => 'R',
"2MT" => 'P',
"2OR" => 'R',
"2QZ" => 'T',
"2R3" => 'Y',
"2TL" => 'T',
"2TY" => 'Y',
"2VA" => 'V',
"2XA" => 'C',
"33X" => 'A',
"3AH" => 'H',
"3CF" => 'F',
"3GA" => 'A',
"3MD" => 'D',
"3NF" => 'Y',
"3QN" => 'K',
"3XH" => 'G',
"4BF" => 'Y',
"4CF" => 'F',
"4CY" => 'M',
"4DP" => 'W',
"4FB" => 'P',
"4FW" => 'W',
"4GJ" => 'C',
"4HT" => 'W',
"4IN" => 'W',
"4PH" => 'F',
"4U7" => 'A',
"56A" => 'H',
"5AB" => 'A',
"5CR" => 'F',
"5CS" => 'C',
"5CT" => 'K',
"5CW" => 'W',
"5HP" => 'E',
"5OW" => 'K',
"5VV" => 'N',
"6CL" => 'K',
"6CW" => 'W',
"6GL" => 'A',
"6HN" => 'K',
"6V1" => 'C',
"6WK" => 'C',
"7JA" => 'I',
"9NE" => 'E',
"9NF" => 'F',
"9NR" => 'R',
"9NV" => 'V',
"A5N" => 'N',
"AA3" => 'A',
"AA4" => 'A',
"AAR" => 'R',
"ABA" => 'A',
"ACB" => 'D',
"ACL" => 'R',
"AEI" => 'D',
"AFA" => 'N',
"AGM" => 'R',
"AGT" => 'C',
"AHB" => 'N',
"AHO" => 'A',
"AHP" => 'A',
"AIB" => 'A',
"AKL" => 'D',
"AKZ" => 'D',
"ALA" => 'A',
"ALC" => 'A',
"ALM" => 'A',
"ALN" => 'A',
"ALO" => 'T',
"ALS" => 'A',
"ALT" => 'A',
"ALV" => 'A',
"ALY" => 'K',
"AN8" => 'A',
"APH" => 'A',
"API" => 'K',
"APK" => 'K',
"AR2" => 'R',
"AR4" => 'E',
"AR7" => 'R',
"ARG" => 'R',
"ARM" => 'R',
"ARO" => 'R',
"AS2" => 'D',
"ASA" => 'D',
"ASB" => 'D',
"ASI" => 'D',
"ASK" => 'D',
"ASL" => 'D',
"ASN" => 'N',
"ASP" => 'D',
"ASQ" => 'D',
"AYA" => 'A',
"AZK" => 'K',
"AZS" => 'S',
"AZY" => 'Y',
"B1F" => 'F',
"B2A" => 'A',
"B2F" => 'F',
"B2I" => 'I',
"B2V" => 'V',
"B3A" => 'A',
"B3D" => 'D',
"B3E" => 'E',
"B3K" => 'K',
"B3S" => 'S',
"B3U" => 'H',
"B3X" => 'N',
"B3Y" => 'Y',
"BB6" => 'C',
"BB7" => 'C',
"BB8" => 'F',
"BB9" => 'C',
"BBC" => 'C',
"BCS" => 'C',
"BFD" => 'D',
"BG1" => 'S',
"BH2" => 'D',
"BHD" => 'D',
"BIF" => 'F',
"BIU" => 'I',
"BL2" => 'L',
"BLE" => 'L',
"BLY" => 'K',
"BMT" => 'T',
"BNN" => 'F',
"BOR" => 'R',
"BPE" => 'C',
"BSE" => 'S',
"BTA" => 'L',
"BTC" => 'C',
"BTR" => 'W',
"BUC" => 'C',
"BUG" => 'V',
"C1X" => 'K',
"C22" => 'A',
"C3Y" => 'C',
"C4R" => 'C',
"C5C" => 'C',
"C6C" => 'C',
"CAF" => 'C',
"CAS" => 'C',
"CAY" => 'C',
"CCL" => 'K',
"CCS" => 'C',
"CEA" => 'C',
"CG6" => 'C',
"CGA" => 'E',
"CGU" => 'E',
"CHP" => 'G',
"CIR" => 'R',
"CLE" => 'L',
"CLG" => 'K',
"CLH" => 'K',
"CME" => 'C',
"CMH" => 'C',
"CML" => 'C',
"CMT" => 'C',
"CR5" => 'G',
"CS0" => 'C',
"CS1" => 'C',
"CS3" => 'C',
"CS4" => 'C',
"CSA" => 'C',
"CSB" => 'C',
"CSD" => 'C',
"CSE" => 'C',
"CSJ" => 'C',
"CSO" => 'C',
"CSP" => 'C',
"CSR" => 'C',
"CSS" => 'C',
"CSU" => 'C',
"CSW" => 'C',
"CSX" => 'C',
"CSZ" => 'C',
"CTE" => 'W',
"CTH" => 'T',
"CWR" => 'S',
"CXM" => 'M',
"CY0" => 'C',
"CY1" => 'C',
"CY3" => 'C',
"CY4" => 'C',
"CYA" => 'C',
"CYD" => 'C',
"CYF" => 'C',
"CYG" => 'C',
"CYJ" => 'K',
"CYM" => 'C',
"CYQ" => 'C',
"CYR" => 'C',
"CYS" => 'C',
"CZ2" => 'C',
"CZZ" => 'C',
"D11" => 'T',
"D3P" => 'G',
"DAB" => 'A',
"DAH" => 'F',
"DAL" => 'A',
"DAR" => 'R',
"DAS" => 'D',
"DBB" => 'T',
"DBS" => 'S',
"DBU" => 'T',
"DBY" => 'Y',
"DBZ" => 'A',
"DC2" => 'C',
"DCY" => 'C',
"DDE" => 'H',
"DGH" => 'G',
"DGL" => 'E',
"DGN" => 'Q',
"DHA" => 'S',
"DHI" => 'H',
"DHN" => 'V',
"DHV" => 'V',
"DI7" => 'Y',
"DIL" => 'I',
"DIR" => 'R',
"DIV" => 'V',
"DLE" => 'L',
"DLS" => 'K',
"DLY" => 'K',
"DM0" => 'K',
"DMH" => 'N',
"DMK" => 'D',
"DNE" => 'L',
"DNL" => 'K',
"DNP" => 'A',
"DNS" => 'K',
"DOH" => 'D',
"DON" => 'L',
"DPL" => 'P',
"DPN" => 'F',
"DPP" => 'A',
"DPQ" => 'Y',
"DPR" => 'P',
"DSE" => 'S',
"DSG" => 'N',
"DSN" => 'S',
"DSP" => 'D',
"DTH" => 'T',
"DTR" => 'W',
"DTY" => 'Y',
"DVA" => 'V',
"DYS" => 'C',
"ECC" => 'Q',
"EFC" => 'C',
"EHP" => 'F',
"ESB" => 'Y',
"ESC" => 'M',
"EXY" => 'L',
"F2F" => 'F',
"FAK" => 'K',
"FB5" => 'A',
"FB6" => 'A',
"FCL" => 'F',
"FGA" => 'E',
"FGL" => 'G',
"FGP" => 'S',
"FH7" => 'K',
"FHL" => 'K',
"FHO" => 'K',
"FLA" => 'A',
"FLE" => 'L',
"FLT" => 'Y',
"FME" => 'M',
"FOE" => 'C',
"FP9" => 'P',
"FT6" => 'W',
"FTR" => 'W',
"FTY" => 'Y',
"FVA" => 'V',
"FZN" => 'K',
"GAU" => 'E',
"GFT" => 'S',
"GGL" => 'E',
"GHG" => 'Q',
"GHP" => 'G',
"GL3" => 'G',
"GLH" => 'Q',
"GLJ" => 'E',
"GLK" => 'E',
"GLN" => 'Q',
"GLQ" => 'E',
"GLU" => 'E',
"GLY" => 'G',
"GLZ" => 'G',
"GMA" => 'E',
"GPL" => 'K',
"GSC" => 'G',
"GSU" => 'E',
"GT9" => 'C',
"GVL" => 'S',
"H14" => 'F',
"H5M" => 'P',
"HAC" => 'A',
"HAR" => 'R',
"HBN" => 'H',
"HHI" => 'H',
"HIA" => 'H',
"HIC" => 'H',
"HIP" => 'H',
"HIQ" => 'H',
"HIS" => 'H',
"HL2" => 'L',
"HLU" => 'L',
"HMR" => 'R',
"HPC" => 'F',
"HPE" => 'F',
"HPH" => 'F',
"HPQ" => 'F',
"HQA" => 'A',
"HRG" => 'R',
"HRP" => 'W',
"HS8" => 'H',
"HS9" => 'H',
"HSE" => 'S',
"HSL" => 'S',
"HSO" => 'H',
"HSV" => 'H',
"HTI" => 'C',
"HTN" => 'N',
"HTR" => 'W',
"HV5" => 'A',
"HVA" => 'V',
"HY3" => 'P',
"HYP" => 'P',
"HZP" => 'P',
"I2M" => 'I',
"I58" => 'K',
"IAM" => 'A',
"IAR" => 'R',
"IAS" => 'D',
"IEL" => 'K',
"IGL" => 'G',
"IIL" => 'I',
"ILE" => 'I',
"ILG" => 'E',
"ILX" => 'I',
"IML" => 'I',
"IOY" => 'F',
"IPG" => 'G',
"IT1" => 'K',
"IYR" => 'Y',
"IYT" => 'T',
"IZO" => 'M',
"JJJ" => 'C',
"JJK" => 'C',
"JJL" => 'C',
"K1R" => 'C',
"KCX" => 'K',
"KGC" => 'K',
"KNB" => 'A',
"KOR" => 'M',
"KPF" => 'K',
"KPI" => 'K',
"KST" => 'K',
"KYN" => 'W',
"KYQ" => 'K',
"LA2" => 'K',
"LAA" => 'D',
"LAL" => 'A',
"LBY" => 'K',
"LCK" => 'K',
"LCX" => 'K',
"LDH" => 'K',
"LED" => 'L',
"LEF" => 'L',
"LEH" => 'L',
"LEI" => 'V',
"LEM" => 'L',
"LEN" => 'L',
"LET" => 'K',
"LEU" => 'L',
"LEX" => 'L',
"LLP" => 'K',
"LLY" => 'K',
"LME" => 'E',
"LMF" => 'K',
"LMQ" => 'Q',
"LP6" => 'K',
"LPD" => 'P',
"LPG" => 'G',
"LPS" => 'S',
"LSO" => 'K',
"LTR" => 'W',
"LVG" => 'G',
"LVN" => 'V',
"LYF" => 'K',
"LYK" => 'K',
"LYM" => 'K',
"LYN" => 'K',
"LYR" => 'K',
"LYS" => 'K',
"LYX" => 'K',
"LYZ" => 'K',
"M0H" => 'C',
"M2L" => 'K',
"M2S" => 'M',
"M30" => 'G',
"M3L" => 'K',
"MAA" => 'A',
"MAI" => 'R',
"MBQ" => 'Y',
"MC1" => 'S',
"MCL" => 'K',
"MCS" => 'C',
"MD3" => 'C',
"MD6" => 'G',
"MDF" => 'Y',
"MEA" => 'F',
"MED" => 'M',
"MEG" => 'E',
"MEN" => 'N',
"MEQ" => 'Q',
"MET" => 'M',
"MEU" => 'G',
"MGG" => 'R',
"MGN" => 'Q',
"MGY" => 'G',
"MHL" => 'L',
"MHO" => 'M',
"MHS" => 'H',
"MIS" => 'S',
"MK8" => 'L',
"ML3" => 'K',
"MLE" => 'L',
"MLL" => 'L',
"MLY" => 'K',
"MLZ" => 'K',
"MME" => 'M',
"MMO" => 'R',
"MND" => 'N',
"MNL" => 'L',
"MNV" => 'V',
"MP8" => 'P',
"MPQ" => 'G',
"MSA" => 'G',
"MSE" => 'M',
"MSL" => 'M',
"MSO" => 'M',
"MT2" => 'M',
"MTY" => 'Y',
"MVA" => 'V',
"N10" => 'S',
"N7P" => 'P',
"N80" => 'P',
"N8P" => 'P',
"NA8" => 'A',
"NAL" => 'A',
"NAM" => 'A',
"NB8" => 'N',
"NBQ" => 'Y',
"NC1" => 'S',
"NCB" => 'A',
"NDF" => 'F',
"NEM" => 'H',
"NEP" => 'H',
"NFA" => 'F',
"NHL" => 'E',
"NIY" => 'Y',
"NLE" => 'L',
"NLN" => 'L',
"NLO" => 'L',
"NLP" => 'L',
"NLQ" => 'Q',
"NLW" => 'L',
"NMC" => 'G',
"NMM" => 'R',
"NNH" => 'R',
"NPH" => 'C',
"NPI" => 'A',
"NTR" => 'Y',
"NTY" => 'Y',
"NVA" => 'V',
"NYS" => 'C',
"NZH" => 'H',
"OAR" => 'R',
"OAS" => 'S',
"OBS" => 'K',
"OCS" => 'C',
"OCY" => 'C',
"OHI" => 'H',
"OHS" => 'D',
"OLT" => 'T',
"OLZ" => 'S',
"OMT" => 'M',
"ONH" => 'A',
"OPR" => 'R',
"ORN" => 'A',
"ORQ" => 'R',
"OSE" => 'S',
"OTH" => 'T',
"OXX" => 'D',
"P1L" => 'C',
"P2Y" => 'P',
"PAQ" => 'Y',
"PAS" => 'D',
"PAT" => 'W',
"PAU" => 'A',
"PBB" => 'C',
"PBF" => 'F',
"PCA" => 'E',
"PCC" => 'P',
"PCS" => 'F',
"PEC" => 'C',
"PF5" => 'F',
"PFF" => 'F',
"PG1" => 'S',
"PG9" => 'G',
"PGY" => 'G',
"PH6" => 'P',
"PHA" => 'F',
"PHD" => 'D',
"PHE" => 'F',
"PHI" => 'F',
"PHL" => 'F',
"PHM" => 'F',
"PLE" => 'L',
"PM3" => 'F',
"POM" => 'P',
"PPN" => 'F',
"PR3" => 'C',
"PR9" => 'P',
"PRO" => 'P',
"PRS" => 'P',
"PSA" => 'F',
"PSH" => 'H',
"PTH" => 'Y',
"PTM" => 'Y',
"PTR" => 'Y',
"PVH" => 'H',
"PYA" => 'A',
"PYX" => 'C',
"QCS" => 'C',
"QMM" => 'Q',
"QPA" => 'C',
"QPH" => 'F',
"R1A" => 'C',
"R4K" => 'W',
"RE0" => 'W',
"RE3" => 'W',
"RPI" => 'R',
"RVX" => 'S',
"RZ4" => 'S',
"S1H" => 'S',
"S2C" => 'C',
"S2D" => 'A',
"S2P" => 'A',
"SAC" => 'S',
"SAH" => 'C',
"SAR" => 'G',
"SBL" => 'S',
"SCH" => 'C',
"SCS" => 'C',
"SCY" => 'C',
"SDP" => 'S',
"SE7" => 'A',
"SEB" => 'S',
"SEG" => 'A',
"SEL" => 'S',
"SEM" => 'S',
"SEN" => 'S',
"SEP" => 'S',
"SER" => 'S',
"SET" => 'S',
"SGB" => 'S',
"SHC" => 'C',
"SHP" => 'G',
"SHR" => 'K',
"SIB" => 'C',
"SLR" => 'P',
"SLZ" => 'K',
"SMC" => 'C',
"SME" => 'M',
"SMF" => 'F',
"SNC" => 'C',
"SNN" => 'N',
"SOC" => 'C',
"SOY" => 'S',
"SRZ" => 'S',
"STY" => 'Y',
"SUN" => 'S',
"SVA" => 'S',
"SVV" => 'S',
"SVW" => 'S',
"SVX" => 'S',
"SVY" => 'S',
"SVZ" => 'S',
"SYS" => 'C',
"T11" => 'F',
"TAV" => 'D',
"TBG" => 'V',
"TBM" => 'T',
"TCQ" => 'Y',
"TCR" => 'W',
"TDD" => 'L',
"TFQ" => 'F',
"TH6" => 'T',
"THC" => 'T',
"THR" => 'T',
"THZ" => 'R',
"TIH" => 'A',
"TMB" => 'T',
"TMD" => 'T',
"TNB" => 'C',
"TNR" => 'S',
"TOQ" => 'W',
"TPL" => 'W',
"TPO" => 'T',
"TPQ" => 'Y',
"TQI" => 'W',
"TQQ" => 'W',
"TRF" => 'W',
"TRG" => 'K',
"TRN" => 'W',
"TRO" => 'W',
"TRP" => 'W',
"TRQ" => 'W',
"TRW" => 'W',
"TRX" => 'W',
"TRY" => 'W',
"TTQ" => 'W',
"TTS" => 'Y',
"TXY" => 'Y',
"TY1" => 'Y',
"TY2" => 'Y',
"TY3" => 'Y',
"TY5" => 'Y',
"TYB" => 'Y',
"TYI" => 'Y',
"TYJ" => 'Y',
"TYN" => 'Y',
"TYO" => 'Y',
"TYQ" => 'Y',
"TYR" => 'Y',
"TYS" => 'Y',
"TYT" => 'Y',
"TYW" => 'Y',
"TYY" => 'Y',
"UMA" => 'A',
"VAD" => 'V',
"VAF" => 'V',
"VAL" => 'V',
"VB1" => 'K',
"WLU" => 'L',
"WPA" => 'F',
"WRP" => 'W',
"WVL" => 'V',
"X2W" => 'E',
"XCN" => 'C',
"XDT" => 'T',
"XPR" => 'P',
"XSN" => 'N',
"XX1" => 'K',
"YCM" => 'C',
"YOF" => 'Y',
"YTH" => 'T',
"Z01" => 'A',
"ZAL" => 'A',
"ZCL" => 'F',
"ZU0" => 'T',
"ZZJ" => 'A',
)
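# A brief usage sketch: the mapping above is exported as `THREE2ONE` by `MIToS.Utils`
# and translates three-letter PDB residue codes into one-letter codes. For example,
# selenomethionine ("MSE") maps to methionine:
#
#     julia> THREE2ONE["MSE"]
#     'M': ASCII/Unicode U+004D (category Lu: Letter, uppercase)
#
# For codes that may be missing from the table, `get(THREE2ONE, code, 'X')` is a
# convenient lookup pattern that falls back to 'X'.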
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 856 |
"""
The `Utils` module has common utility functions and types that are used by the other MIToS modules.
```julia
using MIToS.Utils
```
"""
module Utils
using Downloads
using CodecZlib
using NamedArrays
using Logging
import LightXML
export # GeneralUtils.jl
All,
get_n_words,
hascoordinates,
select_element,
matrix2list,
list2matrix,
check_pdbcode,
getarray,
# Read.jl
FileFormat,
lineiterator,
check_file,
isnotemptyfile,
download_file,
read_file,
parse_file,
# Write.jl
write_file,
print_file,
# ThreeLetterResidues.jl
THREE2ONE,
# Imported from Base (and exported for docs)
read,
write
include("GeneralUtils.jl")
include("Read.jl")
include("Write.jl")
include("ThreeLetterResidues.jl")
@deprecate deleteitems!(vector::Vector, items) filter!(x -> x ∉ items, vector)
end # Utils
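# A hedged usage sketch (not part of the module itself): most of these helpers are
# reached through the other MIToS modules, which define the concrete `FileFormat`
# subtypes. For instance, `MIToS.MSA` provides `FASTA` and `Stockholm`, and reading
# files then goes through the `read_file` function defined here:
#
#     using MIToS.MSA
#     msa = read_file("example.fasta", FASTA)  # "example.fasta" is a placeholder path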
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 1427 |
"""
`write_file(filename::AbstractString, object, format::Type{T}, mode::String="w") where {T<:FileFormat}`
This function opens a file with `filename` and `mode` (default: "w")
and writes (using `print_file`) the `object` with the given `format`.
If `filename` ends with `.gz`, the output is compressed with gzip.
"""
function write_file(
filename::AbstractString,
object,
format::Type{T},
mode::String = "w",
) where {T<:FileFormat}
fh = open(filename, mode)
if endswith(filename, ".gz")
fh = GzipCompressorStream(fh)
end
try
print_file(fh, object, format)
finally
close(fh)
end
nothing
end
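# A usage sketch (assuming `msa` is an object with a `print_file` method for the chosen
# format, e.g. an alignment read with `MIToS.MSA`, which defines `Stockholm`):
#
#     write_file("output.sto", msa, Stockholm)     # plain text output
#     write_file("output.sto.gz", msa, Stockholm)  # gzip-compressed, triggered by the .gz suffix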
function Base.write(
filename::AbstractString,
object,
format::Type{T},
mode::String = "w",
) where {T<:FileFormat}
Base.depwarn(
"Using write with $format is deprecated, use write_file instead.",
:write,
force = true,
)
write_file(filename, object, format, mode)
end
# print_file
# ----------
# Other modules can add their own definition of print_file for their own `FileFormat`s,
# i.e. a method of the form Utils.print_file(io::IO, object, ::Type{TheirFormat})
print_file(object, format::Type{T}) where {T<:FileFormat} = print_file(stdout, object, T)
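# A minimal sketch of that extension point; `MyFormat` and `MyData` are hypothetical
# names used only for illustration and are not part of MIToS:
#
#     struct MyFormat <: FileFormat end
#
#     function Utils.print_file(io::IO, object::MyData, ::Type{MyFormat})
#         println(io, object)  # emit `object` following the format's conventions
#     end
#
# With that method defined, `print_file(object, MyFormat)` (writing to stdout) and
# `write_file("file.myf", object, MyFormat)` both work through the generic code above.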
function Base.print(fh::IO, object, format::Type{T}) where {T<:FileFormat}
Base.depwarn(
"Using print with $format is deprecated, use print_file instead.",
:print,
force = true,
)
print_file(fh, object, format)
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 1157 |
"""
You need to `include` this file or load this module in your Julia session to run the tests
using `ReTest`. The main advantage of `ReTest` is that it allows you to run only selected
tests. Since `ReTest` is not a `MIToS` dependency, you need to install it manually.
Installing `Revise` is also recommended. In addition, you will need to install the test
dependencies, `Documenter` and `ROCAnalysis`, outside the MIToS environment:
```julia
using Pkg
Pkg.add("ReTest")
Pkg.add("Revise")
Pkg.add("Documenter")
Pkg.add("ROCAnalysis")
```
An example of usage if you want to run the `hcat` tests from the `MSA` module:
```julia
push!(LOAD_PATH, joinpath(homedir(), ".julia", "dev", "MIToS", "test"))
using Revise, MIToSTests
MIToSTests.retest("MSA")
MIToSTests.retest("hcat")
```
Note that we need to first run the most general test and then the specific one. Otherwise,
`ReTest` will not be able to find the `hcat` test.
NOTE: For some reason, after modifying the tests, `Revise` does not detect the changes
automatically. However, running `retest` again for the whole module seems to do the trick.
"""
module MIToSTests
using ReTest
include("tests.jl")
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 212 |
using Test
include("tests.jl")
# Doctests
if VERSION >= v"1.7.0" # Julia 1.7 changed the default random number generator from MersenneTwister to Xoshiro256++
doctest(MIToS)
end
print("""
----- =D -----
""")
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 2134 |
using Documenter
using MIToS
using MIToS.Utils
using MIToS.MSA
using MIToS.Information
using MIToS.PDB
using MIToS.SIFTS
using MIToS.Pfam
using Aqua
using LinearAlgebra
using Random
using OrderedCollections # OrderedDict
using Statistics # mean
using DelimitedFiles # readdlm
using ROCAnalysis # AUC
using Clustering # test/MSA/Hobohm.jl
using NamedArrays # array
using StatsBase # WeightVec
using PairwiseListMatrices # getlist
const DATA = joinpath(@__DIR__, "data")
@testset verbose = true "Aqua" begin
# The ambiguities are not caused by MIToS
Aqua.test_all(MIToS, ambiguities = false)
end
# Utils
@testset verbose = true "Utils" begin
include("Utils/GeneralUtils.jl")
end
# MSA
@testset verbose = true "MSA" begin
include("MSA/Residues.jl")
include("MSA/Alphabet.jl")
include("MSA/ThreeLetters.jl")
include("MSA/Annotations.jl")
include("MSA/MultipleSequenceAlignment.jl")
include("MSA/GeneralParserMethods.jl")
include("MSA/IO.jl")
include("MSA/General.jl")
include("MSA/MSAEditing.jl")
include("MSA/MSAStats.jl")
include("MSA/Sequences.jl")
include("MSA/Shuffle.jl")
include("MSA/Identity.jl")
include("MSA/Hobohm.jl")
include("MSA/MSAAnnotations.jl")
include("MSA/GetIndex.jl")
include("MSA/Concatenation.jl")
end
# Information
@testset verbose = true "Information" begin
include("Information/ContingencyTables.jl")
include("Information/Counters.jl")
include("Information/InformationMeasures.jl")
include("Information/Iterations.jl")
include("Information/CorrectedMutualInformation.jl")
include("Information/Gaps.jl")
include("Information/Externals.jl")
end
# PDB
@testset verbose = true "PDB" begin
include("PDB/PDB.jl")
include("PDB/Contacts.jl")
include("PDB/Kabsch.jl")
include("PDB/Internals.jl")
include("PDB/Sequences.jl")
include("PDB/AlphaFoldDB.jl")
end
# SIFTS
@testset verbose = true "SIFTS" begin
include("SIFTS/SIFTS.jl")
end
# Pfam
@testset verbose = true "Pfam" begin
include("Pfam/Pfam.jl")
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 8252 |
@testset "ContingencyTables" begin
@testset "Creation and Getters" begin
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
table = ContingencyTable(Float64, Val{N}, alphabet) # zeros in MIToS 1.0
@test size(table) == (Int[length(alphabet) for i = 1:N]...,)
@test length(table) == length(alphabet)^N
@test length(getmarginals(table)) == length(alphabet) * N
@test size(getmarginals(table)) == (length(alphabet), N)
@test sum(gettable(table)) == 0.0
@test sum(table) == 0.0 # == gettotal(table)
@test sum(table.temporal) == 0.0
@test sum(getmarginals(table)) == 0.0
@test gettotal(table) == 0.0
@test collect(table) == gettablearray(table) # Iteration interface in MIToS 1.0
@test isa(gettablearray(table), Array{Float64,N})
@test isa(getmarginalsarray(table), Array{Float64,2})
end
end
@testset "Similar" begin
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
table = ContingencyTable(Float64, Val{N}, alphabet) # zeros in MIToS 1.0
@test table == similar(table)
@test typeof(table) == typeof(similar(table))
@test table == similar(table, BigFloat)
@test typeof(table) != typeof(similar(table, BigFloat))
@test BigFloat == eltype(similar(table, BigFloat))
end
end
end
end
@testset "Show" begin
out = IOBuffer()
d1 = ContingencyTable(Float64, Val{1}, UngappedAlphabet())
d2 = ContingencyTable(Float64, Val{2}, UngappedAlphabet())
show(out, MIME"text/plain"(), d1)
d1_str = String(take!(out))
show(out, MIME"text/plain"(), d2)
d2_str = String(take!(out))
@test startswith(d1_str, r"ContingencyTable{Float64,\s?1,\s?UngappedAlphabet} :")
@test occursin("table :", d1_str)
@test occursin("Dim_1", d1_str)
@test !occursin("Dim_2", d1_str)
@test !occursin("marginals :", d1_str)
@test occursin("total :", d1_str)
@test startswith(d2_str, r"ContingencyTable{Float64,\s?2,\s?UngappedAlphabet} :")
@test occursin("table :", d2_str)
@test occursin("Dim_1", d2_str)
@test occursin("Dim_2", d2_str)
@test occursin("marginals :", d2_str)
@test occursin("total :", d2_str)
end
@testset "Indexing" begin
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
table = ContingencyTable(Float64, Val{2}, alphabet)
if isa(getalphabet(table), ReducedAlphabet)
@test table[1, 3] == 0.0
table[1, 3] = 100.0
i = 2 * length(getalphabet(table)) + 1 # using one index
@test table[i] == 100.0
table[i] = 10.0
@test table[Residue('A'), Residue('R')] == 10.0 # using two indices
table[Residue('A'), Residue('R')] = 20.0
@test table["AILMV", "RHK"] == 20.0
table["AILMV", "RHK"] = 30.0
@test table[1, 3] == 30.0
else
@test table[1, 2] == 0.0
table[1, 2] = 100.0
i = length(getalphabet(table)) + 1 # using one index
@test table[i] == 100.0
table[i] = 10.0
@test table[Residue('A'), Residue('R')] == 10.0 # using two indices
table[Residue('A'), Residue('R')] = 20.0
@test table["A", "R"] == 20.0
table["A", "R"] = 30.0
@test table[1, 2] == 30.0
end
end
end
@testset "Update" begin
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
table = ContingencyTable(Float64, Val{N}, alphabet)
fill!(table.temporal, 1.0)
@test sum(table.temporal) == 22.0^N
@test sum(table) == 0.0
@test sum(getmarginals(table)) == 0.0
@test gettotal(table) == 0.0
Information._update!(table)
@test sum(table.temporal) == 22.0^N
if isa(getalphabet(table), ReducedAlphabet)
@test table[1] == 5.0^N
@test sum(table) == 20.0^N
@test sum(getmarginals(table)) == N * (20.0^N)
@test gettotal(table) == 20.0^N
if N == 1
@test vec(getarray(getmarginals(table))) ==
vec(getarray(gettable(table)))
elseif N == 2
@test getmarginals(table)[1] ==
sum(5.0 * n for n in [5, 4, 3, 2, 3, 1, 1, 1])
                    elseif N == 3
@test getmarginals(table)[1] ==
sum(5.0 * n * 20.0 for n in [5, 4, 3, 2, 3, 1, 1, 1])
end
else
@test table[1] == 1.0
@test getmarginals(table)[1] == length(alphabet)^(N - 1)
@test sum(table) == length(alphabet)^N
@test sum(getmarginals(table)) == float(N) * (length(alphabet)^N)
@test gettotal(table) == float(length(alphabet))^N
end
end
end
end
@testset "Fill" begin
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
table = ContingencyTable(Float64, Val{N}, alphabet)
fill!(table, AdditiveSmoothing(1.0))
@test table[1] == 1.0
@test getmarginals(table)[1] == length(alphabet)^(N - 1)
@test sum(table) == length(alphabet)^N
@test sum(getmarginals(table)) == float(N) * (length(alphabet)^N)
@test gettotal(table) == float(length(alphabet))^N
end
end
end
@testset "Pseudocount" begin
@test AdditiveSmoothing(1.0) == one(AdditiveSmoothing{Float64})
@test AdditiveSmoothing(0.0) == zero(AdditiveSmoothing{Float64})
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
table = ContingencyTable(Float64, Val{N}, alphabet)
apply_pseudocount!(table, zero(AdditiveSmoothing{Float64}))
@test sum(table.temporal) == 0.0
@test sum(table) == 0.0
@test sum(getmarginals(table)) == 0.0
@test gettotal(table) == 0.0
apply_pseudocount!(table, AdditiveSmoothing(1.0))
@test table[1] == 1.0
@test getmarginals(table)[1] == length(alphabet)^(N - 1)
@test sum(table) == length(alphabet)^N
@test sum(getmarginals(table)) == float(N) * (length(alphabet)^N)
@test gettotal(table) == float(length(alphabet))^N
table = ContingencyTable(Float64, Val{N}, alphabet)
apply_pseudocount!(table, 1.0)
@test table[1] == 1.0
@test getmarginals(table)[1] == length(alphabet)^(N - 1)
@test sum(table) == length(alphabet)^N
@test sum(getmarginals(table)) == float(N) * (length(alphabet)^N)
@test gettotal(table) == float(length(alphabet))^N
end
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 14573 |
@testset "CorrectedMutualInformation" begin
Gaoetal2011 = joinpath(DATA, "Gaoetal2011.fasta")
function gao11_buslje09(measure)
filename = string("data_Gaoetal2011_soft_Busljeetal2009_measure_", measure, ".txt")
joinpath(DATA, filename)
end
    ## Column numbers for the output of Buslje et al. 2009
SCORE = 9
ZSCORE = 12
MIToS_SCORE = 2
MIToS_ZSCORE = 1
@testset "APC!" begin
for MI in (
[
0.0 2.0 4.0
2.0 0.0 6.0
4.0 6.0 0.0
],
PairwiseListMatrix([2.0, 4.0, 6.0]),
PairwiseListMatrix([0.0, 2.0, 4.0, 0.0, 6.0, 0.0], true),
NamedArray(PairwiseListMatrix([2.0, 4.0, 6.0])),
NamedArray(PairwiseListMatrix([0.0, 2.0, 4.0, 0.0, 6.0, 0.0], true)),
)
if isa(MI, Matrix{Float64})
@test Information._mean_column(MI) == [3.0, 4.0, 5.0]
@test Information._mean_total(MI) == 4.0
end
MIp = APC!(MI)
@test filter(x -> !isnan(x), vec(Matrix(MIp))) ≈ filter(x -> !isnan(x), vec([
NaN -1.0 0.25
-1.0 NaN 1.00
0.25 1.00 NaN
]))
end
end
@testset "Simple example I" begin
aln = Residue[
'A' 'A'
'A' 'R'
]
@testset "MI" begin
# Fill the Pij matrix of the example
Pij = zeros(Float64, 20, 20)
N = 2 # There are 2 sequences
Pij[1, 1] = (1 / N) # A A
Pij[1, 2] = (1 / N) # A R
@test sum(Pij) ≈ 1.0 # Is the matrix correct?
# Fill the marginals
Pi = dropdims(Base.sum(Pij, dims = 2), dims = 2)
@test Pi[1] == 1.0 # Is always A
Pj = dropdims(Base.sum(Pij, dims = 1), dims = 1)
@test Pj[1] == 0.5 # A
@test Pj[2] == 0.5 # R
# Start to sum with 0.0
total = 0.0
for i = 1:20, j = 1:20
if Pij[i, j] != 0.0 && Pi[i] != 0.0 && Pj[j] != 0.0
total += (Pij[i, j] * log(Pij[i, j] / (Pi[i] * Pj[j]))) # 0.5 * log(0.5/(0.5 * 1.0)) == 0.0
end
end
# Compare with MIToS result
mi = mapcolpairfreq!(
Information._mutual_information,
aln,
Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
)
@test mi[1, 2] == total
end
@testset "MI: Using pseudocount (0.05)" begin
Pij = zeros(Float64, 20, 20)
N = (400 * 0.05) + 2 # 0.05 is the pseudocount and there are 2 sequences
fill!(Pij, 0.05 / N)
Pij[1, 1] = (1.05 / N) # A A
Pij[1, 2] = (1.05 / N) # A R
@test sum(Pij) ≈ 1.0
Pi = dropdims(Base.sum(Pij, dims = 2), dims = 2)
Pj = dropdims(Base.sum(Pij, dims = 1), dims = 1)
total = 0.0
for i = 1:20, j = 1:20
total += (Pij[i, j] * log(Pij[i, j] / (Pi[i] * Pj[j])))
end
# Compare with MIToS result
mi = mapcolpairfreq!(
Information._mutual_information,
aln,
Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
pseudocounts = AdditiveSmoothing(0.05),
)
@test mi[1, 2] ≈ total
@testset "APC!" begin
APC!(mi)
@test isnan(mi[1, 1])
@test isapprox(mi[1, 2], 0.0, rtol = 1e-16)
zerodiagonal = Float64[
0.0 total
total 0.0
]
# MI.. == MI.j == MIi. == total
# APC = (total * total) / total == total
# MI - APC == total - total == 0.0
APC!(zerodiagonal)
@test zerodiagonal[1, 2] ≈ 0.0
end
end
@testset "Z-score" begin
# 2 Possibilities: A A A A
# A R R A
# Is almost the same MI, Z score should be 0.0
results = buslje09(aln, lambda = 0.05, clustering = false, apc = false)
@test results[MIToS_ZSCORE][1, 2] ≈ 0.0
@test aln == Residue[
'A' 'A'
'A' 'R'
]
end
end
@testset "Simple example II" begin
aln = Residue[
'R' 'A'
'A' 'R'
]
@testset "MI" begin
Pij = zeros(Float64, 20, 20)
N = 2 # There are 2 sequences
Pij[2, 1] = (1 / N) # R A
Pij[1, 2] = (1 / N) # A R
@test sum(Pij) ≈ 1.0
Pi = dropdims(Base.sum(Pij, dims = 2), dims = 2)
@test Pi[2] == 0.5 # R
@test Pi[1] == 0.5 # A
Pj = dropdims(Base.sum(Pij, dims = 1), dims = 1)
@test Pj[1] == 0.5 # A
@test Pj[2] == 0.5 # R
total = 0.0
for i = 1:20, j = 1:20
if Pij[i, j] != 0.0 && Pi[i] != 0.0 && Pj[j] != 0.0
total += (Pij[i, j] * log(Pij[i, j] / (Pi[i] * Pj[j])))
end
end
@test total == 0.5 * log(2) + 0.5 * log(2) # 0.5/(0.5*0.5) == 2
# Compare with MIToS result
mi = mapcolpairfreq!(
Information._mutual_information,
aln,
Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
)
@test mi[1, 2] == total
end
@testset "MI: Using pseudocount (0.05)" begin
Pij = zeros(Float64, 20, 20)
N = (400 * 0.05) + 2 # 0.05 is the pseudocount and there are 2 sequences
fill!(Pij, 0.05 / N)
Pij[2, 1] = (1.05 / N) # R A
Pij[1, 2] = (1.05 / N) # A R
@test sum(Pij) ≈ 1.0
Pi = dropdims(Base.sum(Pij, dims = 2), dims = 2)
Pj = dropdims(Base.sum(Pij, dims = 1), dims = 1)
total = 0.0
for i = 1:20, j = 1:20
total += (Pij[i, j] * log(Pij[i, j] / (Pi[i] * Pj[j])))
end
# Compare with MIToS result
mi = mapcolpairfreq!(
Information._mutual_information,
aln,
Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
pseudocounts = AdditiveSmoothing(0.05),
)
@test mi[1, 2] ≈ total
@testset "APC!" begin
APC!(mi)
@test isnan(mi[1, 1])
@test mi[1, 2] ≈ 0.0
zerodiagonal = Float64[
0.0 total
total 0.0
]
# MI.. == MI.j == MIi. == total
# APC = (total * total) / total == total
# MI - APC == total - total == 0.0
APC!(zerodiagonal)
@test zerodiagonal[1, 2] ≈ 0.0
end
end
@testset "Z-score" begin
# 4 Possibilities: R A R A A R A R
# A R R A A R R A
mi = mapcolpairfreq!(
Information._mutual_information,
aln,
Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
pseudocounts = AdditiveSmoothing(0.05),
)
other_posib = Residue[
'A' 'R'
'A' 'R'
]
other_mi = mapcolpairfreq!(
Information._mutual_information,
other_posib,
Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
pseudocounts = AdditiveSmoothing(0.05),
)
# The mean should be:
r_mean = 0.5 * (mi[1, 2] + other_mi[1, 2])
# And the std should be similar to:
r_std = 0.5 * (sqrt((mi[1, 2] - r_mean)^2) + sqrt((other_mi[1, 2] - r_mean)^2))
results =
buslje09(aln, lambda = 0.05, clustering = false, apc = false, samples = 100)
@test isapprox(
results[MIToS_ZSCORE][1, 2],
(mi[1, 2] - r_mean) / r_std,
atol = 0.55,
)
results = buslje09(
aln,
lambda = 0.05,
clustering = false,
apc = false,
samples = 1000,
)
@test isapprox(
results[MIToS_ZSCORE][1, 2],
(mi[1, 2] - r_mean) / r_std,
atol = 0.15,
)
results = buslje09(
aln,
lambda = 0.05,
clustering = false,
apc = false,
samples = 10000,
)
@test isapprox(
results[MIToS_ZSCORE][1, 2],
(mi[1, 2] - r_mean) / r_std,
atol = 0.055,
)
@test aln == Residue[
'R' 'A'
'A' 'R'
]
end
end
    @testset "Results from Buslje et al. 2009" begin
@testset "Simple" begin
data = readdlm(
joinpath(DATA, "data_simple_soft_Busljeetal2009_measure_MI.txt"),
comments = true,
)
_msa = read_file(joinpath(DATA, "simple.fasta"), FASTA)
results = buslje09(_msa, lambda = 0.0, clustering = false, apc = false)
@test isapprox(Float64(data[1, SCORE]), results[MIToS_SCORE][1, 2], atol = 1e-6)
@test isapprox(
Float64(data[1, ZSCORE]),
results[MIToS_ZSCORE][1, 2],
atol = 2.0,
)
end
@testset "Gaoetal2011" begin
data = readdlm(gao11_buslje09("MI"), comments = true)
_msa = read_file(Gaoetal2011, FASTA)
results = buslje09(_msa, lambda = 0.0, clustering = false, apc = false)
@test isapprox(
[Float64(x) for x in data[:, SCORE]],
matrix2list(results[MIToS_SCORE]),
atol = 1e-6,
)
@test isapprox(
[Float64(x) for x in data[:, ZSCORE]],
matrix2list(results[MIToS_ZSCORE]),
atol = 2.0,
)
# println(cor(convert(Vector{Float64},data[:,ZSCORE]),matrix2list(results[MIToS_ZSCORE])))
@testset "cMI" begin
result = copy(results[1])
result[diagind(result)] .= 0.0
@test cumulative(results[1], -Inf) == sum(result, dims = 1)
@test all(
(
cumulative(results[2], 0.5) .-
[0.0, 0.0, 1.38629, 1.38629, 1.38629, 0.0]'
) .< 0.00001,
)
# 1.38629 = 0.693147 + 0.693147
end
_msa = read_file(Gaoetal2011, FASTA)
result_0_05 = buslje09(_msa, lambda = 0.05, clustering = false, apc = false)
@test isapprox(
result_0_05[MIToS_SCORE][1, 2],
0.33051006116310444,
atol = 1e-14,
)
end
end
@testset "MI + clustering" begin
data = readdlm(gao11_buslje09("MI_clustering"), comments = true)
_msa = read_file(Gaoetal2011, FASTA)
results = buslje09(_msa, lambda = 0.0, clustering = true, apc = false)
@test isapprox(
[Float64(x) for x in data[:, SCORE]],
matrix2list(results[MIToS_SCORE]),
atol = 1e-6,
)
@test isapprox(
[Float64(x) for x in data[:, ZSCORE]],
matrix2list(results[MIToS_ZSCORE]),
atol = 2.0,
)
end
@testset "MIp" begin
data = readdlm(gao11_buslje09("MI_APC"), comments = true)
_msa = read_file(Gaoetal2011, FASTA)
results = buslje09(_msa, lambda = 0.0, clustering = false, apc = true)
@test isapprox(
[Float64(x) for x in data[:, SCORE]],
matrix2list(results[MIToS_SCORE]),
atol = 1e-6,
)
@test isapprox(
[Float64(x) for x in data[:, ZSCORE]],
matrix2list(results[MIToS_ZSCORE]),
atol = 2.0,
)
@test isapprox(results[MIToS_SCORE][5, 6], 0.018484, atol = 0.000001)
end
@testset "MIp + clustering" begin
data = readdlm(gao11_buslje09("MI_APC_clustering"), comments = true)
_msa = read_file(Gaoetal2011, FASTA)
results = buslje09(_msa, lambda = 0.0, clustering = true, apc = true)
@test isapprox(
[Float64(x) for x in data[:, SCORE]],
matrix2list(results[MIToS_SCORE]),
atol = 1e-6,
)
@test isapprox(
[Float64(x) for x in data[:, ZSCORE]],
matrix2list(results[MIToS_ZSCORE]),
atol = 2.0,
)
@test isapprox(results[MIToS_SCORE][5, 6], 0.018484, atol = 0.000001)
end
@testset "BLMI" begin
@testset "Simple" begin
file = joinpath(DATA, "simple.fasta")
msa = read_file(file, FASTA)
busl = buslje09(msa)
blmi = BLMI(msa)
@test PairwiseListMatrices.getlist(busl[1]) ≈
PairwiseListMatrices.getlist(blmi[1])
@test PairwiseListMatrices.getlist(busl[2]) ≈
PairwiseListMatrices.getlist(blmi[2])
end
@testset "Gaoetal2011" begin
msa = read_file(Gaoetal2011, FASTA)
busl = buslje09(msa, lambda = 0.0, samples = 0)
blmi = BLMI(msa, lambda = 0.0, beta = 0.0, samples = 5)
# BLMI should be equal to Buslje09 if beta is zero
@test PairwiseListMatrices.getlist(busl[2]) ≈
PairwiseListMatrices.getlist(blmi[2]) # MIapc
@test msa == read_file(Gaoetal2011, FASTA)
end
        @testset "Gaoetal2011, lambda 0.5" begin
msa = read_file(Gaoetal2011, FASTA)
busl = buslje09(msa, lambda = 0.5, samples = 0)
blmi = BLMI(msa, lambda = 0.5, beta = 0.0, samples = 5)
# BLMI should be equal to Buslje09 if beta is zero
@test PairwiseListMatrices.getlist(busl[2]) ≈
PairwiseListMatrices.getlist(blmi[2]) # MIapc
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 8522 |
@testset "Counters" begin
@testset "frequencies and probabilities" begin
seq = res"ARNDCQEGHILKMFPSTWYV-"
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
seqs = ((seq for i = 1:N)...,)::NTuple{N,Vector{Residue}}
table = ContingencyTable(Float64, Val{N}, alphabet) # zeros in MIToS 1.0
if N == 1
@test table[1] == 0.0
elseif N == 2
@test table[1, 1] == 0.0
else
@test table[1, 1, 1] == 0.0
end
@test getmarginals(table)[1, 1] == 0.0
@test gettotal(table) == 0.0
frequencies!(table, seqs...)
@test table == frequencies(seqs..., alphabet = alphabet)
if isa(alphabet, ReducedAlphabet)
@test table[1] == 5.0
@test getmarginals(table)[1] == 5.0
@test gettotal(table) ≈ 20.0 # Reduced alphabet without gap
else
@test table[1] == 1.0
@test getmarginals(table)[1] == 1.0
@test gettotal(table) ≈ length(alphabet)
if N == 2
len = length(alphabet)
@test gettablearray(table) == Matrix{Float64}(I, len, len)
@test getmarginalsarray(table)[:, 1] == [1.0 for i = 1:len]
end
end
normalize!(table)
@test table == probabilities(seqs..., alphabet = alphabet)
end
end
@testset "MSA" begin
msa = rand(Random.MersenneTwister(123), Residue, 3, 6)
Nres = frequencies(msa)
@test size(Nres) == (20,)
@test sum(Nres) == 18.0
Pres = probabilities(msa)
@test size(Pres) == (20,)
@test sum(Pres) ≈ 1.0
# Test on sequences or columns with a trailing dimension
col_a = msa[:, 1:1]
col_b = msa[:, 2:2]
@test size(col_a) == (3, 1)
@test isa(col_a, Matrix{Residue})
Npair = frequencies(col_a, col_b)
@test size(Npair) == (20, 20)
@test sum(Npair) == 3.0
Ppair = probabilities(col_a, col_b)
@test size(Ppair) == (20, 20)
@test sum(Ppair) ≈ 1.0
end
@testset "Using clustering" begin
clusters = Clusters([21], ones(Int, 21), Weights(ones(21) / 21))
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
seqs = ((seq for i = 1:N)...,)::NTuple{N,Vector{Residue}}
table = ContingencyTable(Float64, Val{N}, alphabet)
frequencies!(table, seqs..., weights = clusters)
@test table ==
frequencies(seqs..., alphabet = alphabet, weights = clusters)
len = length(alphabet)
if isa(alphabet, ReducedAlphabet)
@test table[1] == 5.0 / 21
@test getmarginals(table)[1] == 5.0 / 21
@test gettotal(table) ≈ (1.0 / 21) * 20.0 # ReducedAlphabet without gap
else
@test table[1] == 1.0 / 21
@test getmarginals(table)[1] == 1.0 / 21
@test gettotal(table) ≈ (1.0 / 21) * len
if N == 2
@test gettablearray(table) ==
(1.0 / 21) .* Matrix{Float64}(I, len, len)
@test getmarginalsarray(table)[:, 1] == [1.0 / 21 for i = 1:len]
end
end
normalize!(table)
@test table ==
probabilities(seqs..., alphabet = alphabet, weights = clusters)
end
end
end
@testset "Using pseudocount" begin
for alphabet in (
UngappedAlphabet(),
GappedAlphabet(),
ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"),
)
for N = 1:3
seqs = ((seq for i = 1:N)...,)::NTuple{N,Vector{Residue}}
table = ContingencyTable(Float64, Val{N}, alphabet)
frequencies!(table, seqs..., pseudocounts = AdditiveSmoothing(1.0))
@test table == frequencies(
seqs...,
alphabet = alphabet,
pseudocounts = AdditiveSmoothing(1.0),
)
len = Float64(length(alphabet))
if isa(alphabet, ReducedAlphabet)
@test table[1] == 5.0 + 1.0
@test getmarginals(table)[1] ==
5.0 + (N == 1 ? 1.0 : N == 2 ? len : len^2)
@test gettotal(table) ≈ length(gettable(table)) + 20.0 # without gap
else
@test table[1] == 2.0
@test getmarginals(table)[1] ==
1.0 + (N == 1 ? 1.0 : N == 2 ? len : len^2)
@test gettotal(table) ≈ len + length(gettable(table))
if N == 2
@test gettablearray(table) ==
Matrix{Float64}(I, Int(len), Int(len)) .+ 1.0
@test getmarginalsarray(table)[:, 1] ==
[len + 1.0 for i = 1:len]
end
end
normalize!(table)
@test table == probabilities(
seqs...,
alphabet = alphabet,
pseudocounts = AdditiveSmoothing(1.0),
)
end
end
end
@testset "pseudofrequencies" begin
table = ContingencyTable(Float64, Val{2}, UngappedAlphabet())
probabilities!(
table,
seq,
seq,
pseudofrequencies = BLOSUM_Pseudofrequencies(1.0, 0.0),
)
@test table == probabilities(
seq,
seq,
pseudofrequencies = BLOSUM_Pseudofrequencies(1.0, 0.0),
)
@test table == probabilities(seq, seq)
@test table[1] == 1.0 / 20.0
@test getmarginals(table)[1] == 1.0 / 20.0
@test gettotal(table) ≈ 1.0
@test gettablearray(table) == Matrix{Float64}(I, 20, 20) ./ 20.0
@test table != probabilities(
seq,
seq,
pseudofrequencies = BLOSUM_Pseudofrequencies(0.0, 1.0),
)
end
@testset "BigFloat" begin
table = ContingencyTable(BigFloat, Val{2}, UngappedAlphabet())
clusters = Clusters([21], ones(Int, 21), Weights(ones(21) / 21))
probabilities!(
table,
seq,
seq,
weights = clusters,
pseudocounts = AdditiveSmoothing(one(BigFloat)),
pseudofrequencies = BLOSUM_Pseudofrequencies(1.0, 1.0),
)
@test sum(table) ≈ one(BigFloat)
@test eltype(table) == BigFloat
@test isa(table[1], BigFloat)
end
@testset "delete_dimensions" begin
Pxyz = probabilities(seq, seq, seq)
@test delete_dimensions(Pxyz, 3) == probabilities(seq, seq)
@test delete_dimensions(Pxyz, 3, 2) == probabilities(seq)
Pxy = delete_dimensions(Pxyz, 3)
@test delete_dimensions!(Pxy, Pxyz, 1) == probabilities(seq, seq)
@test sum(Pxy) ≈ 1.0
Nxyz = frequencies(seq, seq, seq)
Nxy = delete_dimensions(Nxyz, 3)
@test Nxy == frequencies(seq, seq)
@test delete_dimensions(Nxyz, 3, 2) == frequencies(seq)
@test delete_dimensions!(Nxy, Nxyz, 1) == frequencies(seq, seq)
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 592 |
if VERSION >= v"1.5.0"
gaussdca_installed = false
try
using Pkg
Pkg.add(
PackageSpec(
url = "https://github.com/carlobaldassi/GaussDCA.jl",
rev = "master",
),
)
gaussdca_installed = true
catch err
        @warn "GaussDCA.jl could not be installed: $err"
end
if gaussdca_installed
msa = map(Residue, rand(1:21, 100, 20))
dca = gaussdca(msa, min_separation = 2)
@test isnan(dca[1, 1])
@test isnan(dca[1, 2])
@test !isnan(dca[1, 3])
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 922 |
@testset "Pairwise Gap Percentage" begin
@testset "Simple" begin
file = joinpath(DATA, "simple.fasta")
mat = [
0.0 0.0
0.0 0.0
]
(gu, gi) = pairwisegapfraction(file, FASTA)
@test gu == mat
@test gi == mat
end
@testset "Gaps" begin
file = joinpath(DATA, "gaps.txt")
cl = hobohmI(read_file(file, Raw), 62)
gu, gi = pairwisegapfraction(file, Raw)
ncl = nclusters(cl)
@test gu[1, 1] ≈ 0.0
@test gi[1, 1] ≈ 0.0
@test gu[1, 2] ≈ 100.0 * getweight(cl, 10) / ncl
@test gi[1, 2] ≈ 0.0
@test gu[10, 9] ≈ 100.0 * (ncl - getweight(cl, 1)) / ncl
@test gi[10, 9] ≈ 100.0 * (ncl - getweight(cl, 1) - getweight(cl, 2)) / ncl
@test gu[10, 10] ≈ 100.0 * (ncl - getweight(cl, 1)) / ncl
        @test gi[10, 10] ≈ 100.0 * (ncl - getweight(cl, 1)) / ncl
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 7061 |
@testset "Information Measures" begin
s = res"ARNDCQEGHILKMFPSTWYV-"
r = reverse(s)
g = res"GGGGGGGGGGGGGGGGGGGGG"
Pg = probabilities(g)
Pgg = probabilities(g, g)
Pggg = probabilities(g, g, g)
Ps = probabilities(s)
Pss = probabilities(s, s)
Psr = probabilities(s, r)
Psss = probabilities(s, s, s)
count_random = frequencies(Residue[], Residue[], pseudocounts = AdditiveSmoothing(1.0))
prob_random = probabilities(Residue[], Residue[], pseudocounts = AdditiveSmoothing(1.0))
@testset "Entropy" begin
@testset "H(X)" begin
@test shannon_entropy(Pg) ≈ 0.0
@test shannon_entropy(Ps) ≈ log(20.0)
@test shannon_entropy(Ps, base = 2) ≈ log(2, 20.0)
end
@testset "Joint Entropy: H(X,Y)" begin
@test shannon_entropy(Pgg) ≈ 0.0
@test shannon_entropy(Pss) ≈ log(20.0)
@test shannon_entropy(Psr) ≈ log(19.0)
@test shannon_entropy(Pss, base = 2) ≈ log(2, 20.0)
@test shannon_entropy(count_random) ≈ log(400.0)
@test shannon_entropy(prob_random) ≈ log(400.0)
end
@testset "Joint Entropy: H(X,Y,Z)" begin
@test shannon_entropy(Pggg) ≈ 0.0
@test shannon_entropy(Psss) ≈ log(20)
end
@testset "Using counts" begin
@test shannon_entropy(frequencies(g, g)) ≈ shannon_entropy(Pgg)
@test shannon_entropy(frequencies(s, s)) ≈ shannon_entropy(Pss)
@test shannon_entropy(frequencies(s, r)) ≈ shannon_entropy(Psr)
end
end
@testset "Kullback-Leibler" begin
@test kullback_leibler(Ps, background = [1.0 / 20.0 for i = 1:20]) ≈ 0.0
@test kullback_leibler(Ps, background = Ps) ≈ 0.0
@test kullback_leibler(Ps, background = BLOSUM62_Pi) ≈ kullback_leibler(Ps)
@test kullback_leibler(Ps, background = BLOSUM62_Pi) ≈
mapreduce(i -> 0.05 * log(0.05 / BLOSUM62_Pi[i]), +, 1:20)
@test kullback_leibler(Psr, background = Psr) ≈ 0.0
end
@testset "Mutual Information" begin
@test mutual_information(Pgg) ≈ 0.0
P = gettablearray(prob_random)
Q = getmarginalsarray(prob_random)[:, 1] * getmarginalsarray(prob_random)[:, 2]' # 0.002500000000000001
@test mutual_information(prob_random) ≈ sum(P .* log.(P ./ Q)) # So, it's != 0.0
@test mutual_information(count_random) ≈ 0.0 # It's more accurate
Pgs = probabilities(g, s)
P = gettablearray(Pgs)
Q = getmarginalsarray(Pgs)[:, 1] * getmarginalsarray(Pgs)[:, 2]' # 0.05000000000000002
@test mutual_information(Pgs) ≈
mapreduce(x -> isnan(x) ? 0.0 : x, +, P .* log.(P ./ Q))
@test mutual_information(frequencies(g, s)) ≈ 0.0 # It's more accurate
# MI(X,Y)=H(X)+H(Y)-H(X,Y)
@test mutual_information(Psr) ≈ (
marginal_entropy(Psr, margin = 1) + marginal_entropy(Psr, margin = 2) -
shannon_entropy(Psr)
)
@test mutual_information(Pss) ≈ (
marginal_entropy(Pss, margin = 1) + marginal_entropy(Pss, margin = 2) -
shannon_entropy(Pss)
)
@test mutual_information(Psr, base = 2) ≈ (
marginal_entropy(Psr, margin = 1, base = 2) +
marginal_entropy(Psr, margin = 2, base = 2) - shannon_entropy(Psr, base = 2)
)
@test mutual_information(Pss, base = 20) ≈ (
marginal_entropy(Pss, margin = 1, base = 20) +
marginal_entropy(Pss, margin = 2, base = 20) - shannon_entropy(Pss, base = 20)
)
@test isapprox(mutual_information(prob_random), 0.0, atol = 1e-15)
@test mutual_information(count_random) ≈ 0.0
@test isapprox(
mutual_information(count_random),
marginal_entropy(count_random, margin = 1) +
marginal_entropy(count_random, margin = 2) - shannon_entropy(count_random),
atol = 1e-13,
)
@testset "Using counts" begin
@test mutual_information(count_random) ≈ 0.0
@test mutual_information(frequencies(g, g)) ≈ mutual_information(Pgg)
@test mutual_information(frequencies(s, s)) ≈ mutual_information(Pss)
@test mutual_information(frequencies(s, r)) ≈ mutual_information(Psr)
end
@testset "MI(X,Y,Z)" begin
@test mutual_information(Pggg) ≈ 0.0
@test mutual_information(probabilities(g, s, r)) ≈ 0.0
@test mutual_information(probabilities(s, s, r)) ≈
mutual_information(frequencies(s, s, r))
# MI(X,Y,Z) = H(X) + H(Y) + H(Z) - H(X,Y) - H(X,Z) - H(Y,Z) + H(X,Y,Z)
@test mutual_information(Psss) ≈ mutual_information(frequencies(s, s, s))
@test mutual_information(Psss) ≈ (
marginal_entropy(Psss, margin = 1) +
marginal_entropy(Psss, margin = 2) +
marginal_entropy(Psss, margin = 3) - shannon_entropy(Pss) -
shannon_entropy(Pss) - shannon_entropy(Pss) + shannon_entropy(Psss)
)
# MI(X,Y,Z) <= min{ H(X,Y), H(X,Z), H(Y,Z) }
@test mutual_information(Psss) ≈ mutual_information(Pss)
end
@testset "Pairwise Gap Percentage" begin
@test gap_union_percentage(
frequencies(res"AA--", res"--AA", alphabet = GappedAlphabet()),
) ≈ 100.0
@test gap_intersection_percentage(
frequencies(res"AA--", res"--AA", alphabet = GappedAlphabet()),
) ≈ 0.0
@test gap_union_percentage(
frequencies(
res"AA--",
res"--AA",
alphabet = GappedAlphabet(),
weights = Weights([0.25, 0.25, 0.25, 0.25]),
),
) ≈ 100.0
@test gap_intersection_percentage(
frequencies(
res"AA--",
res"--AA",
alphabet = GappedAlphabet(),
weights = Weights([0.25, 0.25, 0.25, 0.25]),
),
) ≈ 0.0
@test gap_union_percentage(
frequencies(res"AAA-", res"AA--", alphabet = GappedAlphabet()),
) ≈ 50.0
@test gap_intersection_percentage(
frequencies(res"AAA-", res"AA--", alphabet = GappedAlphabet()),
) ≈ 25.0
@test gap_union_percentage(
frequencies(
res"AAA-",
res"AA--",
alphabet = GappedAlphabet(),
weights = Weights([0.2, 0.2, 0.2, 0.4]),
),
) ≈ 60.0
@test gap_intersection_percentage(
frequencies(
res"AAA-",
res"AA--",
alphabet = GappedAlphabet(),
weights = Weights([0.2, 0.2, 0.2, 0.4]),
),
) ≈ 40.0
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 4464 |
@testset "Iterations" begin
@testset "NMI" begin
# This is the example of MI(X, Y)/H(X, Y) from:
#
# Gao, H., Dou, Y., Yang, J., & Wang, J. (2011).
# New methods to measure residues coevolution in proteins.
# BMC bioinformatics, 12(1), 206.
aln = read_file(joinpath(DATA, "Gaoetal2011.fasta"), FASTA)
result = Float64[
0 0 0 0 0 0
0 0 0 0 0 0
0 0 0 1 1 0.296
0 0 1 0 1 0.296
0 0 1 1 0 0.296
0 0 0.296 0.296 0.296 0
]
nmi = mapcolpairfreq!(
normalized_mutual_information,
aln,
Frequencies(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
usediagonal = false,
)
nmi_mat = convert(Matrix{Float64}, getarray(nmi))
@test isapprox(nmi_mat, result, rtol = 1e-4)
nmi_t = mapseqpairfreq!(
normalized_mutual_information,
permutedims(aln),
Frequencies(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
usediagonal = false,
)
@test nmi_mat == convert(Matrix{Float64}, getarray(nmi_t))
end
@testset "Gaps" begin
function _gaps(
table::Union{
Probabilities{Float64,1,GappedAlphabet},
Frequencies{Float64,1,GappedAlphabet},
},
)
table[GAP]
end
table = ContingencyTable(Float64, Val{1}, GappedAlphabet())
gaps = read_file(joinpath(DATA, "gaps.txt"), Raw)
# THAYQAIHQV 0
# THAYQAIHQ- 0.1
# THAYQAIH-- 0.2
# THAYQAI--- 0.3
# THAYQA---- 0.4
# THAYQ----- 0.5
# THAY------ 0.6
# THA------- 0.7
# TH-------- 0.8
# T--------- 0.9
ngaps = Float64[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
colcount = mapcolfreq!(_gaps, gaps, Frequencies(table))
@test all((vec(getarray(colcount)) .- ngaps) .== 0.0)
colfract = mapcolfreq!(_gaps, gaps, Probabilities(table))
@test all((vec(getarray(colfract)) .- ngaps ./ 10.0) .== 0.0)
seqcount = mapseqfreq!(_gaps, gaps, Frequencies(table))
@test all((vec(getarray(seqcount)) .- ngaps) .== 0.0)
seqfract = mapseqfreq!(_gaps, gaps, Probabilities(table))
@test all((vec(getarray(seqfract)) .- ngaps ./ 10.0) .== 0.0)
end
@testset "Passing keyword arguments" begin
# Dummy function that returns the value of the keyword argument `karg` for testing
f(table; karg::Float64 = 0.0) = karg
msa = rand(Random.MersenneTwister(123), Residue, 4, 10)
table_1d = Frequencies(ContingencyTable(Float64, Val{1}, UngappedAlphabet()))
table_2d = Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet()))
@test sum(mapseqfreq!(f, msa, deepcopy(table_1d))) == 0.0
@test sum(mapseqfreq!(f, msa, deepcopy(table_1d), karg = 1.0)) == 4.0
@test sum(mapcolfreq!(f, msa, deepcopy(table_1d))) == 0.0
@test sum(mapcolfreq!(f, msa, deepcopy(table_1d), karg = 1.0)) == 10.0
@test sum(mapseqpairfreq!(f, msa, deepcopy(table_2d))) == 0
@test sum(mapseqpairfreq!(f, msa, deepcopy(table_2d), karg = 1.0)) == 16.0
@test sum(mapcolpairfreq!(f, msa, deepcopy(table_2d))) == 0
@test sum(mapcolpairfreq!(f, msa, deepcopy(table_2d), karg = 1.0)) == 100.0
end
@testset "mapfreq" begin
# test mapfreq using the sum function
msa = rand(Random.MersenneTwister(123), Residue, 4, 10)
sum_11 = mapfreq(sum, msa, rank = 1, dims = 1)
@test sum(sum_11) ≈ 4.0
@test size(sum_11) == (4, 1)
sum_12 = mapfreq(sum, msa, rank = 1, dims = 2)
@test sum(sum_12) ≈ 10.0
@test size(sum_12) == (1, 10)
sum_21 = mapfreq(sum, msa, rank = 2, dims = 1)
@test isnan(sum(sum_21))
@test sum(i for i in sum_21 if !isnan(i)) ≈ 12 # 4 * (4-1)
@test size(sum_21) == (4, 4)
sum_22 = mapfreq(sum, msa, rank = 2, dims = 2)
@test isnan(sum(sum_22))
@test sum(i for i in sum_22 if !isnan(i)) ≈ 90 # 10 * (10-1)
@test size(sum_22) == (10, 10)
# the default is rank = 1 and dims = 2
@test mapfreq(sum, msa, rank = 1, dims = 2) == mapfreq(sum, msa)
# probabilities=false
@test sum(mapfreq(sum, msa, dims = 1, probabilities = false)) ≈ 4 * 10.0
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 2619 |
@testset "Alphabet" begin
@testset "Creation, Iteration and getindex" begin
# Creation & Iteration
@test [i for i in UngappedAlphabet()] == Int[i for i = 1:20]
@test [i for i in GappedAlphabet()] == Int[i for i = 1:21]
@test [i for i in ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP")] == Int[i for i = 1:8]
end
@testset "getindex and names" begin
reduced = ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP")
strings = ["AILMV", "NQST", "RHK", "DE", "FWY", "C", "G", "P"]
@test length(reduced) == length(strings)
@test names(reduced) == strings
for i = 1:length(reduced)
@test reduced[strings[i]] == i
end
strings_gapped = [
"A",
"R",
"N",
"D",
"C",
"Q",
"E",
"G",
"H",
"I",
"L",
"K",
"M",
"F",
"P",
"S",
"T",
"W",
"Y",
"V",
"-",
]
@test names(GappedAlphabet()) == strings_gapped
@test names(UngappedAlphabet()) == strings_gapped[1:end-1]
for i = 1:20
@test UngappedAlphabet()[strings_gapped[i]] == i
@test GappedAlphabet()[strings_gapped[i]] == i
end
@test GappedAlphabet()["-"] == 21
@test UngappedAlphabet()["-"] == 22
end
@testset "in" begin
# Creation & Iteration
@test in(Residue('A'), UngappedAlphabet())
@test in(Residue('A'), GappedAlphabet())
@test in(Residue('A'), ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"))
@test !in(GAP, UngappedAlphabet())
@test in(GAP, GappedAlphabet())
@test !in(GAP, ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"))
@test !in(XAA, UngappedAlphabet())
@test !in(XAA, GappedAlphabet())
@test !in(XAA, ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"))
end
@testset "Show" begin
tmp = IOBuffer()
show(tmp, UngappedAlphabet())
@test String(take!(tmp)) ==
"UngappedAlphabet of length 20. Residues : res\"ARNDCQEGHILKMFPSTWYV\""
show(tmp, GappedAlphabet())
@test String(take!(tmp)) ==
"GappedAlphabet of length 21. Residues : res\"ARNDCQEGHILKMFPSTWYV-\""
show(tmp, ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP"))
@test String(take!(tmp)) ==
"ReducedAlphabet of length 8 : \"(AILMV)(NQST)(RHK)(DE)(FWY)CGP\""
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 11940 |
@testset "Annotations" begin
@testset "Empty annotations" begin
annot = Annotations()
@test length(annot.file) == 0
@test length(annot.sequences) == 0
@test length(annot.columns) == 0
@test length(annot.residues) == 0
@test length(annot) == 0
@test ncolumns(annot) == -1
@test isempty(annot)
end
@testset "Getters & Setters" begin
annot = Annotations()
example_str = "CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH"
setannotresidue!(annot, "O31698/18-71", "SS", example_str)
@test_throws AssertionError setannotresidue!(
annot,
"O31698/18-71",
String(rand('A':'Z', 51)),
example_str,
)
@test_throws AssertionError setannotresidue!(
annot,
"O31698/18-71",
"Feature Name",
example_str,
)
@test ncolumns(annot) == 37
setannotfile!(annot, "AC", "PF00571")
setannotcolumn!(annot, "SS_cons", example_str)
setannotsequence!(annot, "O31698/88-139", "OS", "Bacillus subtilis")
@test getannotfile(annot, "AC") == "PF00571"
@test getannotcolumn(annot, "SS_cons") == example_str
@test getannotsequence(annot, "O31698/88-139", "OS") == "Bacillus subtilis"
@test getannotresidue(annot, "O31698/18-71", "SS") == example_str
@test getannotfile(annot, "An", "Default") == "Default"
@test getannotcolumn(annot, "Other", "Default") == "Default"
@test getannotsequence(annot, "O31698/1-88", "OS", "Default") == "Default"
@test getannotresidue(annot, "O31698/1-88", "SS", "Default") == "Default"
@test ncolumns(annot) == 37
@test_throws DimensionMismatch setannotresidue!(
annot,
"O31698/18-71",
"AS",
"__*__",
)
@test_throws DimensionMismatch setannotcolumn!(
annot,
"SS_cons",
"---CCCCCHHHHHHHHHHHHHEEEEEEEEEEEEEEEEEEH---",
)
end
@testset "Copy, deepcopy and empty!" begin
annot = Annotations()
setannotresidue!(
annot,
"O31698/18-71",
"SS",
"CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH",
)
setannotfile!(annot, "AC", "PF00571")
setannotcolumn!(annot, "SS_cons", "CCCCCHHHHHHHHHHHHHEEEEEEEEEEEEEEEEEEH")
setannotsequence!(annot, "O31698/88-139", "OS", "Bacillus subtilis")
copy_annot = copy(annot)
@test copy_annot == annot
empty!(copy_annot)
@test ncolumns(annot) == 37
@test ncolumns(copy_annot) == -1
@test !isempty(annot)
@test isempty(copy_annot)
deepcopy_annot = deepcopy(annot)
@test deepcopy_annot == annot
empty!(deepcopy_annot)
@test ncolumns(annot) == 37
@test ncolumns(deepcopy_annot) == -1
@test !isempty(annot)
@test isempty(deepcopy_annot)
end
@testset "Filter" begin
@testset "Filter helpers" begin
str_col = "abcd"
str_map = "11,12,13,14"
selector = [4, 3, 1]
@test MSA._filter(str_col, selector) == "dca"
@test MSA._filter_mapping(str_map, selector) == "14,13,11"
end
annot = Annotations()
setannotresidue!(
annot,
"O31698/18-71",
"SS",
"CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH",
)
setannotfile!(annot, "AC", "PF00571")
setannotcolumn!(annot, "SS_cons", "CCCCCHHHHHHHHHHHHHEEEEEEEEEEEEEEEEEEH")
setannotsequence!(annot, "O31698/88-139", "OS", "Bacillus subtilis")
@test_throws AssertionError filtercolumns!(annot, [true, false, true])
#filtersequences!(annot, IndexedArray(["O31698/88-139", "O31698/18-71"]), [false, true])
#@test length( getannotsequence(annot) ) == 0
#filtersequences!(annot, IndexedArray(["O31698/88-139", "O31698/18-71"]), [true, false])
#@test length( getannotresidue(annot) ) == 0
mask = collect("CCCCCHHHHHHHHHHHHHEEEEEEEEEEEEEEEEEEH") .!= Ref('E')
filtercolumns!(annot, mask)
@test ncolumns(annot) == 19
@test getannotcolumn(annot, "SS_cons") == "CCCCCHHHHHHHHHHHHHH"
filtercolumns!(annot, [1, 2, 19])
@test ncolumns(annot) == 3
@test getannotcolumn(annot, "SS_cons") == "CCH"
end
@testset "_rename_sequences" begin
# Create an Annotations object with specific annotations
annot = Annotations()
setannotresidue!(
annot,
"O31698/18-71",
"SS",
"CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH",
)
setannotresidue!(
annot,
"O31698/72-140",
"SS",
"HHHHCCCCCEEEEEEEECCCCCCHHHHHHHHHHHHHH",
)
setannotfile!(annot, "AC", "PF00571")
setannotcolumn!(annot, "SS_cons", "CCCCCHHHHHHHHHHHHHEEEEEEEEEEEEEEEEEEH")
setannotsequence!(annot, "O31698/88-139", "OS", "Bacillus subtilis")
setannotsequence!(annot, "O31698/20-80", "OS", "Bacillus subtilis")
# Define the old to new sequence name mapping
old2new = Dict("O31698/18-71" => "NewSeq1", "O31698/88-139" => "NewSeq2")
# Call the function with the annotations and mapping
new_annotations = MSA._rename_sequences(annot, old2new)
# Test cases
# Check if the sequence names are correctly updated
@test getannotresidue(new_annotations, "NewSeq1", "SS") ==
"CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH"
@test getannotsequence(new_annotations, "NewSeq2", "OS") == "Bacillus subtilis"
# Ensure that file and column annotations are unchanged
@test getannotfile(new_annotations, "AC") == "PF00571"
@test getannotcolumn(new_annotations, "SS_cons") ==
"CCCCCHHHHHHHHHHHHHEEEEEEEEEEEEEEEEEEH"
# Check that the old sequence names are no longer present
@test isempty(getannotresidue(new_annotations, "O31698/18-71", "SS", ""))
@test isempty(getannotsequence(new_annotations, "O31698/88-139", "OS", ""))
# Check that sequences not in the mapping remain unchanged
@test getannotresidue(new_annotations, "O31698/72-140", "SS") ==
"HHHHCCCCCEEEEEEEECCCCCCHHHHHHHHHHHHHH"
@test getannotsequence(new_annotations, "O31698/20-80", "OS") == "Bacillus subtilis"
end
@testset "merge" begin
@testset "different sources" begin
original = Annotations(OrderedDict("key1" => "value1"), Dict(), Dict(), Dict())
source1 = Annotations(OrderedDict("key2" => "value2"), Dict(), Dict(), Dict())
source2 = Annotations(OrderedDict("key3" => "value3"), Dict(), Dict(), Dict())
# Test merge without modifying the original
merged = merge(original, source1, source2)
@test length(merged.file) == 3
@test merged.file["key1"] == "value1"
@test merged.file["key2"] == "value2"
@test merged.file["key3"] == "value3"
# Original should not be altered by merge
@test length(original.file) == 1
@test original.file["key1"] == "value1"
# Test merge!
merge!(original, source1, source2)
@test original == merged
end
@testset "overlapping keys" begin
original =
Annotations(OrderedDict("key" => "targetValue"), Dict(), Dict(), Dict())
source1 =
Annotations(OrderedDict("key" => "source1Value"), Dict(), Dict(), Dict())
source2 =
Annotations(OrderedDict("key" => "source2Value"), Dict(), Dict(), Dict())
# Test merge without modifying the original
merged = merge(original, source1, source2)
@test length(merged.file) == 1
@test merged.file["key"] == "source2Value"
# Original should not be altered by merge
@test length(original.file) == 1
@test original.file["key"] == "targetValue"
# Test merge!
merge!(original, source1, source2)
@test original == merged
end
@testset "empty Annotations" begin
target = Annotations(
OrderedDict("file_key1" => "file_value1"),
Dict(),
Dict(),
Dict(),
)
empty_source = Annotations()
merge!(target, empty_source)
@test length(target.file) == 1
@test target.file["file_key1"] == "file_value1"
@test isempty(target.sequences)
@test isempty(target.columns)
@test isempty(target.residues)
end
@testset "partial overlap" begin
target = Annotations(OrderedDict("key1" => "value1"), Dict(), Dict(), Dict())
source = Annotations(
OrderedDict("key1" => "new_value1", "key2" => "value2"),
Dict(),
Dict(),
Dict(),
)
merge!(target, source)
@test length(target.file) == 2
@test target.file["key1"] == "new_value1"
@test target.file["key2"] == "value2"
end
@testset "all annotation fields" begin
target = Annotations(
OrderedDict("file_key" => "file_value"),
Dict(("sequence_name", "annot_name") => "sequence_value"),
Dict("column_key" => "column_value"),
Dict(("residue_name", "residue_annot") => "residue_value"),
)
source = Annotations(
OrderedDict("file_key" => "new_file_value"),
Dict(("sequence_name", "annot_name") => "new_sequence_value"),
Dict("column_key" => "new_column_value"),
Dict(("residue_name", "residue_annot") => "new_residue_value"),
)
merge!(target, source)
@test target.file["file_key"] == "new_file_value"
@test target.sequences[("sequence_name", "annot_name")] == "new_sequence_value"
@test target.columns["column_key"] == "new_column_value"
@test target.residues[("residue_name", "residue_annot")] == "new_residue_value"
end
end
@testset "_rename_sequences" begin
# Create a sample Annotations object with predefined annotations
annotations = Annotations()
setannotresidue!(annotations, "Seq1", "AnnotType1", "Value1")
setannotresidue!(annotations, "Seq2", "AnnotType2", "Value2")
setannotsequence!(annotations, "Seq1", "AnnotType3", "Value3")
setannotsequence!(annotations, "Seq3", "AnnotType4", "Value4")
setannotfile!(annotations, "FileKey", "FileValue")
setannotcolumn!(annotations, "ColumnKey", "ValueX")
# Define the old to new sequence name mapping
old2new = Dict("Seq1" => "NewSeq1", "Seq2" => "NewSeq2")
# Call the function with the annotations and mapping
new_annotations = MSA._rename_sequences(annotations, old2new)
# Test cases
# Check if the sequence names are correctly updated
@test getannotresidue(new_annotations, "NewSeq1", "AnnotType1") == "Value1"
@test getannotresidue(new_annotations, "NewSeq2", "AnnotType2") == "Value2"
@test getannotsequence(new_annotations, "NewSeq1", "AnnotType3") == "Value3"
# Check if sequence names not in the mapping are retained
@test getannotsequence(new_annotations, "Seq3", "AnnotType4") == "Value4"
# Ensure that file and column annotations are unchanged
@test getannotfile(new_annotations, "FileKey") == "FileValue"
@test getannotcolumn(new_annotations, "ColumnKey") == "ValueX"
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 68812 | @testset "hcat" begin
simple = joinpath(DATA, "simple.fasta")
msa = read_file(simple, FASTA, generatemapping = true)
setannotresidue!(msa, "ONE", "example", "ab")
setannotresidue!(msa, "TWO", "example", "cd")
setannotresidue!(msa, "ONE", "OnlyONE", "xx")
setannotresidue!(msa, "TWO", "OnlyTWO", "yy")
msa_2 = copy(msa)
setannotcolumn!(msa_2, "example", "HE")
same_ref = Residue[
'A' 'R' 'A' 'R'
'R' 'A' 'R' 'A'
]
diff_ref = Residue[
'A' 'R' 'R' 'A'
'R' 'A' 'A' 'R'
]
annot_same = hcat(msa, msa_2)
annot_diff = hcat(msa_2, msa[[2, 1], :])
msa_same = hcat(MultipleSequenceAlignment(msa), MultipleSequenceAlignment(msa))
msa_diff =
hcat(MultipleSequenceAlignment(msa), MultipleSequenceAlignment(msa)[[2, 1], :])
@testset "Same sequence names" begin
for concatenated_msa in (annot_same, msa_same)
@test size(concatenated_msa) == (2, 4)
@test concatenated_msa == same_ref
@test sequencenames(concatenated_msa) == ["ONE", "TWO"]
@test columnnames(concatenated_msa) == ["1_1", "1_2", "2_1", "2_2"]
@test getcolumnmapping(concatenated_msa) == [1, 2, 1, 2]
if concatenated_msa isa AnnotatedMultipleSequenceAlignment
@test getsequencemapping(concatenated_msa, "ONE") == [1, 2, 1, 2]
@test getsequencemapping(concatenated_msa, "TWO") == [1, 2, 1, 2]
@test getannotresidue(concatenated_msa, "ONE", "example") == "abab"
@test getannotresidue(concatenated_msa, "TWO", "example") == "cdcd"
@test getannotresidue(concatenated_msa, "ONE", "OnlyONE") == "xxxx"
@test getannotresidue(concatenated_msa, "TWO", "OnlyTWO") == "yyyy"
@test getannotcolumn(concatenated_msa, "example") == " HE"
end
@test gethcatmapping(concatenated_msa) == [1, 1, 2, 2]
end
end
@testset "Different sequence names" begin
for concatenated_msa in (annot_diff, msa_diff)
@test size(concatenated_msa) == (2, 4)
@test concatenated_msa == diff_ref
@test sequencenames(concatenated_msa) == ["ONE_&_TWO", "TWO_&_ONE"]
@test columnnames(concatenated_msa) == ["1_1", "1_2", "2_1", "2_2"]
@test getcolumnmapping(concatenated_msa) == [1, 2, 1, 2]
if concatenated_msa isa AnnotatedMultipleSequenceAlignment
@test getsequencemapping(concatenated_msa, "ONE_&_TWO") == [1, 2, 1, 2]
@test getsequencemapping(concatenated_msa, "TWO_&_ONE") == [1, 2, 1, 2]
@test getannotresidue(concatenated_msa, "ONE_&_TWO", "example") == "abcd"
@test getannotresidue(concatenated_msa, "TWO_&_ONE", "example") == "cdab"
@test getannotresidue(concatenated_msa, "ONE_&_TWO", "OnlyONE") == "xx "
@test getannotresidue(concatenated_msa, "ONE_&_TWO", "OnlyTWO") == " yy"
@test getannotresidue(concatenated_msa, "TWO_&_ONE", "OnlyONE") == " xx"
@test getannotresidue(concatenated_msa, "TWO_&_ONE", "OnlyTWO") == "yy "
@test getannotcolumn(concatenated_msa, "example") == "HE "
end
@test gethcatmapping(concatenated_msa) == [1, 1, 2, 2]
end
end
@testset "Multiple concatenations and annot column/residue" begin
concatenated_msa = hcat(msa, msa_2, msa, msa_2, msa)
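        # Only msa_2 carries the "example" column annotation; each msa block is padded
        # with blanks, so the five-block concatenation interleaves blank pairs with "HE".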
@test getannotcolumn(concatenated_msa, "example") == " HE HE "
end
@testset "IO" begin
path = tempdir()
tmp_file = joinpath(path, ".tmp.stockholm")
try
write_file(tmp_file, annot_diff, Stockholm)
out_msa = read_file(tmp_file, Stockholm)
@test columnnames(out_msa) == columnnames(annot_diff)
@test out_msa == annot_diff
finally
if isfile(tmp_file)
rm(tmp_file)
end
end
end
@testset "Inception" begin
concatenated_in = hcat(msa, msa_2)
concatenated_diff_a = hcat(msa[[2, 1], :], msa_2)
concatenated_diff_b = hcat(msa_2, msa[[2, 1], :])
@testset "concatenated concatenated" begin
concatenated_out = hcat(concatenated_in, concatenated_in)
concat_ab = hcat(concatenated_diff_a, concatenated_diff_b)
@test size(concatenated_out) == (2, 8)
@test sequencenames(concatenated_out) == ["ONE", "TWO"]
@test columnnames(concatenated_out) ==
["1_1", "1_2", "2_1", "2_2", "3_1", "3_2", "4_1", "4_2"]
@test getcolumnmapping(concatenated_out) == [1, 2, 1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "ONE") == [1, 2, 1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "TWO") == [1, 2, 1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "ONE") == [1, 2, 1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "TWO") == [1, 2, 1, 2, 1, 2, 1, 2]
@test getannotresidue(concatenated_out, "ONE", "example") == "abababab"
@test getannotresidue(concatenated_out, "TWO", "example") == "cdcdcdcd"
@test getannotresidue(concatenated_out, "ONE", "OnlyONE") == "xxxxxxxx"
@test getannotresidue(concatenated_out, "TWO", "OnlyTWO") == "yyyyyyyy"
@test getannotcolumn(concatenated_out, "example") == " HE HE"
@test gethcatmapping(concatenated_out) == [1, 1, 2, 2, 3, 3, 4, 4]
@test size(concat_ab) == (2, 8)
@test sequencenames(concat_ab) ==
["TWO_&_ONE_&_ONE_&_TWO", "ONE_&_TWO_&_TWO_&_ONE"]
@test columnnames(concat_ab) ==
["1_1", "1_2", "2_1", "2_2", "3_1", "3_2", "4_1", "4_2"]
@test getcolumnmapping(concat_ab) == [1, 2, 1, 2, 1, 2, 1, 2]
@test getsequencemapping(concat_ab, "TWO_&_ONE_&_ONE_&_TWO") ==
[1, 2, 1, 2, 1, 2, 1, 2]
@test getsequencemapping(concat_ab, "ONE_&_TWO_&_TWO_&_ONE") ==
[1, 2, 1, 2, 1, 2, 1, 2]
@test getannotresidue(concat_ab, "TWO_&_ONE_&_ONE_&_TWO", "example") ==
"cdababcd"
@test getannotresidue(concat_ab, "ONE_&_TWO_&_TWO_&_ONE", "example") ==
"abcdcdab"
@test getannotresidue(concat_ab, "TWO_&_ONE_&_ONE_&_TWO", "OnlyONE") ==
" xxxx "
@test getannotresidue(concat_ab, "TWO_&_ONE_&_ONE_&_TWO", "OnlyTWO") ==
"yy yy"
@test getannotcolumn(concat_ab, "example") == " HEHE "
@test gethcatmapping(concat_ab) == [1, 1, 2, 2, 3, 3, 4, 4]
end
@testset "concatenated non_concatenated" begin
concatenated_msas = [hcat(concatenated_in, msa), hcat(msa, concatenated_in)]
for (i, concatenated_out) in enumerate(concatenated_msas)
@test size(concatenated_out) == (2, 6)
@test sequencenames(concatenated_out) == ["ONE", "TWO"]
@test columnnames(concatenated_out) ==
["1_1", "1_2", "2_1", "2_2", "3_1", "3_2"]
@test getcolumnmapping(concatenated_out) == [1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "ONE") == [1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "TWO") == [1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "ONE") == [1, 2, 1, 2, 1, 2]
@test getsequencemapping(concatenated_out, "TWO") == [1, 2, 1, 2, 1, 2]
@test getannotresidue(concatenated_out, "ONE", "example") == "ababab"
@test getannotresidue(concatenated_out, "TWO", "example") == "cdcdcd"
@test getannotresidue(concatenated_out, "ONE", "OnlyONE") == "xxxxxx"
@test getannotresidue(concatenated_out, "TWO", "OnlyTWO") == "yyyyyy"
if i == 1
@test getannotcolumn(concatenated_out, "example") == " HE "
else
@test getannotcolumn(concatenated_out, "example") == " HE"
end
@test gethcatmapping(concatenated_out) == [1, 1, 2, 2, 3, 3]
end
concat_a = hcat(concatenated_diff_a, msa)
@test size(concat_a) == (2, 6)
@test sequencenames(concat_a) == ["TWO_&_ONE_&_ONE", "ONE_&_TWO_&_TWO"]
@test columnnames(concat_a) == ["1_1", "1_2", "2_1", "2_2", "3_1", "3_2"]
@test getcolumnmapping(concat_a) == [1, 2, 1, 2, 1, 2]
@test getsequencemapping(concat_a, "TWO_&_ONE_&_ONE") == [1, 2, 1, 2, 1, 2]
@test getsequencemapping(concat_a, "ONE_&_TWO_&_TWO") == [1, 2, 1, 2, 1, 2]
@test getannotresidue(concat_a, "TWO_&_ONE_&_ONE", "example") == "cdabab"
@test getannotresidue(concat_a, "ONE_&_TWO_&_TWO", "example") == "abcdcd"
@test getannotresidue(concat_a, "TWO_&_ONE_&_ONE", "OnlyONE") == " xxxx"
@test getannotresidue(concat_a, "TWO_&_ONE_&_ONE", "OnlyTWO") == "yy "
@test getannotcolumn(concat_a, "example") == " HE "
@test gethcatmapping(concat_a) == [1, 1, 2, 2, 3, 3]
end
end
end
@testset "vcat" begin
msa = read_file(joinpath(DATA, "simple.fasta"), FASTA, generatemapping = true)
msa2 = read_file(joinpath(DATA, "Gaoetal2011.fasta"), FASTA, generatemapping = true)
@testset "seqnames" begin
seqnames = sequencenames(msa)
seqnames2 = sequencenames(msa2)
new_names, label_map = MIToS.MSA._v_concatenated_seq_names(msa, msa)
@test new_names == ["1_ONE", "1_TWO", "2_ONE", "2_TWO"]
        @test isempty(label_map) # there are no previous prefixes/labels
new_names, label_map = MIToS.MSA._v_concatenated_seq_names(msa, msa, msa)
@test new_names == ["1_ONE", "1_TWO", "2_ONE", "2_TWO", "3_ONE", "3_TWO"]
@test isempty(label_map)
new_names, label_map = MIToS.MSA._v_concatenated_seq_names(msa, msa2)
@test new_names == vcat(["1_$n" for n in seqnames], ["2_$n" for n in seqnames2])
@test isempty(label_map)
new_names, label_map = MIToS.MSA._v_concatenated_seq_names(msa2, msa)
@test new_names == vcat(["1_$n" for n in seqnames2], ["2_$n" for n in seqnames])
@test isempty(label_map)
@testset "Sequence name mapping" begin
new_names, label_mapping = MIToS.MSA._v_concatenated_seq_names(msa, msa)
mapping = MIToS.MSA._get_seqname_mapping_vcat(new_names, msa, msa)
            @test isempty(label_mapping) # there are no previous prefixes/labels
@test length(mapping) == 4
@test mapping[(1, "ONE")] == "1_ONE"
@test mapping[(1, "TWO")] == "1_TWO"
@test mapping[(2, "ONE")] == "2_ONE"
@test mapping[(2, "TWO")] == "2_TWO"
end
end
@testset "vcat examples" begin
setannotresidue!(msa, "ONE", "res_example", "ab")
setannotcolumn!(msa, "example", "HE")
concatenated_11 = vcat(msa, msa)
setannotfile!(msa, "file_example", "file annotation")
setannotsequence!(msa, "ONE", "seq_example", "seq annotation")
msa_2 = copy(msa)
setannotfile!(msa_2, "file_example_msa2", "file annotation msa2")
setannotsequence!(msa_2, "ONE", "seq_example_msa2", "seq annotation msa2")
setannotresidue!(msa_2, "ONE", "res_example_msa2", "AB")
setannotcolumn!(msa_2, "example_msa2", "he")
concatenated = vcat(msa, msa_2)
# matrix
@test size(concatenated) == (4, 2)
@test concatenated == Residue[
'A' 'R'
'R' 'A'
'A' 'R'
'R' 'A'
]
# names
@test sequencenames(concatenated) == ["1_ONE", "1_TWO", "2_ONE", "2_TWO"]
@test columnnames(concatenated) == ["1", "2"]
@testset "unannotated aligned objects" begin
msa_unannot = MultipleSequenceAlignment(msa)
vcat_unannot = vcat(msa_unannot, msa_unannot)
@test size(vcat_unannot) == (4, 2)
@test vcat_unannot == Residue[
'A' 'R'
'R' 'A'
'A' 'R'
'R' 'A'
]
@test sequencenames(vcat_unannot) == ["1_ONE", "1_TWO", "2_ONE", "2_TWO"]
@test columnnames(vcat_unannot) == ["1", "2"]
end
@testset "vcat annotations" begin
# sequence annotations: disambiguated by msa number (prefix)
@test getannotsequence(concatenated, "1_ONE", "seq_example") == "seq annotation"
@test getannotsequence(concatenated, "2_ONE", "seq_example_msa2") ==
"seq annotation msa2"
@test getsequencemapping(concatenated, "1_ONE") == [1, 2]
@test getsequencemapping(concatenated, "2_ONE") == [1, 2]
# residue annotations: disambiguated by sequence name
@test getannotresidue(concatenated, "1_ONE", "res_example") == "ab"
@test getannotresidue(concatenated, "2_ONE", "res_example") == "ab"
@test getannotresidue(concatenated, "2_ONE", "res_example_msa2") == "AB"
# column annotations: disambiguated by msa number (prefix)
@test getannotcolumn(concatenated, "1_example") == "HE"
@test getannotcolumn(concatenated, "2_example") == "HE"
@test getannotcolumn(concatenated, "2_example_msa2") == "he"
end
@testset "Inception" begin
concatenated_111 = vcat(msa, concatenated_11)
@test size(concatenated_111) == (6, 2)
@test concatenated_111 == Residue[
'A' 'R'
'R' 'A'
'A' 'R'
'R' 'A'
'A' 'R'
'R' 'A'
]
@test sequencenames(concatenated_111) ==
["1_ONE", "1_TWO", "2_ONE", "2_TWO", "3_ONE", "3_TWO"]
@test columnnames(concatenated_111) == ["1", "2"]
# sequence annotations: disambiguated by msa number (prefix)
@test getannotsequence(concatenated_111, "1_ONE", "SeqMap") == "1,2"
@test getannotsequence(concatenated_111, "2_ONE", "SeqMap") == "1,2"
@test getannotsequence(concatenated_111, "3_ONE", "SeqMap") == "1,2"
# residue annotations: disambiguated by sequence name
@test getannotresidue(concatenated_111, "1_ONE", "res_example") == "ab"
@test getannotresidue(concatenated_111, "2_ONE", "res_example") == "ab"
@test getannotresidue(concatenated_111, "3_ONE", "res_example") == "ab"
# file annotations: disambiguated by msa number (prefix)
@test getannotfile(concatenated_111, "1_NCol") == "2"
@test getannotfile(concatenated_111, "2_NCol") == "2"
@test getannotfile(concatenated_111, "3_NCol") == "2"
# column annotations: disambiguated by msa number (prefix)
@test getannotcolumn(concatenated_111, "1_example") == "HE"
@test getannotcolumn(concatenated_111, "2_example") == "HE"
@test getannotcolumn(concatenated_111, "3_example") == "HE"
end
end
end
@testset "_find_gaps" begin
@test MSA._find_gaps([2, 5, 6, 7, 8], 10) == [(2, 0), (5, 2), (11, 8)]
end
@testset "join MSAs" begin
msa = read_file(joinpath(DATA, "simple.fasta"), FASTA, generatemapping = true)
msa2 = read_file(joinpath(DATA, "Gaoetal2011.fasta"), FASTA, generatemapping = true)
@testset "column gaps" begin
h_gaps = MIToS.MSA._gap_columns(msa, 3)
@test all(==(GAP), h_gaps)
@test size(h_gaps) == (2, 3)
h_concatenated = hcat(msa[:, 1:1], h_gaps, msa[:, 2:2])
@test size(h_concatenated) == (2, 5)
@test h_concatenated == Residue[
'A' '-' '-' '-' 'R'
'R' '-' '-' '-' 'A'
]
@test sequencenames(h_concatenated) == ["ONE", "TWO"]
@test getcolumnmapping(h_concatenated) == [1, 0, 0, 0, 2]
end
@testset "sequence gaps" begin
v_gaps = MIToS.MSA._gap_sequences(msa, ["SEQ1", "SEQ2", "SEQ3"])
@test all(==(GAP), v_gaps)
@test size(v_gaps) == (3, 2)
v_concatenated = vcat(msa[1:1, :], v_gaps, msa[2:2, :])
@test size(v_concatenated) == (5, 2)
@test v_concatenated == Residue[
'A' 'R'
'-' '-'
'-' '-'
'-' '-'
'R' 'A'
]
vcat_seqnames = sequencenames(v_concatenated)
@test vcat_seqnames == ["1_ONE", "2_SEQ1", "2_SEQ2", "2_SEQ3", "3_TWO"]
@test getsequencemapping(v_concatenated, "1_ONE") == [1, 2]
@test getsequencemapping(v_concatenated, "2_SEQ1") == [0, 0]
end
@testset "insert gap sequences" begin
at_the_start = MIToS.MSA._insert_gap_sequences(msa, ["SEQ1", "SEQ2", "SEQ3"], 1)
in_the_middle = MIToS.MSA._insert_gap_sequences(msa, ["SEQ1", "SEQ2", "SEQ3"], 2)
at_the_end = MIToS.MSA._insert_gap_sequences(msa, ["SEQ1", "SEQ2", "SEQ3"], 3)
for gapped_msa in [at_the_start, in_the_middle, at_the_end]
@test size(gapped_msa) == (5, 2)
@test sum(gapped_msa .== GAP) == 6 # 3 x 2
@test sort(sequencenames(gapped_msa)) == ["ONE", "SEQ1", "SEQ2", "SEQ3", "TWO"]
end
# check that the annotations are the same
delete_annotated_modifications!(at_the_start)
delete_annotated_modifications!(in_the_middle)
delete_annotated_modifications!(at_the_end)
@test annotations(at_the_start) == annotations(at_the_end)
@test annotations(at_the_start) == annotations(in_the_middle)
# check the position of the gap blocks
@test sum(at_the_start[1:3, :] .== GAP) == 6
@test sum(in_the_middle[2:4, :] .== GAP) == 6
@test sum(at_the_end[3:5, :] .== GAP) == 6
end
@testset "insert gap columns" begin
at_the_start = MIToS.MSA._insert_gap_columns(msa, 3, 1)
in_the_middle = MIToS.MSA._insert_gap_columns(msa, 3, 2)
at_the_end = MIToS.MSA._insert_gap_columns(msa, 3, 3)
for gapped_msa in [at_the_start, in_the_middle, at_the_end]
@test size(gapped_msa) == (2, 5) # (2 , 2 + 3)
@test sum(gapped_msa .== GAP) == 6 # 2 x 3
@test sort(columnnames(gapped_msa))[1:2] == ["1", "2"] # the original columns
@test sum(startswith.(columnnames(gapped_msa), "gap:")) == 3 # the gap columns
end
# Check the position of the gap columns
@test sum(at_the_start[:, 1:3] .== GAP) == 6 # gap columns at start
@test sum(in_the_middle[:, 2:4] .== GAP) == 6 # gap columns in the middle
@test sum(at_the_end[:, 3:5] .== GAP) == 6 # gap columns at end
# Check if original columns are preserved correctly
@test at_the_start[:, [4, 5]] == msa
@test in_the_middle[:, [1, 5]] == msa
@test at_the_end[:, [1, 2]] == msa
# Check for correct column mapping after inserting gaps
@test getcolumnmapping(at_the_start) == [0, 0, 0, 1, 2]
@test getcolumnmapping(in_the_middle) == [1, 0, 0, 0, 2]
@test getcolumnmapping(at_the_end) == [1, 2, 0, 0, 0]
# Check for correct sequence mapping after inserting gaps
@test getsequencemapping(at_the_start, "ONE") == [0, 0, 0, 1, 2]
@test getsequencemapping(in_the_middle, "ONE") == [1, 0, 0, 0, 2]
@test getsequencemapping(at_the_end, "ONE") == [1, 2, 0, 0, 0]
# Check annotations are preserved correctly; this operation should not add or
# delete annotations
@test length(annotations(at_the_start)) == length(annotations(msa))
@test length(annotations(in_the_middle)) == length(annotations(msa))
@test length(annotations(at_the_end)) == length(annotations(msa))
@testset "MSA without NCol annotation" begin
msa_no_NCol = deepcopy(msa)
annotfile = getannotfile(msa_no_NCol)
delete!(annotfile, "NCol")
@test getannotfile(msa_no_NCol, "NCol", "") == ""
# test that no NCol annotation is added
gapped_msa = MIToS.MSA._insert_gap_columns(msa_no_NCol, 3, 1)
@test !haskey(annotations(gapped_msa).file, "NCol")
end
end
@testset "Insert gaps: Unannotated MSAs" begin
ref_gapped_msa_seq = MIToS.MSA._insert_gap_sequences(msa, ["SEQ1", "SEQ2"], 3)
ref_gapped_msa_col = MIToS.MSA._insert_gap_columns(msa, 3, 3)
for msa_unannot in [MultipleSequenceAlignment(msa), namedmatrix(msa)]
gapped_msa_seq =
MIToS.MSA._insert_gap_sequences(msa_unannot, ["SEQ1", "SEQ2"], 3)
gapped_msa_col = MIToS.MSA._insert_gap_columns(msa_unannot, 3, 3)
# tests to ensure sequences and columns are inserted correctly
@test gapped_msa_seq == ref_gapped_msa_seq
@test gapped_msa_col == ref_gapped_msa_col
# test for equal sequence and column names
@test sequencenames(gapped_msa_seq) == sequencenames(ref_gapped_msa_seq)
@test columnnames(gapped_msa_seq) == columnnames(ref_gapped_msa_seq)
@test sequencenames(gapped_msa_col) == sequencenames(ref_gapped_msa_col)
@test columnnames(gapped_msa_col)[1:2] == columnnames(ref_gapped_msa_col)[1:2]
end
end
@testset "Insert gaps: Invalid gap positions" begin
# Position less than 1
@test_throws ArgumentError MIToS.MSA._insert_gap_sequences(msa, ["SEQ1", "SEQ2"], 0)
@test_throws ArgumentError MIToS.MSA._insert_gap_columns(msa, 3, 0)
        # Positions greater than the number of sequences/columns are valid and are used to
# insert gaps at the end of the MSA
end
@testset "Insert gaps: Single sequence/column MSAs" begin
single_seq_msa = msa[1:1, :]
single_col_msa = msa[:, 1:1]
# Test for a single sequence
gapped_seq_seq = MIToS.MSA._insert_gap_sequences(single_seq_msa, ["SEQ1"], 2) # at the end
@test size(gapped_seq_seq) == (2, ncolumns(msa))
@test gapped_seq_seq == Residue['A' 'R'; '-' '-']
gapped_seq_col = MIToS.MSA._insert_gap_columns(single_seq_msa, 1, 2) # at the middle
@test size(gapped_seq_col) == (1, 3)
@test gapped_seq_col == Residue[
'A' '-' 'R'
]
# Test for a single column
gapped_col_col = MIToS.MSA._insert_gap_columns(single_col_msa, 1, 2) # at the end
@test size(gapped_col_col) == (nsequences(msa), 2)
@test gapped_col_col == Residue['A' '-'; 'R' '-']
gapped_col_seq = MIToS.MSA._insert_gap_sequences(single_col_msa, ["SEQ1"], 2) # at the middle
@test size(gapped_col_seq) == (3, 1)
@test vec(gapped_col_seq) == Residue['A', '-', 'R']
end
@testset "_renumber_sequence_gaps" begin
# Create a mock MSA
M = rand(Residue, 5, 7)
seqnames = ["seq1", "gap:3", "seq2", "gap:1", "gap:2"]
column_names = ["col1", "col2", "col3", "col4", "col5", "col6", "col7"]
named_matrix = MSA._namedresiduematrix(M, seqnames, column_names)
annot = Annotations()
setannotsequence!(annot, "gap:1", "AnnotationType1", "AnnotationValue1")
setannotsequence!(annot, "seq1", "AnnotationType2", "AnnotationValue2")
msa = AnnotatedMultipleSequenceAlignment(named_matrix, annot)
# Apply the _renumber_sequence_gaps function
new_msa = MSA._renumber_sequence_gaps(msa)
# Test if the gap sequences are renumbered correctly
@test sequencenames(new_msa) == ["seq1", "gap:1", "seq2", "gap:2", "gap:3"]
# Verify that annotations are correctly transferred
# "gap:1" in original becomes "gap:2" in new MSA
@test getannotsequence(new_msa, "gap:2", "AnnotationType1") == "AnnotationValue1"
# now, there is no annotations for "gap:1"
@test isempty(getannotsequence(new_msa, "gap:1", "AnnotationType1", ""))
# "seq1" remains unchanged
@test getannotsequence(new_msa, "seq1", "AnnotationType2") == "AnnotationValue2"
# Verify that the rest remains the same
@test getresidues(new_msa) == getresidues(msa)
@test columnnames(new_msa) == columnnames(msa)
end
@testset "_renumber_column_gaps" begin
# Create a mock MSA
M = rand(Residue, 2, 5)
seqnames = ["seq1", "seq2"]
column_names = ["col1", "gap:3", "col2", "gap:1", "gap:2"]
named_matrix = MSA._namedresiduematrix(M, seqnames, column_names)
msa = AnnotatedMultipleSequenceAlignment(named_matrix, Annotations())
# Apply the _renumber_column_gaps function
new_msa = MSA._renumber_column_gaps(msa)
# Test if the gap columns are renumbered correctly
@test columnnames(new_msa) == ["col1", "gap:1", "col2", "gap:2", "gap:3"]
end
@testset "_insert_sorted_gaps" begin
# NOTE: The default block_position is :before and the default axis is 1
msa62 = msa2[:, 1:2]
msa26 = msa2[1:2, :]
@testset "gap sequences at the beginning and at the end" begin
#
# 1 -
# 2 -
# (3,1)
# (4,2)
# (5,3)
# (6,4)
# - 5
# - 6
#
a = MSA._insert_sorted_gaps(msa62, msa62, [3, 4, 5, 6], [1, 2, 3, 4])
@test size(a) == (8, 2)
@test all(a[1:6, :] .!= GAP)
@test all(a[7:8, :] .== GAP)
b = MSA._insert_sorted_gaps(msa62, msa62, [1, 2, 3, 4], [3, 4, 5, 6])
@test size(b) == (8, 2)
@test all(b[1:2, :] .== GAP)
@test all(b[3:8, :] .!= GAP)
end
@testset "gap columns at the beginning and at the end" begin
a = MSA._insert_sorted_gaps(msa26, msa26, [3, 4, 5, 6], [1, 2, 3, 4], axis = 2)
@test size(a) == (2, 8)
@test all(a[:, 1:6] .!= GAP)
@test all(a[:, 7:8] .== GAP)
b = MSA._insert_sorted_gaps(msa26, msa26, [1, 2, 3, 4], [3, 4, 5, 6], axis = 2)
@test size(b) == (2, 8)
@test all(b[:, 1:2] .== GAP)
@test all(b[:, 3:8] .!= GAP)
end
@testset "the unique matches are between the first and the last sequence" begin
#
# (1,1)
# 2 -
# 3 -
# 4 -
# 5 -
# - 2
# - 3
# - 4
# - 5
# (6,6)
#
a = MSA._insert_sorted_gaps(
msa62,
msa62,
[1, 6],
[1, 6],
block_position = :after,
)
@test size(a) == (10, 2)
@test all(a[1:5, :] .!= GAP)
@test all(a[6:9, :] .== GAP)
@test all(a[10:10, :] .!= GAP)
b = MSA._insert_sorted_gaps(msa62, msa62, [1, 6], [1, 6])
@test size(b) == (10, 2)
@test all(b[1:1, :] .!= GAP)
@test all(b[2:5, :] .== GAP)
@test all(b[6:10, :] .!= GAP)
end
@testset "the unique matches are between the first and the last column" begin
a = MSA._insert_sorted_gaps(
msa26,
msa26,
[1, 6],
[1, 6],
axis = 2,
block_position = :after,
)
@test size(a) == (2, 10)
@test all(a[:, 1:5] .!= GAP)
@test all(a[:, 6:9] .== GAP)
@test all(a[:, 10:10] .!= GAP)
b = MSA._insert_sorted_gaps(msa26, msa26, [1, 6], [1, 6], axis = 2) # default block_position=:before
@test size(b) == (2, 10)
@test all(b[:, 1:1] .!= GAP)
@test all(b[:, 2:5] .== GAP)
@test all(b[:, 6:10] .!= GAP)
end
@testset "gap sequences, gap sequences everywhere" begin
#
# - 1
# (1,2)
# - 3
# - 4
# (2,5)
# 3 -
# 4 -
# 5 -
# 6 -
# - 6
#
a = MSA._insert_sorted_gaps(
msa62,
msa62,
[1, 2],
[2, 5],
block_position = :after,
)
@test size(a) == (10, 2)
@test all(a[1:1, :] .== GAP)
@test all(a[2:2, :] .!= GAP)
@test all(a[3:4, :] .== GAP)
@test all(a[5:9, :] .!= GAP)
@test all(a[10:10, :] .== GAP)
b = MSA._insert_sorted_gaps(msa62, msa62, [2, 5], [1, 2])
@test size(b) == (10, 2)
@test all(b[1:5, :] .!= GAP)
@test all(b[6:9, :] .== GAP)
@test all(b[10:10, :] .!= GAP)
end
@testset "gap columns, gap columns everywhere" begin
a = MSA._insert_sorted_gaps(
msa26,
msa26,
[1, 2],
[2, 5],
axis = 2,
block_position = :after,
)
@test size(a) == (2, 10)
@test all(a[:, 1:1] .== GAP)
@test all(a[:, 2:2] .!= GAP)
@test all(a[:, 3:4] .== GAP)
@test all(a[:, 5:9] .!= GAP)
@test all(a[:, 10:10] .== GAP)
b = MSA._insert_sorted_gaps(msa26, msa26, [2, 5], [1, 2], axis = 2)
@test size(b) == (2, 10)
@test all(b[:, 1:5] .!= GAP)
@test all(b[:, 6:9] .== GAP)
@test all(b[:, 10:10] .!= GAP)
end
end
@testset "_add_gaps_in_b" begin
msa62 = msa2[:, 1:2] # msa62 for sequences test
msa26 = msa2[1:2, :] # msa26 for columns test
@testset "gaps at the beginning and at the end" begin
# 1 2 3 4 5 6 - -
# - - 1 2 3 4 5 6
# a: 1 2 3 4 5 6
# b: - - 1 2 3 4
# sequences
b = MSA._add_gaps_in_b(msa62, msa62, 3:6, 1:4)
@test size(b) == (6, 2)
@test all(b[1:2, :] .== GAP)
@test all(b[3:6, :] .!= GAP)
# columns
b = MSA._add_gaps_in_b(msa26, msa26, 3:6, 1:4, 2)
@test size(b) == (2, 6)
@test all(b[:, 1:2] .== GAP)
@test all(b[:, 3:6] .!= GAP)
# a: 3 4 5 6 - -
# b: 1 2 3 4 5 6
# sequences
a = MSA._add_gaps_in_b(msa62, msa62, 1:4, 3:6)
@test size(a) == (6, 2)
@test all(a[1:4, :] .!= GAP)
@test all(a[5:6, :] .== GAP)
# columns
a = MSA._add_gaps_in_b(msa26, msa26, 1:4, 3:6, 2)
@test size(a) == (2, 6)
@test all(a[:, 1:4] .!= GAP)
@test all(a[:, 5:6] .== GAP)
end
@testset "unique matches between first and last sequence/column" begin
# 1 2 3 4 5 - - - - 6
# 1 - - - - 2 3 4 5 6
# a: 1 2 3 4 5 6
# b: 1 - - - - 6
# sequences
b_seq = MSA._add_gaps_in_b(msa62, msa62, [1, 6], [1, 6])
@test size(b_seq) == (6, 2)
@test all(b_seq[1, :] .!= GAP)
@test all(b_seq[2:5, :] .== GAP)
@test all(b_seq[6, :] .!= GAP)
# columns
b_col = MSA._add_gaps_in_b(msa26, msa26, [1, 6], [1, 6], 2)
@test size(b_col) == (2, 6)
@test all(b_col[:, 1] .!= GAP)
@test all(b_col[:, 2:5] .== GAP)
@test all(b_col[:, 6] .!= GAP)
end
@testset "gap sequences, gap sequences everywhere" begin
# - 1 - - 2 3 4 5 6 -
# 1 2 3 4 5 - - - - 6
# a: 1 2 3 4 5 6
# b: 2 5 - - - -
# sequences
b_seq = MSA._add_gaps_in_b(msa62, msa62, [1, 2], [2, 5])
@test size(b_seq) == (6, 2)
@test all(b_seq[1:2, :] .!= GAP)
@test all(b_seq[3:6, :] .== GAP)
# columns
b_col = MSA._add_gaps_in_b(msa26, msa26, [1, 2], [2, 5], 2)
@test size(b_col) == (2, 6)
@test all(b_col[:, 1:2] .!= GAP)
@test all(b_col[:, 3:6] .== GAP)
# a: - 1 - - 2 -
# b: 1 2 3 4 5 6
# sequences
a_seq = MSA._add_gaps_in_b(msa62, msa62, [2, 5], [1, 2])
@test size(a_seq) == (6, 2)
@test all(a_seq[1, :] .== GAP)
@test all(a_seq[2, :] .!= GAP)
@test all(a_seq[3:4, :] .== GAP)
@test all(a_seq[5, :] .!= GAP)
@test all(a_seq[6, :] .== GAP)
# columns
a_col = MSA._add_gaps_in_b(msa26, msa26, [2, 5], [1, 2], 2)
@test size(a_col) == (2, 6)
@test all(a_col[:, 1] .== GAP)
@test all(a_col[:, 2] .!= GAP)
@test all(a_col[:, 3:4] .== GAP)
@test all(a_col[:, 5] .!= GAP)
@test all(a_col[:, 6] .== GAP)
end
end
@testset "join MSAs" begin
msa62 = msa2[:, 1:2] # msa62 for sequences test
msa26 = msa2[1:2, :] # msa26 for columns test
@testset "gaps at the beginning and at the end" begin
# a: 1 2 3 4 5 6 - -
# b: - - 1 2 3 4 5 6
@testset "inner join" begin
# a: 3 4 5 6
# b: 1 2 3 4
@testset "sequences" begin
ab = join_msas(msa62, msa62, 3:6 .=> 1:4, kind = :inner, axis = 1)
@test size(ab) == (4, 4)
@test all(ab .!= GAP)
@test sequencenames(ab) ==
["SEQ3_&_SEQ1", "SEQ4_&_SEQ2", "SEQ5_&_SEQ3", "SEQ6_&_SEQ4"]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
end
@testset "columns" begin
ab = join_msas(msa26, msa26, 3:6 .=> 1:4, kind = :inner, axis = 2)
@test size(ab) == (4, 4)
@test all(ab .!= GAP)
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["3", "4", "5", "6"]
end
end
@testset "outer join" begin
# a: 1 2 3 4 5 6 - -
# b: - - 1 2 3 4 5 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, 3:6 .=> 1:4) # default: kind=:outer, axis=1
@test size(ab) == (8, 4)
@test sum(ab .== GAP, dims = 1) == [2 2 2 2]
@test vec(sum(ab .== GAP, dims = 2)) == [2, 2, 0, 0, 0, 0, 2, 2]
@test sequencenames(ab) == [
"SEQ1_&_gap:1",
"SEQ2_&_gap:2",
"SEQ3_&_SEQ1",
"SEQ4_&_SEQ2",
"SEQ5_&_SEQ3",
"SEQ6_&_SEQ4",
"gap:1_&_SEQ5",
"gap:2_&_SEQ6",
]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
end
@testset "columns" begin
ab = join_msas(msa26, msa26, 3:6 .=> 1:4, axis = 2) # default: kind=:outer
@test size(ab) == (4, 8)
@test vec(sum(ab .== GAP, dims = 2)) == [2, 2, 2, 2]
@test vec(sum(ab .== GAP, dims = 1)) == [2, 2, 0, 0, 0, 0, 2, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) ==
["1", "2", "3", "4", "5", "6", "gap:1", "gap:2"]
end
end
@testset "left join" begin
# a: 1 2 3 4 5 6
# b: - - 1 2 3 4
@testset "sequences" begin
ab = join_msas(msa62, msa62, 3:6 .=> 1:4, kind = :left, axis = 1)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [0 0 2 2]
@test vec(sum(ab .== GAP, dims = 2)) == [2, 2, 0, 0, 0, 0]
@test sequencenames(ab) == [
"SEQ1_&_gap:1",
"SEQ2_&_gap:2",
"SEQ3_&_SEQ1",
"SEQ4_&_SEQ2",
"SEQ5_&_SEQ3",
"SEQ6_&_SEQ4",
]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
end
@testset "columns" begin
ab = join_msas(msa26, msa26, 3:6 .=> 1:4, kind = :left, axis = 2)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 2, 2]
@test vec(sum(ab .== GAP, dims = 1)) == [2, 2, 0, 0, 0, 0]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "2", "3", "4", "5", "6"]
end
end
@testset "right join" begin
# a: 3 4 5 6 - -
# b: 1 2 3 4 5 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, 3:6 .=> 1:4, kind = :right, axis = 1)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [2 2 0 0]
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 0, 0, 2, 2]
@test sequencenames(ab) == [
"SEQ3_&_SEQ1",
"SEQ4_&_SEQ2",
"SEQ5_&_SEQ3",
"SEQ6_&_SEQ4",
"gap:1_&_SEQ5",
"gap:2_&_SEQ6",
]
end
@testset "columns" begin
ab = join_msas(msa26, msa26, 3:6 .=> 1:4, kind = :right, axis = 2)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [2, 2, 0, 0]
@test vec(sum(ab .== GAP, dims = 1)) == [0, 0, 0, 0, 2, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["3", "4", "5", "6", "gap:1", "gap:2"]
end
end
end
@testset "unique matches between first and last sequence/column" begin
# 1 2 3 4 5 - - - - 6
# 1 - - - - 2 3 4 5 6
@testset "inner join" begin
# a: 1 6
# b: 1 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 6] .=> [1, 6], kind = :inner, axis = 1)
@test size(ab) == (2, 4)
@test all(ab .!= GAP)
@test sequencenames(ab) == ["SEQ1", "SEQ6"]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
@test ab == Residue['D' 'A' 'D' 'A'; 'D' 'A' 'D' 'A']
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [1, 6] .=> [1, 6], kind = :inner, axis = 2)
@test size(ab) == (4, 2)
@test all(ab .!= GAP)
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "6"]
@test ab == Residue['D' 'E'; 'D' 'F'; 'D' 'E'; 'D' 'F']
end
end
@testset "left join" begin
# a: 1 2 3 4 5 6
# b: 1 - - - - 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 6] .=> [1, 6], kind = :left)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [0 0 4 4]
@test vec(sum(ab .== GAP, dims = 2)) == [0, 2, 2, 2, 2, 0]
@test sequencenames(ab) ==
["SEQ1", "SEQ2", "SEQ3", "SEQ4", "SEQ5", "SEQ6"]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [1, 6] .=> [1, 6], kind = :left, axis = 2)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 4, 4]
@test vec(sum(ab .== GAP, dims = 1)) == [0, 2, 2, 2, 2, 0]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "2", "3", "4", "5", "6"]
end
end
@testset "right join" begin
# a: 1 - - - - 6
# b: 1 2 3 4 5 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 6] .=> [1, 6], kind = :right)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [4 4 0 0]
@test vec(sum(ab .== GAP, dims = 2)) == [0, 2, 2, 2, 2, 0]
@test sequencenames(ab) ==
["SEQ1", "SEQ2", "SEQ3", "SEQ4", "SEQ5", "SEQ6"]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [1, 6] .=> [1, 6], kind = :right, axis = 2)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [4, 4, 0, 0]
@test vec(sum(ab .== GAP, dims = 1)) == [0, 2, 2, 2, 2, 0]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "gap:1", "gap:2", "gap:3", "gap:4", "6"]
end
end
@testset "outer join" begin
# a: 1 2 3 4 5 - - - - 6
# b: 1 - - - - 2 3 4 5 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [(1, 1), (6, 6)], kind = :outer, axis = 1)
@test size(ab) == (10, 4)
@test sum(ab .== GAP, dims = 1) == [4 4 4 4]
@test vec(sum(ab .== GAP, dims = 2)) == [0, 2, 2, 2, 2, 2, 2, 2, 2, 0]
@test sequencenames(ab) == [
"SEQ1",
"SEQ2_&_gap:1",
"SEQ3_&_gap:2",
"SEQ4_&_gap:3",
"SEQ5_&_gap:4",
"gap:1_&_SEQ2",
"gap:2_&_SEQ3",
"gap:3_&_SEQ4",
"gap:4_&_SEQ5",
"SEQ6",
]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
# kind=:outer and axis=1 are the default values
@test ab == join_msas(msa62, msa62, [(1, 1), (6, 6)])
@testset "pairing types: _find_pairing_positions" begin
                        # This is mostly to test the _find_pairing_positions function
                        # that is used by join at the beginning of the function.
# Positions
# ---------
# Vector of pairs
@test ab == join_msas(msa62, msa62, [1 => 1, 6 => 6])
# Vector of tuples
@test ab == join_msas(msa62, msa62, [(1, 1), (6, 6)])
# Vector of vectors
@test ab == join_msas(msa62, msa62, [[1, 1], [6, 6]])
# Vector of named tuples
@test ab ==
join_msas(msa62, msa62, [(a = 1, b = 1), (a = 6, b = 6)])
# Tuple of pairs
@test ab == join_msas(msa62, msa62, (1 => 1, 6 => 6))
# Tuple of tuples
@test ab == join_msas(msa62, msa62, ((1, 1), (6, 6)))
# Tuple of vectors
@test ab == join_msas(msa62, msa62, ([1, 1], [6, 6]))
# Tuple of named tuples
@test ab ==
join_msas(msa62, msa62, ((a = 1, b = 1), (a = 6, b = 6)))
# Sequence names
# --------------
# Vector of pairs
@test ab ==
join_msas(msa62, msa62, ["SEQ1" => "SEQ1", "SEQ6" => "SEQ6"])
# Vector of tuples
@test ab ==
join_msas(msa62, msa62, [("SEQ1", "SEQ1"), ("SEQ6", "SEQ6")])
# Vector of vectors
@test ab ==
join_msas(msa62, msa62, [["SEQ1", "SEQ1"], ["SEQ6", "SEQ6"]])
# Vector of named tuples
@test ab == join_msas(
msa62,
msa62,
[(a = "SEQ1", b = "SEQ1"), (a = "SEQ6", b = "SEQ6")],
)
# Tuple of pairs
@test ab ==
join_msas(msa62, msa62, ("SEQ1" => "SEQ1", "SEQ6" => "SEQ6"))
# Tuple of tuples
@test ab ==
join_msas(msa62, msa62, (("SEQ1", "SEQ1"), ("SEQ6", "SEQ6")))
# Tuple of vectors
@test ab ==
join_msas(msa62, msa62, (["SEQ1", "SEQ1"], ["SEQ6", "SEQ6"]))
# Tuple of named tuples
@test ab == join_msas(
msa62,
msa62,
((a = "SEQ1", b = "SEQ1"), (a = "SEQ6", b = "SEQ6")),
)
# OrderedDict
# -----------
                        # NOTE: join changes its behavior depending on whether the pairing
# positions are sorted or not. Here, OrderedDict ensures that the
# positions are sorted.
@test ab ==
join_msas(msa62, msa62, OrderedDict{Int,Int}(1 => 1, 6 => 6))
end
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [(1, 1), (6, 6)], kind = :outer, axis = 2)
@test size(ab) == (4, 10)
@test vec(sum(ab .== GAP, dims = 2)) == [4, 4, 4, 4]
@test sum(ab .== GAP, dims = 1) == [0 2 2 2 2 2 2 2 2 0]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) ==
["1", "2", "3", "4", "5", "gap:1", "gap:2", "gap:3", "gap:4", "6"]
end
end
end
@testset "gap sequences, gap sequences everywhere" begin
# - 1 - - 2 3 4 5 6 -
# 1 2 3 4 5 - - - - 6
@testset "inner join" begin
# a: 1 2
# b: 2 5
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 2] .=> [2, 5], kind = :inner, axis = 1)
@test size(ab) == (2, 4)
@test all(ab .!= GAP)
@test sequencenames(ab) == ["SEQ1_&_SEQ2", "SEQ2_&_SEQ5"]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
@test ab == Residue['D' 'A' 'D' 'A'; 'D' 'A' 'D' 'A']
@testset "annotations" begin
@test getannotfile(ab, "NCol") == "6_&_6"
@test gethcatmapping(ab) == [1, 1, 2, 2]
for seq = 1:2
@test getsequencemapping(ab, seq) == [1, 2, 1, 2]
end
end
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [1, 2] .=> [2, 5], kind = :inner, axis = 2)
@test size(ab) == (4, 2)
@test all(ab .!= GAP)
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "2"]
@test ab == Residue['D' 'A'; 'D' 'A'; 'A' 'E'; 'A' 'E']
@testset "annotations" begin
@test getannotfile(ab, "1_NCol") == "6"
@test getannotfile(ab, "2_NCol") == "6"
@test getannotfile(ab, "1_ColMap") == "1,2"
@test getannotfile(ab, "2_ColMap") == "2,5"
@test_throws ErrorException gethcatmapping(ab)
@test getsequencemapping(ab, 1) == [1, 2]
@test getsequencemapping(ab, 3) == [2, 5]
end
end
end
@testset "left join" begin
# a: 1 2 3 4 5 6
# b: 2 5 - - - -
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 2] .=> [2, 5], kind = :left)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [0 0 4 4]
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 2, 2, 2, 2]
@test sequencenames(ab) == [
"SEQ1_&_SEQ2",
"SEQ2_&_SEQ5",
"SEQ3",
"SEQ4",
"SEQ5_&_gap:1",
"SEQ6",
]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
@testset "annotations" begin
@test getannotfile(ab, "NCol") == "6_&_6"
@test gethcatmapping(ab) == [1, 1, 2, 2]
@test getannotfile(ab, "ColMap") == "1,2,1,2"
for seq = 1:2
@test getsequencemapping(ab, seq) == [1, 2, 1, 2]
end
for seq = 3:6
@test getsequencemapping(ab, seq) == [1, 2, 0, 0]
end
end
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [1, 2] .=> [2, 5], kind = :left, axis = 2)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 4, 4]
@test vec(sum(ab .== GAP, dims = 1)) == [0, 0, 2, 2, 2, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "2", "3", "4", "5", "6"]
@testset "annotations" begin
@test getannotfile(ab, "1_NCol") == "6"
@test getannotfile(ab, "2_NCol") == "6"
@test getannotfile(ab, "1_ColMap") == "1,2,3,4,5,6"
@test getannotfile(ab, "2_ColMap") == "2,5,,,,"
@test_throws ErrorException gethcatmapping(ab)
for seq = 1:2
@test getsequencemapping(ab, seq) == [1, 2, 3, 4, 5, 6]
end
for seq = 3:4
@test getsequencemapping(ab, seq) == [2, 5, 0, 0, 0, 0]
end
end
end
end
@testset "right join" begin
# a: - 1 - - 2 -
# b: 1 2 3 4 5 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 2] .=> [2, 5], kind = :right)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [4 4 0 0]
@test vec(sum(ab .== GAP, dims = 2)) == [2, 0, 2, 2, 0, 2]
@test sequencenames(ab) == [
"gap:1_&_SEQ1",
"SEQ1_&_SEQ2",
"SEQ3",
"SEQ4",
"SEQ2_&_SEQ5",
"SEQ6",
]
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [1, 2] .=> [2, 5], kind = :right, axis = 2)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [4, 4, 0, 0]
@test vec(sum(ab .== GAP, dims = 1)) == [2, 0, 2, 2, 0, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["gap:1", "1", "gap:2", "gap:3", "2", "gap:4"]
end
end
@testset "outer join" begin
# a: - 1 - - 2 3 4 5 6 -
# b: 1 2 3 4 5 - - - - 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 2] .=> [2, 5], kind = :outer)
@test size(ab) == (10, 4)
@test sum(ab .== GAP, dims = 1) == [4 4 4 4]
@test vec(sum(ab .== GAP, dims = 2)) == [2, 0, 2, 2, 0, 2, 2, 2, 2, 2]
@test sequencenames(ab) == [
"gap:1_&_SEQ1",
"SEQ1_&_SEQ2",
"gap:2_&_SEQ3",
"gap:3_&_SEQ4",
"SEQ2_&_SEQ5",
"SEQ3_&_gap:1",
"SEQ4_&_gap:2",
"SEQ5_&_gap:3",
"SEQ6_&_gap:4",
"gap:4_&_SEQ6",
]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
@testset "pairing types: _find_pairing_positions" begin
# See the notes in the previous _find_pairing_positions testset.
# Positions
# ---------
# Vector of pairs
@test ab == join_msas(msa62, msa62, [1 => 2, 2 => 5])
# Vector of tuples
@test ab == join_msas(msa62, msa62, [(1, 2), (2, 5)])
# Vector of vectors
@test ab == join_msas(msa62, msa62, [[1, 2], [2, 5]])
# Vector of named tuples
@test ab ==
join_msas(msa62, msa62, [(a = 1, b = 2), (a = 2, b = 5)])
# Tuple of pairs
@test ab == join_msas(msa62, msa62, (1 => 2, 2 => 5))
# Tuple of tuples
@test ab == join_msas(msa62, msa62, ((1, 2), (2, 5)))
# Tuple of vectors
@test ab == join_msas(msa62, msa62, ([1, 2], [2, 5]))
# Tuple of named tuples
@test ab ==
join_msas(msa62, msa62, ((a = 1, b = 2), (a = 2, b = 5)))
# Sequence names
# --------------
# Vector of pairs
@test ab ==
join_msas(msa62, msa62, ["SEQ1" => "SEQ2", "SEQ2" => "SEQ5"])
# Vector of tuples
@test ab ==
join_msas(msa62, msa62, [("SEQ1", "SEQ2"), ("SEQ2", "SEQ5")])
# Vector of vectors
@test ab ==
join_msas(msa62, msa62, [["SEQ1", "SEQ2"], ["SEQ2", "SEQ5"]])
# Vector of named tuples
@test ab == join_msas(
msa62,
msa62,
[(a = "SEQ1", b = "SEQ2"), (a = "SEQ2", b = "SEQ5")],
)
# Tuple of pairs
@test ab ==
join_msas(msa62, msa62, ("SEQ1" => "SEQ2", "SEQ2" => "SEQ5"))
# Tuple of tuples
@test ab ==
join_msas(msa62, msa62, (("SEQ1", "SEQ2"), ("SEQ2", "SEQ5")))
# Tuple of vectors
@test ab ==
join_msas(msa62, msa62, (["SEQ1", "SEQ2"], ["SEQ2", "SEQ5"]))
# Tuple of named tuples
@test ab == join_msas(
msa62,
msa62,
((a = "SEQ1", b = "SEQ2"), (a = "SEQ2", b = "SEQ5")),
)
# OrderedDict
# -----------
@test ab ==
join_msas(msa62, msa62, OrderedDict{Int,Int}(1 => 2, 2 => 5))
@test ab == join_msas(
msa62,
msa62,
OrderedDict{String,String}("SEQ1" => "SEQ2", "SEQ2" => "SEQ5"),
)
end
@testset "annotations" begin
@test getannotfile(ab, "NCol") == "6_&_6"
@test gethcatmapping(ab) == [1, 1, 2, 2]
@test getannotfile(ab, "ColMap") == "1,2,1,2"
for seq in [2, 5]
@test getsequencemapping(ab, seq) == [1, 2, 1, 2]
end
for seq in [1, 3, 4, 10]
@test getsequencemapping(ab, seq) == [0, 0, 1, 2]
end
for seq in [6, 7, 8, 9]
@test getsequencemapping(ab, seq) == [1, 2, 0, 0]
end
end
end
@testset "columns" begin
ab = join_msas(msa26, msa26, [1, 2] .=> [2, 5], kind = :outer, axis = 2)
@test size(ab) == (4, 10)
@test vec(sum(ab .== GAP, dims = 2)) == [4, 4, 4, 4]
@test vec(sum(ab .== GAP, dims = 1)) == [2, 0, 2, 2, 0, 2, 2, 2, 2, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) ==
["gap:1", "1", "gap:2", "gap:3", "2", "3", "4", "5", "6", "gap:4"]
@testset "annotations" begin
@test getannotfile(ab, "1_NCol") == "6"
@test getannotfile(ab, "2_NCol") == "6"
@test getannotfile(ab, "1_ColMap") == ",1,,,2,3,4,5,6,"
@test getannotfile(ab, "2_ColMap") == "1,2,3,4,5,,,,,6"
@test_throws ErrorException gethcatmapping(ab)
@test getsequencemapping(ab, 1) == [0, 1, 0, 0, 2, 3, 4, 5, 6, 0]
@test getsequencemapping(ab, 3) == [1, 2, 3, 4, 5, 0, 0, 0, 0, 6]
end
end
end
end
@testset "unsorted positions" begin
# When the pairing positions are not sorted, the join function will behave
# differently depending on the kind of join. For an inner join,
        # the join function will keep the order indicated by the pairing.
# For the outer join it will first match the positions as indicated in the
# pairing input. Then, it will add the remaining unmatched sequences/columns
# for `msa_a` and `msa_b` in that order. In the case of left and right join,
# the order of the sequences/columns will be the same as the order in the
# left/right MSA.
# - 1 4 2 3 5 6 - -
# 1 3 4 2 - - - 5 6
@testset "inner join" begin
# a: 1 4 2
# b: 3 4 2
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 4, 2] .=> [3, 4, 2], kind = :inner)
@test size(ab) == (3, 4)
@test all(ab .!= GAP)
@test sequencenames(ab) == ["SEQ1_&_SEQ3", "SEQ4", "SEQ2"]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
end
@testset "columns" begin
ab = join_msas(
msa26,
msa26,
[1, 4, 2] .=> [3, 4, 2],
kind = :inner,
axis = 2,
)
@test size(ab) == (4, 3)
@test all(ab .!= GAP)
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "4", "2"]
end
end
@testset "left join" begin
# a: 1 2 3 4 5 6
# b: 3 2 - 4 - -
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 4, 2] .=> [3, 4, 2], kind = :left)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [0 0 3 3]
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 2, 0, 2, 2]
@test sequencenames(ab) ==
["SEQ1_&_SEQ3", "SEQ2", "SEQ3_&_gap:1", "SEQ4", "SEQ5", "SEQ6"]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
end
@testset "columns" begin
ab = join_msas(
msa26,
msa26,
[1, 4, 2] .=> [3, 4, 2],
kind = :left,
axis = 2,
)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 3, 3]
@test vec(sum(ab .== GAP, dims = 1)) == [0, 0, 2, 0, 2, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["1", "2", "3", "4", "5", "6"]
end
end
@testset "right join" begin
# a: - 2 1 4 - -
            # b: 1 2 3 4 5 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 4, 2] .=> [3, 4, 2], kind = :right)
@test size(ab) == (6, 4)
@test sum(ab .== GAP, dims = 1) == [3 3 0 0]
@test vec(sum(ab .== GAP, dims = 2)) == [2, 0, 0, 0, 2, 2]
@test sequencenames(ab) ==
["gap:1_&_SEQ1", "SEQ2", "SEQ1_&_SEQ3", "SEQ4", "SEQ5", "SEQ6"]
end
@testset "columns" begin
ab = join_msas(
msa26,
msa26,
[1, 4, 2] .=> [3, 4, 2],
kind = :right,
axis = 2,
)
@test size(ab) == (4, 6)
@test vec(sum(ab .== GAP, dims = 2)) == [3, 3, 0, 0]
@test vec(sum(ab .== GAP, dims = 1)) == [2, 0, 0, 0, 2, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) == ["gap:1", "2", "1", "4", "gap:2", "gap:3"]
end
end
@testset "outer join" begin
# a: 1 4 2 3 5 6 - - -
# b: 3 4 2 - - - 1 5 6
@testset "sequences" begin
ab = join_msas(msa62, msa62, [1, 4, 2] .=> [3, 4, 2], kind = :outer)
@test size(ab) == (9, 4)
@test sum(ab .== GAP, dims = 1) == [3 3 3 3]
@test vec(sum(ab .== GAP, dims = 2)) == [0, 0, 0, 2, 2, 2, 2, 2, 2]
@test sequencenames(ab) == [
"SEQ1_&_SEQ3",
"SEQ4",
"SEQ2",
"SEQ3_&_gap:1",
"SEQ5_&_gap:2",
"SEQ6_&_gap:3",
"gap:1_&_SEQ1",
"gap:2_&_SEQ5",
"gap:3_&_SEQ6",
]
@test columnnames(ab) == ["1_1", "1_2", "2_1", "2_2"]
@testset "annotations" begin
@test getannotfile(ab, "NCol") == "6_&_6"
@test gethcatmapping(ab) == [1, 1, 2, 2]
@test getannotfile(ab, "ColMap") == "1,2,1,2"
for seq = 1:3
@test getsequencemapping(ab, seq) == [1, 2, 1, 2]
end
for seq = 4:6
@test getsequencemapping(ab, seq) == [1, 2, 0, 0]
end
for seq = 7:9
@test getsequencemapping(ab, seq) == [0, 0, 1, 2]
end
end
end
@testset "columns" begin
ab = join_msas(
msa26,
msa26,
[1, 4, 2] .=> [3, 4, 2],
kind = :outer,
axis = 2,
)
@test size(ab) == (4, 9)
@test vec(sum(ab .== GAP, dims = 2)) == [3, 3, 3, 3]
@test vec(sum(ab .== GAP, dims = 1)) == [0, 0, 0, 2, 2, 2, 2, 2, 2]
@test sequencenames(ab) == ["1_SEQ1", "1_SEQ2", "2_SEQ1", "2_SEQ2"]
@test columnnames(ab) ==
["1", "4", "2", "3", "5", "6", "gap:1", "gap:2", "gap:3"]
@testset "annotations" begin
@test getannotfile(ab, "1_NCol") == "6"
@test getannotfile(ab, "2_NCol") == "6"
@test getannotfile(ab, "1_ColMap") == "1,4,2,3,5,6,,,"
@test getannotfile(ab, "2_ColMap") == "3,4,2,,,,1,5,6"
@test_throws ErrorException gethcatmapping(ab)
for seq = 1:2
@test getsequencemapping(ab, seq) == [1, 4, 2, 3, 5, 6, 0, 0, 0]
end
for seq = 3:4
@test getsequencemapping(ab, seq) == [3, 4, 2, 0, 0, 0, 1, 5, 6]
end
end
end
end
end
@testset "ArgumentErrors" begin
# axis is not 1 or 2
@test_throws ArgumentError join_msas(msa62, msa62, [1, 2] .=> [3, 4], axis = 3)
# kind is not :inner, :left, :right, or :outer
@test_throws ArgumentError join_msas(
msa62,
msa62,
[1, 2] .=> [3, 4],
kind = :iner,
)
# pairing is empty
@test_throws ArgumentError join_msas(msa62, msa62, [])
# each element of the pairing is not a pair
@test_throws ArgumentError join_msas(msa62, msa62, [1, 2, 3])
# the list of positions are not of the same length
@test_throws ArgumentError join_msas(msa62, msa62, [1, 2], [3, 4, 5])
end
@testset "Using two position lists instead of a list of pairs" begin
for axis in [1, 2]
for kind in [:inner, :left, :right, :outer]
@test join_msas(
msa62,
msa62,
[1, 2],
[2, 1],
kind = kind,
axis = axis,
) == join_msas(
msa62,
msa62,
[(1, 2), (2, 1)],
kind = kind,
axis = axis,
)
end
end
end
end
end
@testset "Test using MSA files" begin
msa_types = (
Matrix{Residue},
NamedResidueMatrix{Array{Residue,2}},
MultipleSequenceAlignment,
AnnotatedMultipleSequenceAlignment,
)
pf09645_sto = joinpath(DATA, "PF09645_full.stockholm")
gaoetal2011 = joinpath(DATA, "Gaoetal2011.fasta")
gaoetal_msas = [read_file(gaoetal2011, FASTA, T) for T in msa_types]
pfam_msas = [read_file(pf09645_sto, Stockholm, T) for T in msa_types]
@testset "getindex" begin
residues = permutedims(
hcat(
res"DAWAEE",
res"DAWAEF",
res"DAWAED",
res"DAYCMD",
res"DAYCMT",
res"DAYCMT",
),
[2, 1],
)
for index in [
(2:4, 2:4),
(:, [1, 2, 3, 4, 5, 6] .< 4),
([1, 2, 3, 4, 5, 6] .< 4, :),
(:, :),
([1, 2, 3, 4, 5, 6] .< 4, [1, 2, 3, 4, 5, 6] .< 4),
]
for msa in gaoetal_msas
selection = msa[index...]
@test selection == residues[index...]
@test selection isa typeof(msa)
end
end
for index in [2, (2, :), (:, 2), (2, 2), (2, [3, 4, 5])]
for msa in gaoetal_msas
@test msa[index...] == residues[index...]
end
end
end
@testset "setindex! and copy" begin
for msa in gaoetal_msas
copy_msa = copy(msa)
deepcopy_msa = deepcopy(msa)
for (index, value) in [((1), Residue('H')), ((:, 1), res"HHHHHH")]
deepcopy_msa[index...] = value
@test deepcopy_msa[index...] == value
@test msa[index...] != value
copy_msa[index...] = value
@test copy_msa[index...] == value
@test msa[index...] != value
end
for (index, value) in [(1, Residue('H')), (4, Residue('X')), (:, res"HHHHHH")]
seq = copy(getsequence(msa, 4))
seq[1, index] = value
@test seq[1, index] == value
@test msa[4, index] != value # since seq is a copy
end
end
end
@testset "Size" begin
for aln in gaoetal_msas
@test size(aln) == (6, 6)
@test length(aln) == 36
@test ncolumns(aln) == 6
@test nsequences(aln) == 6
@test ncolumns(getsequence(aln, 4)) == 6
end
for aln in pfam_msas
@test size(aln) == (4, 110)
@test length(aln) == 440
@test ncolumns(aln) == 110
@test nsequences(aln) == 4
end
end
@testset "AnnotatedAlignedSequence and AlignedSequence" begin
@testset "Creation" begin
seq_types = (
Matrix{Residue},
NamedResidueMatrix{Array{Residue,2}},
AlignedSequence,
AnnotatedAlignedSequence,
)
for i in eachindex(msa_types)
M = msa_types[i]
S = seq_types[i]
msa = pfam_msas[i]
if M != Matrix{Residue}
for id in [
"C3N734_SULIY/1-95",
"H2C869_9CREN/7-104",
"Y070_ATV/2-70",
"F112_SSV1/3-112",
]
annseq = getsequence(msa, id)
@test msa[id, :] == vec(annseq) # Sequences are matrices
@test isa(annseq, S)
end
end
for seq = 1:4
annseq = getsequence(msa, seq)
@test msa[seq, :] == vec(annseq) # Sequences are matrices
@test isa(annseq, S)
end
end
end
@testset "Annotations" begin
msa = read_file(pf09645_sto, Stockholm)
@test getannotcolumn(msa, "SS_cons") ==
getannotcolumn(getsequence(msa, 4), "SS_cons")
@test getannotfile(msa) == getannotfile(getsequence(msa, 4))
# The sequence name is only needed when working with MSA objects.
@test getannotresidue(msa, "F112_SSV1/3-112", "SS") ==
getannotresidue(getsequence(msa, 4), "SS")
@test getannotsequence(msa, "F112_SSV1/3-112", "DR") ==
getannotsequence(getsequence(msa, 4), "DR")
end
end
@testset "Print" begin
pfam = read_file(pf09645_sto, Stockholm)
gao = read_file(gaoetal2011, FASTA)
@test stringsequence(gao, 4) == "DAYCMD"
@test stringsequence(pfam, 1) == stringsequence(pfam, "C3N734_SULIY/1-95")
for T in (Stockholm, FASTA, Raw)
buffer = IOBuffer()
print_file(buffer, pfam, T)
@test parse_file(String(take!(buffer)), T) == pfam
print_file(buffer, gao, T)
@test parse_file(String(take!(buffer)), T) == gao
end
end
end
@testset "GeneralParserMethods" begin
@testset "Test input lengths" begin
# NOTE: _pre_read... functions should call _check_seq_len
# if _convert_to_matrix_residues is used
ids = ["A", "B", "C"]
seqs_equal = ["MSRSKRDNEIGDSTF", "MSRSKRDNNFGDSTF", "MSRSFYSVEIGDSTF"]
seqs_diffs = ["MSRSKRDNEIGDSTF", "MSRSKRDNNFGDSTF", "MSRSFYSVEIGD"]
seqs_less = ["MSRSKRDNEIGDSTF", "MSRSKRDNNFGDSTF"]
@test MSA._check_seq_len(ids, seqs_equal) === nothing
@test_throws ErrorException MSA._check_seq_len(ids, seqs_diffs)
@test_throws ErrorException MSA._check_seq_len(ids, seqs_less)
end
@testset "To parse MSA & mapping" begin
sequences = ["ADEIMSY", "RCGLFTV", "NQHKPW-"]
matrixres = reshape(reinterpret(Residue, collect(1:21)), (3, 7))
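        # Read column-major, the three 7-residue sequences enumerate the residue
        # alphabet 1:21, so reinterpreting 1:21 reproduces the expected 3x7 matrix.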
msa, map = MSA._to_msa_mapping(sequences)
@test getarray(msa) == matrixres
@test map == ["1,2,3,4,5,6,7", "1,2,3,4,5,6,7", "1,2,3,4,5,6,"]
@test sequencenames(msa) == ["1", "2", "3"]
        # MSA constructors add dimension names
# @test dimnames(msa) == [:Seq, :Col]
msa, map = MSA._to_msa_mapping(sequences, ["a/11-17", "b/11-17", "c/11-16"])
@test getarray(msa) == matrixres
@test map == ["11,12,13,14,15,16,17", "11,12,13,14,15,16,17", "11,12,13,14,15,16,"]
@test sequencenames(msa) == ["a/11-17", "b/11-17", "c/11-16"]
        # MSA constructors add dimension names
# @test dimnames(msa) == [:Seq, :Col]
@test_throws ErrorException MSA._to_msa_mapping(
sequences,
["a/1-7", "b/1-7", "c/1-7"],
)
@test_throws ErrorException MSA._to_msa_mapping(["AD", "EI", "MSY"])
end
@testset "Delete full gap columns" begin
M = reshape(reinterpret(Residue, collect(1:21)), (3, 7))
M[:, [2, 4, 6]] .= GAP
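        # Columns 2, 4 and 6 are now fully gapped, so deletefullgapcolumns should keep
        # only columns 1, 3, 5 and 7 (see the column mapping checked below).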
msa = MultipleSequenceAlignment(M)
named = namedmatrix(msa)
annotated_msa = AnnotatedMultipleSequenceAlignment(M)
d_M = deletefullgapcolumns(M)
d_msa = deletefullgapcolumns(msa)
d_named = deletefullgapcolumns(named)
d_annotated_msa = deletefullgapcolumns(annotated_msa)
@test size(d_M) == (3, 4)
for object in [d_msa, d_named, d_annotated_msa]
@test size(object) == (3, 4)
@test getcolumnmapping(object) == [1, 3, 5, 7]
end
d_msa = deletefullgapcolumns!(msa)
d_annotated_msa = deletefullgapcolumns!(annotated_msa)
@test d_msa == msa
@test d_annotated_msa == annotated_msa
end
end
@testset "getindex" begin
simple = joinpath(DATA, "simple.fasta")
@testset "get index" begin
msa = read_file(simple, FASTA, generatemapping = true)
first_seq = getsequence(msa, 1)
@test first_seq isa AbstractAlignedSequence
matrix_msa = convert(Matrix{Residue}, msa)
@testset "sequence_index" begin
@test sequence_index(msa, "ONE") == 1
@test sequence_index(msa, "TWO") == 2
@test_throws KeyError sequence_index(msa, "THREE") # non-existent sequence
# Matrix{Residue}
@test_throws ErrorException sequence_index(matrix_msa, "ONE")
# AlignedSequence
@test sequence_index(first_seq, "ONE") == 1
@test_throws KeyError sequence_index(first_seq, "TWO") # non-existent sequence
# Test returning the same index when an integer is passed
@test sequence_index(msa, 1) == 1
@test sequence_index(msa, 2) == 2
end
@testset "column_index" begin
@test column_index(msa, "1") == 1
@test column_index(msa, "2") == 2
@test_throws KeyError column_index(msa, "3") # non-existent column
# Matrix{Residue}
@test_throws ErrorException column_index(matrix_msa, "1")
# AlignedSequence
@test column_index(first_seq, "1") == 1
@test column_index(first_seq, "2") == 2
@test_throws KeyError column_index(first_seq, "3") # non-existent column
# Test returning the same index when an integer is passed
@test column_index(msa, 1) == 1
@test column_index(msa, 2) == 2
end
end
@testset "MSA" begin
msa = read_file(simple, FASTA, generatemapping = true)
matrix = Residue[
'A' 'R'
'R' 'A'
]
@test msa == matrix
@test getcolumnmapping(msa) == [1, 2]
@test getsequencemapping(msa, "ONE") == [1, 2]
@test getsequencemapping(msa, "TWO") == [1, 2]
for reverse_col_selector in [[2, 1], 2:-1:1, String["2", "1"]]
ref = Residue['R' 'A'; 'A' 'R']
reversed_cols = msa[:, reverse_col_selector]
@test reversed_cols == ref
@test getcolumnmapping(reversed_cols) == [2, 1]
@test getsequencemapping(reversed_cols, "ONE") == [2, 1]
@test getsequencemapping(reversed_cols, "TWO") == [2, 1]
@test sequencenames(reversed_cols) == ["ONE", "TWO"]
@test columnnames(reversed_cols) == ["2", "1"]
annot_values = Set(values(getannotfile(reversed_cols)))
@test any(
occursin("filtercolumns! : 2 columns have been selected.", val) for
val in annot_values
)
@test any(
occursin("filtercolumns! : column order has changed!", val) for
val in annot_values
)
@test MultipleSequenceAlignment(msa)[:, reverse_col_selector] == ref
end
for reverse_seq_selector in [[2, 1], 2:-1:1, String["TWO", "ONE"]]
ref = Residue['R' 'A'; 'A' 'R']
reversed_seqs = msa[reverse_seq_selector, :]
@test reversed_seqs == ref
@test getcolumnmapping(reversed_seqs) == [1, 2]
@test getsequencemapping(reversed_seqs, "ONE") == [1, 2]
@test getsequencemapping(reversed_seqs, "TWO") == [1, 2]
@test sequencenames(reversed_seqs) == ["TWO", "ONE"]
@test columnnames(reversed_seqs) == ["1", "2"]
@test MultipleSequenceAlignment(msa)[reverse_seq_selector, :] == ref
end
ref_single_col = matrix[:, [2]]
for single_col_selector in [[2], [false, true], String["2"]]
single_col = msa[:, single_col_selector]
@test single_col == ref_single_col
@test getcolumnmapping(single_col) == [2]
@test getsequencemapping(single_col, "ONE") == [2]
@test getsequencemapping(single_col, "TWO") == [2]
@test sequencenames(single_col) == ["ONE", "TWO"]
@test columnnames(single_col) == ["2"]
annot_values = Set(values(getannotfile(single_col)))
@test any(
occursin("filtercolumns! : 1 column has been", val) for val in annot_values
)
@test MultipleSequenceAlignment(msa)[:, single_col_selector] == ref_single_col
end
ref_single_seq = matrix[[2], :]
for single_seq_selector in [[2], [false, true], String["TWO"]]
single_seq = msa[single_seq_selector, :]
@test single_seq == ref_single_seq
@test getcolumnmapping(single_seq) == [1, 2]
@test_throws KeyError getsequencemapping(single_seq, "ONE")
@test getsequencemapping(single_seq, "TWO") == [1, 2]
@test sequencenames(single_seq) == ["TWO"]
@test columnnames(single_seq) == ["1", "2"]
annot_values = Set(values(getannotfile(single_seq)))
@test any(
occursin("filtersequences! : 1 sequence has been", val) for
val in annot_values
)
@test MultipleSequenceAlignment(msa)[single_seq_selector, :] == ref_single_seq
end
ref_single_res = matrix[[2], [2]]
for single_seq_selector in [[2], [false, true], String["TWO"]]
for single_col_selector in [[2], [false, true], String["2"]]
single_res = msa[single_seq_selector, single_col_selector]
@test single_res == ref_single_res
@test getcolumnmapping(single_res) == [2]
@test_throws KeyError getsequencemapping(single_res, "ONE")
@test getsequencemapping(single_res, "TWO") == [2]
@test sequencenames(single_res) == ["TWO"]
@test columnnames(single_res) == ["2"]
annot_values = Set(values(getannotfile(single_res)))
@test any(
occursin("filtersequences! : 1 sequence has been", val) for
val in annot_values
)
@test any(
occursin("filtercolumns! : 1 column has been", val) for
val in annot_values
)
@test MultipleSequenceAlignment(msa)[
single_seq_selector,
single_col_selector,
] == ref_single_res
end
end
end
end
@testset "Clusters" begin
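    # NoClustering is the null clustering: every element gets a weight of 1.0.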
for int = 1:100
@test getweight(NoClustering(), int) == 1.0
end
end
@testset "Hobohm I" begin
# DAWAEE
# DAWAEF 83.3
# DAWAED 83.3
# DAYCMD 33.3
# DAYCMT 33.3 83.3
# DAYCMT 33.3 83.3
fasta = read_file(joinpath(DATA, "Gaoetal2011.fasta"), FASTA)
clusters = hobohmI(fasta, 62)
@test nclusters(clusters) == 2
@test nelements(clusters) == 6
@test getweight(clusters, 1) == 1 / 3
@test getweight(clusters, 6) == 1 / 3
@testset "Clusters getters" begin
@test getweight(clusters) == clusters.weights
@test assignments(clusters) == clusters.clusters
@test counts(clusters) == clusters.clustersize
end
@testset "Convert to Clusters" begin
@test convert(Clusters, clusters) == clusters
distance = convert(Matrix{Float64}, 100.0 .- percentidentity(fasta))
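        # Note (my reading, not stated in the original comments): the DBSCAN radius
        # of 38.0 mirrors the 62% identity cutoff used for hobohmI above, since the
        # distance here is defined as 100 - percent identity.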
cr = Clustering.dbscan(distance, 38.0, metric = nothing, min_neighbors = 2)
@test convert(Clusters, cr) == clusters
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 23000 |
@testset "IO" begin
msa_types = (
Matrix{Residue},
NamedResidueMatrix{Array{Residue,2}},
MultipleSequenceAlignment,
AnnotatedMultipleSequenceAlignment,
)
# Sequence from PF09645
F112_SSV1 = collect(
".....QTLNSYKMAEIMYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAICERHPD" *
"ECEVQYKNRKTTFKWIKQEQKEEQKQEQTQDNIAKIFDAQPANFEQTDQGFIKAKQ.....",
)
# Test pfam stockholm parser using the 4 sequence full MSA for PF09645
# > Order: Tree
# > Inserts lower case
# > Gaps as "." or "-" (mixed)
# > Pfam version 28.0, based on UniProt release 2014_07
@testset "Stockholm" begin
pf09645_sto = joinpath(DATA, "PF09645_full.stockholm")
rna_sto = joinpath(DATA, "upsk_rna.sto")
clustal_sto = joinpath(DATA, "clustalo-I20240512-trunc.aln-stockholm")
@testset "Read" begin
# TODO : @inferred read_file(pf09645_sto, Stockholm);
@test isa(read_file(pf09645_sto, Stockholm), AnnotatedMultipleSequenceAlignment)
for T in msa_types
@test isa(read_file(pf09645_sto, Stockholm, T), T)
end
for T in msa_types
@test read_file(pf09645_sto, Stockholm, T) ==
read_file(pf09645_sto, Stockholm)
end
@testset "Empty lines" begin
# This Rfam alignment has an empty line between the file annotations and the sequences
for T in msa_types # NOTE: nucleotides as Residue objects
@test isa(read_file(rna_sto, Stockholm, T), T)
end
end
end
@testset "Output types" begin
msa_objects = [read_file(pf09645_sto, Stockholm, T) for T in msa_types]
@testset "Sequence Names" begin
default = ["1", "2", "3", "4"]
pfnames = [
"C3N734_SULIY/1-95",
"H2C869_9CREN/7-104",
"Y070_ATV/2-70",
"F112_SSV1/3-112",
]
@test sequencenames(msa_objects[1]) == default
for i = 2:4
@test sequencenames(msa_objects[i]) == pfnames
end
end
@testset "Size" begin
for msa in msa_objects
@test size(msa, 1) == 4
@test size(msa, 2) == sum(F112_SSV1 .!= Ref('.')) # without inserts
@test view(msa, 4, :) == map(Residue, F112_SSV1[F112_SSV1.!=Ref('.')])
end
msacl = read_file(
clustal_sto,
Stockholm,
generatemapping = true,
useidcoordinates = true,
)
@test size(msacl, 1) == 2
@test maximum(getsequencemapping(msacl, "Q8BX79|reviewed|Probable")) == 349
@test maximum(getsequencemapping(msacl, "Q923Y8|reviewed|Trace")) == 332
end
end
@testset "Annotations" begin
msa = read_file(pf09645_sto, Stockholm)
@test !isempty(msa)
@test length(annotations(msa)) > 8 # 8 + modifications
@test length(getannotcolumn(msa)) == 2
@test length(getannotresidue(msa)) == 1
@test getannotresidue(msa, "F112_SSV1/3-112", "SS") ==
"X---HHHHHHHHHHHHHHHSEE-HHHHHHHH---HHHHHHHHHHHHHHHHH-TTTEEEEE-SS-EEEEE--XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
@test getannotcolumn(msa, "seq_cons") ==
"...NshphAclhaKILppKtElolEDIlAQFEISsosAYsI.+sL+hICEpH.-ECpsppKsRKTlhh.hKpEphppptpEp..ppItKIhsAp................"
@test getannotsequence(msa, "F112_SSV1/3-112", "DR") == "PDB; 2VQC A; 4-73;"
end
@testset "String input" begin
pfam_string = """
# STOCKHOLM 1.0
#=GS C3N734_SULIY/1-95 AC C3N734.1
#=GS H2C869_9CREN/7-104 AC H2C869.1
#=GS Y070_ATV/2-70 AC Q3V4T1.1
#=GS F112_SSV1/3-112 AC P20220.1
#=GS F112_SSV1/3-112 DR PDB; 2VQC A; 4-73;
C3N734_SULIY/1-95 ...mp---NSYQMAEIMYKILQQKKEISLEDILAQFEISASTAYNVQRTLRMICEKHPDECEVQTKNRRTIFKWIKNEETTEEGQEE--QEIEKILNAQPAE-------------k....
H2C869_9CREN/7-104 ...nk--LNDVQRAKLLVKILQAKGELDVYDIMLQFEISYTRAIPIMKLTRKICEAQ-EICTYDEKEHKLVSLNAKKEKVEQDEEENEREEIEKILDAH----------------trreq
Y070_ATV/2-70 qsvne-------VAQQLFSKLREKKEITAEDIIAIYNVTPSVAYAIFTVLKVMCQQHQGECQAIKRGRKTVI-------------------------------------------vskq.
F112_SSV1/3-112 .....QTLNSYKMAEIMYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAICERHPDECEVQYKNRKTTFKWIKQEQKEEQKQEQTQDNIAKIFDAQPANFEQTDQGFIKAKQ.....
#=GR F112_SSV1/3-112 SS .....X---HHHHHHHHHHHHHHHSEE-HHHHHHHH---HHHHHHHHHHHHHHHHH-TTTEEEEE-SS-EEEEE--XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.....
#=GC SS_cons .....X---HHHHHHHHHHHHHHHSEE-HHHHHHHH---HHHHHHHHHHHHHHHHH-TTTEEEEE-SS-EEEEE--XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.....
#=GC seq_cons ........NshphAclhaKILppKtElolEDIlAQFEISsosAYsI.+sL+hICEpH.-ECpsppKsRKTlhh.hKpEphppptpEp..ppItKIhsAp................h....
//
"""
@test parse_file(pfam_string, Stockholm) == read_file(pf09645_sto, Stockholm)
msa = parse_file(pfam_string, Stockholm)
@test !isempty(msa)
@test length(annotations(msa)) > 8 # 8 + modifications
@test length(getannotcolumn(msa)) == 2
@test length(getannotresidue(msa)) == 1
end
@testset "File write Matrix{Residue}" begin
path = tempdir()
tmp_file = joinpath(path, ".tmp.stockholm")
try
msa = read_file(pf09645_sto, Stockholm, Matrix{Residue})
write_file(tmp_file, msa, Stockholm)
@test read_file(tmp_file, Stockholm, Matrix{Residue}) == msa
finally
if isfile(tmp_file)
rm(tmp_file)
end
end
end
@testset "Keep insert columns" begin
msa = read_file(pf09645_sto, Stockholm, keepinserts = true)
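            # With keepinserts = true the insert columns are not dropped; as the
            # checks below suggest, the "Aligned" column annotation marks the
            # originally aligned columns with '1', and insert residues are stored
            # in uppercase with '.' turned into '-'.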
# Aligned columns
@test (collect(getannotcolumn(msa, "Aligned")) .== Ref('1')) ==
(F112_SSV1 .!= Ref('.'))
seq = "...mp---NSYQMAEIMYKILQQKKEISLEDILAQFEISASTAYNVQRTLRMICEKHPDECEVQTKNRRTIFKWIKNEETTEEGQEE--QEIEKILNAQPAE-------------k...."
@test stringsequence(msa, 1) == replace(uppercase(seq), '.' => '-')
@testset "Print inserts" begin
io = IOBuffer()
print_file(io, msa, Stockholm)
printed = String(take!(io))
@test occursin(seq, printed)
end
end
end
@testset "FASTA" begin
pf09645_fas = joinpath(DATA, "PF09645_full.fasta.gz")
gaoetal2011 = joinpath(DATA, "Gaoetal2011.fasta")
@testset "Read" begin
@test isa(read_file(pf09645_fas, FASTA), AnnotatedMultipleSequenceAlignment)
for T in msa_types
@test isa(read_file(pf09645_fas, FASTA, T), T)
end
for T in msa_types
@test read_file(pf09645_fas, FASTA, T) == read_file(pf09645_fas, FASTA)
end
@testset "Download" begin
@test read_file(gaoetal2011, FASTA) == read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/Gaoetal2011.fasta",
FASTA,
)
@test read_file(pf09645_fas, FASTA) == read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.fasta.gz",
FASTA,
)
end
end
@testset "Output types" begin
gaoetal_msas = [read_file(gaoetal2011, FASTA, T) for T in msa_types]
pfam_msas = [read_file(pf09645_fas, FASTA, T) for T in msa_types]
@testset "Sequence Names" begin
@test sequencenames(gaoetal_msas[1]) == ["1", "2", "3", "4", "5", "6"]
@test sequencenames(pfam_msas[1]) == ["1", "2", "3", "4"]
for i = 2:4
@test sequencenames(gaoetal_msas[i]) ==
["SEQ1", "SEQ2", "SEQ3", "SEQ4", "SEQ5", "SEQ6"]
@test sequencenames(pfam_msas[i]) == [
"C3N734_SULIY/1-95",
"H2C869_9CREN/7-104",
"Y070_ATV/2-70",
"F112_SSV1/3-112",
]
end
end
@testset "Residues" begin
list_seq = [
res"DAWAEE",
res"DAWAEF",
res"DAWAED",
res"DAYCMD",
res"DAYCMT",
res"DAYCMT",
]
residues = permutedims(hcat(list_seq...), [2, 1])
for msa in gaoetal_msas
@test msa == residues
@test isa(getresidues(msa), Matrix{Residue})
@test getresidues(msa) == residues
@test getresiduesequences(msa) == list_seq
@test stringsequence(msa, 1) == "DAWAEE"
end
for msa in gaoetal_msas[2:end]
# getsequence returns a matrix, use vec(seq) or dropdims(seq, dims=1)
# to get a vector:
@test vec(getresidues(getsequence(msa, 1))) == res"DAWAEE"
@test dropdims(getresidues(getsequence(msa, 1)), dims = 1) ==
res"DAWAEE"
end
end
@testset "Size" begin
for msa in gaoetal_msas
@test size(msa, 1) == 6
@test size(msa, 2) == 6
@test view(msa, 4, :) == res"DAYCMD"
end
for msa in pfam_msas
@test size(msa, 1) == 4
@test size(msa, 2) == sum(F112_SSV1 .!= Ref('.')) # without inserts
@test view(msa, 4, :) == map(Residue, F112_SSV1[F112_SSV1.!=Ref('.')])
end
end
end
@testset "String input/output" begin
msa = read_file(gaoetal2011, FASTA)
fasta_string = """
>SEQ1
DAWAEE
>SEQ2
DAWAEF
>SEQ3
DAWAED
>SEQ4
DAYCMD
>SEQ5
DAYCMT
>SEQ6
DAYCMT
"""
@test parse_file(fasta_string, FASTA) == msa
out = IOBuffer()
print_file(out, msa, FASTA)
@test String(take!(out)) == fasta_string
end
@testset "File input/output" begin
msa = read_file(gaoetal2011, FASTA)
path = tempdir()
uncompressed = joinpath(path, ".tmp.fasta")
try
write_file(uncompressed, msa, FASTA)
@test read_file(uncompressed, FASTA) == msa
finally
if isfile(uncompressed)
rm(uncompressed)
end
end
compressed = joinpath(path, ".tmp.fasta.gz")
try
write_file(compressed, msa, FASTA)
@test read_file(compressed, FASTA) == msa
finally
if isfile(compressed)
rm(compressed)
end
end
end
@testset "Non standard residues and mapping" begin
seqs =
read_file(joinpath(DATA, "alphabet.fasta"), FASTA, generatemapping = true)
@test vec(seqs[1, :]) == res"ARNDCQEGHILKMFPSTWYV"
for i = 2:nsequences(seqs)
@test vec(seqs[i, :]) == res"ARXDCQEGHILKMFPSTWYV"
@test getsequencemapping(seqs, 1) == getsequencemapping(seqs, i)
end
end
end
@testset "Raw" begin
# AnnotatedMultipleSequenceAlignment
raw = read_file(joinpath(DATA, "gaps.txt"), Raw)
mat = getresidues(raw)
raw_string = """THAYQAIHQV
THAYQAIHQ-
THAYQAIH--
THAYQAI---
THAYQA----
THAYQ-----
THAY------
THA-------
TH--------
T---------
"""
@testset "Parse" begin
@test stringsequence(raw, "1") == "THAYQAIHQV"
@test stringsequence(raw, 10) == "T---------"
for i = 1:10
@test stringsequence(mat, i) == stringsequence(raw, i)
end
@test parse_file(raw_string, Raw) == raw
end
@testset "Print" begin
buffer = IOBuffer()
print_file(buffer, raw, Raw)
@test String(take!(buffer)) == raw_string
print_file(buffer, mat, Raw)
@test String(take!(buffer)) == raw_string
end
@testset "Stats" begin
@test gapfraction(mat) == 0.45
@test vec(gapfraction(mat, 1)) ≈
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
@test vec(gapfraction(mat, 2)) ≈
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
@test residuefraction(mat) == 0.55
@test vec(residuefraction(mat, 1)) ≈
[1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
@test vec(residuefraction(mat, 2)) ≈
[1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
@test vec(coverage(mat)) ≈ [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
@test vec(columngapfraction(mat)) ≈
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
end
@testset "Reference and gapstrip" begin
gs = gapstrip(mat, coveragelimit = 0.5, gaplimit = 0.5)
@test vec(getsequence(gs, 1)) == res"THAYQAIH"
@test ncolumns(gs) == 8
@test nsequences(gs) == 6
ref = setreference!(copy(mat), 2)
@test vec(getsequence(ref, 1)) == res"THAYQAIHQ-"
ref = adjustreference(ref)
@test vec(getsequence(ref, 1)) == res"THAYQAIHQ"
end
end
@testset "NBRF/PIR" begin
# Example NBRF file: http://iubio.bio.indiana.edu/soft/molbio/readseq/classic/src/Formats
# "The sequence is free format and may be interrupted by blanks for ease of reading"
example = joinpath(DATA, "example.nbrf")
# Alignment file (PIR) from https://salilab.org/modeller/9v7/manual/node445.html
modeller = joinpath(DATA, "modeller.pir.gz")
# http://emboss.sourceforge.net/docs/themes/seqformats/NbrfFormat.html
# "sequence may contain punctuation symbols to indicate various degrees of
# reliability of the data"
emboss = joinpath(DATA, "emboss.pir")
# http://caps.ncbs.res.in/pass2v3/pir.html
# Example from pass2 with spaces added at the end of the id lines.
pass2 = joinpath(DATA, "pass2.pir")
@testset "Read" begin
@test isa(read_file(modeller, PIR), AnnotatedMultipleSequenceAlignment)
for T in msa_types
@test isa(read_file(modeller, PIR, T), T)
end
msa = read_file(modeller, PIR)
@test size(msa) == (2, 106)
end
@testset "Print" begin
msa = read_file(example, PIR)
@test stringsequence(msa, 1) ==
"MTNIRKSHPLFKIINHSFIDLPAPSVTHICRDVNYGWLIRYTWIGGQPVEHPFIIIGQLASISYFSIILILMPISGIVEDKMLKWN"
out = IOBuffer()
print_file(out, msa, PIR)
@test String(take!(out)) == """
>P1;CBRT
Cytochrome b - Rat mitochondrion (SGC1)
MTNIRKSHPLFKIINHSFIDLPAPSVTHICRDVNYGWLIRYTWIGGQPVEHPFIIIGQLASISYFSIILILMPISGIVED
KMLKWN*
"""
out = IOBuffer()
print_file(out, msa.matrix, PIR) # NamedResidueMatrix{Array{Residue,2}}
@test String(take!(out)) == """
>XX;CBRT
MTNIRKSHPLFKIINHSFIDLPAPSVTHICRDVNYGWLIRYTWIGGQPVEHPFIIIGQLASISYFSIILILMPISGIVED
KMLKWN*
"""
out = IOBuffer()
print_file(out, msa.matrix.array, PIR) # Matrix{Residue}
@test String(take!(out)) == """
>XX;1
MTNIRKSHPLFKIINHSFIDLPAPSVTHICRDVNYGWLIRYTWIGGQPVEHPFIIIGQLASISYFSIILILMPISGIVED
KMLKWN*
"""
end
@testset "Gaps" begin
msa = read_file(modeller, PIR)
@test gapfraction(msa, 2) ≈ [0.0, 0.49056603773584906]
@test getannotsequence(msa, "5fd1", "Type") == "P1"
@test getannotsequence(msa, "5fd1", "Title") ==
"structureX:5fd1:1 :A:106 :A:ferredoxin:Azotobacter vinelandii: 1.90: 0.19"
end
@testset "String input/output" begin
emboss_string = """
>P1;AZBR
finger protein zfpA - turnip fern chloroplast
GDVE(G.K.G.I.F=T,M,C.S.Q,C.H.V,E.K.G.G.K.H)
FTGPNLHGLFGRK.TGQAVGYSYTAANK.NK.GIIWGDDTLM
EYLENPK.RYIPGTK.MVFTGLSK.YRE
RTNLIAYLK.EK.TAA*
"""
# MIToS always reads . as -
msa = read_file(emboss, PIR, deletefullgaps = false)
@test parse_file(emboss_string, PIR, deletefullgaps = false) == msa
out = IOBuffer()
print_file(out, msa, PIR)
@test String(take!(out)) == """
>P1;AZBR
finger protein zfpA - turnip fern chloroplast
GDVEG-K-G-I-FTMC-S-QC-H-VE-K-G-G-K-HFTGPNLHGLFGRK-TGQAVGYSYTAANK-NK-GIIWGDDTLMEY
LENPK-RYIPGTK-MVFTGLSK-YRERTNLIAYLK-EK-TAA*
"""
end
@testset "Spaces at the end of the id line" begin
lines = readlines(pass2)
@test lines[1] == ">P1;1bbha- "
@test lines[5] == ">P1;1cpq-- "
@test lines[9] == ">P1;256bb- "
msa = read_file(pass2, PIR)
@test sequencenames(msa) == ["1bbha-", "1cpq--", "256bb-"]
end
@testset "Duplicated identifiers" begin
duplicated_ids_file = joinpath(DATA, "duplicated_ids.pir")
@test_throws ArgumentError read_file(duplicated_ids_file, PIR)
end
end
@testset "A3M" begin
@testset "Adding insert gaps" begin
# Simple tests for adding insert gaps in different locations
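            # In A3M, lowercase letters are insert states; judging from the cases
            # below, _add_insert_gaps! pads the other sequences with '.' so that
            # every insert column lines up across sequences.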
@test MSA._add_insert_gaps!(["MAHI", "MgAHI"]) == ["M.AHI", "MgAHI"]
@test MSA._add_insert_gaps!(["MAHILLI", "MAHIgLLIhhh", "MAHILLI", "MAHILLI"]) ==
["MAHI.LLI...", "MAHIgLLIhhh", "MAHI.LLI...", "MAHI.LLI..."]
@test MSA._add_insert_gaps!(["MAhI", "MALh"]) == ["MAhI.", "MA.Lh"]
@test MSA._add_insert_gaps!(["MALI", "hhhMALI"]) == ["...MALI", "hhhMALI"]
@test MSA._add_insert_gaps!(["MAggLLI", "MAgggLLI", "MAgLLI", "MALLI"]) ==
["MAgg.LLI", "MAgggLLI", "MAg..LLI", "MA...LLI"]
end
@testset "Single insert column" begin
# A3M example from https://yanglab.qd.sdu.edu.cn/trRosetta/msa_format.html
# |insert
seq = "-----RTKRLREAVRVYLAENGrSHTVDIFDHLNDRFSWGATMNQVGNILAKDNRFEKVGHVRD-FFRGARYTVCVWDLAS-----------"
seq_without_insert = replace(seq, "r" => "")
msa = read_file(joinpath(DATA, "yanglab.a3m"), A3M)
@test ncolumns(msa) == 91 # 92 with the insert column
@test nsequences(msa) == 7
@test stringsequence(msa, "6") == seq_without_insert
@testset "Print inserts" begin
@testset "There are no inserts" begin
io = IOBuffer()
print_file(io, msa, A3M)
printed = String(take!(io))
# The insert column has been removed when reading the MSA
@test occursin(seq_without_insert, printed)
end
@testset "Keep inserts" begin
# Keeping the insert column when reading the MSA
msa = read_file(joinpath(DATA, "yanglab.a3m"), A3M, keepinserts = true)
@test ncolumns(msa) == 92
io = IOBuffer()
print_file(io, msa, A3M)
printed = String(take!(io))
@test occursin(seq, printed)
# Test that gaps are not added to the insert column for A3M
@test !occursin(".", printed)
@testset "Saving as A2M" begin
io = IOBuffer()
print_file(io, msa, A2M)
printed = String(take!(io))
# A2M keeps the insert column
@test occursin(seq, printed)
                        # and adds explicit gaps for inserts
@test occursin(".", printed)
# Ensure the correct location of the gaps
@test occursin("RP.RN", printed)
end
end
end
end
end
@testset "Disambiguate Sequences Tests" begin
IDS = ["seq", "seq", "seq"]
old2new, new_IDS = MSA._disambiguate_sequences(IDS)
@test new_IDS == ["seq", "seq(1)", "seq(2)"]
@test old2new == Dict("seq" => ["seq", "seq(1)", "seq(2)"])
IDS = ["a", "a", "b", "b", "b"]
old2new, new_IDS = MSA._disambiguate_sequences(IDS)
@test new_IDS == ["a", "a(1)", "b", "b(1)", "b(2)"]
@test old2new == Dict("a" => ["a", "a(1)"], "b" => ["b", "b(1)", "b(2)"])
IDS = ["item", "item(1)", "item(1)", "item(2)"]
old2new, new_IDS = MSA._disambiguate_sequences(IDS)
@test new_IDS == ["item", "item(1)", "item(1)(1)", "item(2)"]
@test old2new == Dict(
"item" => ["item"],
"item(1)" => ["item(1)", "item(1)(1)"],
"item(2)" => ["item(2)"],
)
IDS = ["x", "x", "x", "x(1)", "x(1)"]
old2new, new_IDS = MSA._disambiguate_sequences(IDS)
@test new_IDS == ["x", "x(1)", "x(2)", "x(1)(1)", "x(1)(2)"]
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 8391 |
@testset "Identity" begin
@testset "Float64" begin
@test_throws ErrorException percentidentity(res"AH", res"AGH")
@test percentidentity(res"AH", res"AH") == 100.0
@test percentidentity(res"AH", res"AG") == 50.0
@test percentidentity(res"AH", res"RG") == 0.0
@test percentidentity(res"AH-", res"AG-") == 50.0
@test percentidentity(res"A--", res"AG-") == 50.0
# Columns with XAA aren't counted
@test percentidentity(res"AXA-", res"AG--") ==
100 .* (1 + 0 + 0 + 0) / (1 + 0 + 1 + 0)
@test percentidentity(res"AAX-", res"AG--") ==
100 .* (1 + 0 + 0 + 0) / (1 + 1 + 0 + 0)
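        # Reading the two expressions above: a column is skipped when either
        # sequence has an X (and also when both have a gap), while residue vs gap
        # counts as an aligned mismatch, so both cases reduce to 100 * 1/2 == 50.0.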
@test percentidentity(res"AH-", res"AX-") == 100.0
@test percentidentity(res"AH-", res"XG-") == 0.0
@test percentidentity(res"AGG", res"AHX") == 50.0
@test isnan(percentidentity(res"---", res"---"))
@test isnan(percentidentity(res"XX-", res"-XX"))
@test isnan(percentidentity(res"XXX", res"XXX"))
end
@testset "MSA 2x3" begin
αβ = (1, 20, 21, 22)
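        # αβ spans two plain residues plus the gap and X codes: Residue(1) is 'A',
        # Residue(20) is 'V', 21 is GAP and 22 is XAA (see the Residue tests later
        # in this file set). When every one of the three columns is either gap-gap
        # or involves an X there is nothing left to compare, hence the NaN branch.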
for i₁ in αβ, j₁ in αβ, k₁ in αβ
seq₁ = Residue[i₁, j₁, k₁]
for i₂ in αβ, j₂ in αβ, k₂ in αβ
seq₂ = Residue[i₂, j₂, k₂]
val = percentidentity(seq₁, seq₂)
if sum((seq₁ .== GAP) .& (seq₂ .== GAP)) +
sum((seq₁ .== XAA) .| (seq₂ .== XAA)) == 3
@test isnan(val)
elseif seq₁ == seq₂
@test val == 100.0
else
@test 100.0 >= val >= 0.0
end
end
end
end
@testset "Bool" begin
@test percentidentity(res"A--", res"AG-", 40.0)
@test !percentidentity(res"A--", res"AG-", 60)
# Columns with XAA aren't counted
@test percentidentity(res"AXA-", res"AG--", 40.0)
@test !percentidentity(res"AAX-", res"AG--", 60.0)
@test percentidentity(res"AH-", res"AX-", 100.0)
@test percentidentity(res"AH-", res"XG-", 0.0)
@test !percentidentity(res"AGG", res"AHX", 62.0) # 50% < 62%
end
@testset "MSA" begin
fasta = read_file(joinpath(DATA, "Gaoetal2011.fasta"), FASTA)
id = percentidentity(fasta)
@test id[1, 1] == 100.0
@test isapprox(id[1, 2], 83.33, atol = 0.01)
@test isapprox(id[1, 3], 83.33, atol = 0.01)
@test isapprox(id[3, 1], 83.33, atol = 0.01)
@test isapprox(id[1, 6], 33.33, atol = 0.01)
@test isapprox(id[4, 5], 83.33, atol = 0.01)
@test id[5, 6] == 100.0
@test maximum(id) == 100.0
@test isapprox(minimum(id), 33.33, atol = 0.01)
@testset "SequenceIdentityMatrix" begin
@test isapprox(sum(id[:, 3]), 100.0 + 50.0 + 2 * (200 / 6) + 2 * (500 / 6))
id[4, 3] = 80
@test isapprox(sum(id[:, 3]), 100.0 + 80.0 + 2 * (200 / 6) + 2 * (500 / 6))
end
@testset "Gaps" begin
aln = read_file(joinpath(DATA, "gaps.txt"), Raw)
id = percentidentity(aln)
@test id[1, 1] == 100.0
@test id[1, 2] == 90.0
@test id[1, 3] == 80.0
@test id[2, 3] ≈ 800 / 9
end
@testset "Mean percent identity" begin
aln = Residue[
'-' '-' 'G' 'G' 'G' '-'
'-' '-' '-' 'G' 'G' 'G'
]
# identities 0 0 0 1 1 0 sum 2
# aligned res 0 0 1 1 1 1 sum 4
@test percentidentity(aln)[1, 2] == 50.0 # 2 / 4
@test meanpercentidentity(aln) == 50.0
msa = rand(Residue, 400, 2)
msa300 = msa[1:300, :]
@test mean(getlist(percentidentity(msa300))) == meanpercentidentity(msa300)
@test isapprox(
mean(getlist(percentidentity(msa))),
meanpercentidentity(msa),
atol = 0.5,
)
@test mean(getlist(percentidentity(msa))) ==
meanpercentidentity(msa, exact = true)
end
end
@testset "Percent Similarity" begin
fasta = read_file(joinpath(DATA, "Gaoetal2011.fasta"), FASTA)
@testset "Gaps" begin
@test percentsimilarity(res"AH", res"IM") == 50.0
@test percentsimilarity(res"-AH", res"--H") == 50.0
@test percentsimilarity(res"-AH", res"-AH") == 100.0
@test percentsimilarity(res"-AH", res"-AH") ==
percentsimilarity(res"AH", res"AH")
@test percentsimilarity(res"-AH", res"-AH") ==
percentsimilarity(res"--AH", res"--AH")
# Residues outside the alphabet aren't counted (i.e.: XAA)
@test percentsimilarity(res"-AH", res"-AH") ==
percentsimilarity(res"XXAH", res"AAAH")
@test percentsimilarity(res"AH", res"AH") ==
percentsimilarity(res"GGAH", res"AAAH", ReducedAlphabet("AH"))
end
@testset "Using SMS's Ident and Sim residue groups" begin
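            # Assumption about the notation (not asserted by these tests): residues
            # grouped inside the same parentheses of a ReducedAlphabet are treated
            # as equivalent, so e.g. D, E, N and Q count as "similar" here.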
sim = percentsimilarity(
fasta,
ReducedAlphabet("(GAVLI)(FYW)(ST)(KRH)(DENQ)P(CM)"),
)
@test eltype(sim) == Float64
@test sim[1, 1] == 100.0
@test isapprox(sim[1, 2], 83.33, atol = 0.01)
@test isapprox(sim[1, 3], 100.00, atol = 0.01)
@test isapprox(sim[1, 4], 66.67, atol = 0.01)
@test isapprox(sim[1, 5], 50.00, atol = 0.01)
@test isapprox(sim[1, 6], 50.00, atol = 0.01)
@test isapprox(sim[2, 3], 83.33, atol = 0.01)
@test isapprox(sim[2, 4], 50.00, atol = 0.01)
@test isapprox(sim[2, 5], 50.00, atol = 0.01)
@test isapprox(sim[2, 6], 50.00, atol = 0.01)
@test isapprox(sim[3, 4], 66.67, atol = 0.01)
@test isapprox(sim[3, 5], 50.00, atol = 0.01)
@test isapprox(sim[3, 6], 50.00, atol = 0.01)
@test isapprox(sim[4, 5], 83.33, atol = 0.01)
@test isapprox(sim[4, 6], 83.33, atol = 0.01)
@test isapprox(sim[5, 6], 100.00, atol = 0.01)
end
@testset "Using Bio3D's (2.2) seqidentity residue groups" begin
sim = percentsimilarity(
fasta,
ReducedAlphabet("(GA)(MVLI)(FYW)(ST)(KRH)(DE)(NQ)PC"),
out = Float16,
)
bio3d =
[
1.000 0.833 1.000 0.667 0.500 0.500
0.833 1.000 0.833 0.500 0.500 0.500
1.000 0.833 1.000 0.667 0.500 0.500
0.667 0.500 0.667 1.000 0.833 0.833
0.500 0.500 0.500 0.833 1.000 1.000
0.500 0.500 0.500 0.833 1.000 1.000
] .* 100.00
@test eltype(sim) == Float16
for i = 1:6
for j = 1:6
@test isapprox(sim[i, j], bio3d[i, j], atol = 0.1)
end
end
end
@testset "Percent identity" begin
for αβ in (UngappedAlphabet(), GappedAlphabet())
@test_throws ErrorException percentsimilarity(res"AH", res"AGH", αβ)
@test percentsimilarity(res"AH", res"AH", αβ) == 100.0
@test percentsimilarity(res"AH", res"AG", αβ) == 50.0
@test percentsimilarity(res"AH", res"RG", αβ) == 0.0
@test percentsimilarity(res"AH-", res"AG-", αβ) == 50.0
@test percentsimilarity(res"A--", res"AG-", αβ) == 50.0
# Columns with XAA aren't counted
@test percentsimilarity(res"AXA-", res"AG--", αβ) ==
100 .* (1 + 0 + 0 + 0) / (1 + 0 + 1 + 0)
@test percentsimilarity(res"AAX-", res"AG--", αβ) ==
100 .* (1 + 0 + 0 + 0) / (1 + 1 + 0 + 0)
@test percentsimilarity(res"AH-", res"AX-", αβ) == 100.0
@test percentsimilarity(res"AH-", res"XG-", αβ) == 0.0
@test percentsimilarity(res"AGG", res"AHX", αβ) == 50.0
@test isnan(percentsimilarity(res"---", res"---", αβ))
@test isnan(percentsimilarity(res"XX-", res"-XX", αβ))
@test isnan(percentsimilarity(res"XXX", res"XXX", αβ))
end
end
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 2264 |
@testset "MSA Annotations" begin
pfam = read_file(
joinpath(DATA, "PF09645_full.stockholm"),
Stockholm,
generatemapping = true,
useidcoordinates = true,
)
F112_SSV1 = collect(
string(
".....QTLNSYKMAEIMYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAIC",
"ERHPDECEVQYKNRKTTFKWIKQEQKEEQKQEQTQDNIAKIFDAQPANFEQTDQGFIKAKQ.....",
),
)
fasta = read_file(joinpath(DATA, "Gaoetal2011.fasta"), FASTA, generatemapping = true)
@testset "Mapping" begin
@test minimum(getsequencemapping(pfam, 4)) == 3
@test maximum(getsequencemapping(pfam, "F112_SSV1/3-112")) == 112
@test getcolumnmapping(pfam) ==
filter(i -> F112_SSV1[i] !== '.', eachindex(F112_SSV1))
@test length(pfam.annotations.sequences) == 9
@test getsequencemapping(fasta, 1) == [1, 2, 3, 4, 5, 6]
@test getsequencemapping(fasta, "SEQ2") == [1, 2, 3, 4, 5, 6]
@test getcolumnmapping(fasta) == [1, 2, 3, 4, 5, 6]
end
@testset "Modifications" begin
buffer = IOBuffer()
print(buffer, annotations(pfam))
printed = split(String(take!(buffer)), '\n')
@test length(printed) == 17
@test printed[1] == string("#=GF NCol\t120")
@test printed[2] == string(
"#=GF ColMap\t6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22",
",23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,",
"43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,",
"63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,",
"84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,",
"103,104,105,106,107,108,109,110,111,112,113,114,115",
)
@test occursin(r"MIToS_", printed[3])
@test occursin(r"MIToS_", printed[4])
end
@testset "Delete modification annotations" begin
buffer = IOBuffer()
delete_annotated_modifications!(pfam)
print(buffer, annotations(pfam))
printed = split(String(take!(buffer)), '\n')
@test length(printed) == 17 - 2 # 2 MIToS annotations
@test !occursin(r"MIToS_", printed[3])
@test !occursin(r"MIToS_", printed[4])
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 5978 |
@testset "MSAEditing.jl" begin
msa_types = (
Matrix{Residue},
NamedResidueMatrix{Array{Residue,2}},
MultipleSequenceAlignment,
AnnotatedMultipleSequenceAlignment,
)
pf09645_sto = joinpath(DATA, "PF09645_full.stockholm")
gaoetal2011 = joinpath(DATA, "Gaoetal2011.fasta")
gaoetal_msas = [read_file(gaoetal2011, FASTA, T) for T in msa_types]
pfam_msas = [read_file(pf09645_sto, Stockholm, T) for T in msa_types]
pfam = pfam_msas[end]
pfam_na = pfam_msas[end-1] # na: not annotated
@testset "Boolean mask array" begin
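        # The checks below exercise MSA._column_mask / MSA._sequence_mask with the
        # selector types they appear to accept: a one-row (or one-column) matrix
        # mask, a plain boolean vector, a predicate function, integer indices and
        # Colon().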
matrix_mask = pfam[1:1, :] .== GAP
@test size(matrix_mask) == (1, 110)
@test size(MSA._column_mask(matrix_mask, pfam)) == (110,)
@test MSA._column_mask(vec(matrix_mask), pfam) == vec(matrix_mask)
@test MSA._column_mask(col -> col[1] == GAP, pfam) ==
MSA._column_mask(matrix_mask, pfam)
int_mask = collect(eachindex(matrix_mask))[vec(matrix_mask)] # i.e. index selection
@test MSA._column_mask(int_mask, pfam) == int_mask
@test MSA._column_mask(Colon(), pfam) == Colon()
matrix_mask = pfam[:, 1:1] .== Residue('-')
@test size(matrix_mask) == (4, 1)
@test size(MSA._sequence_mask(matrix_mask, pfam)) == (4,)
@test MSA._sequence_mask(vec(matrix_mask), pfam) == vec(matrix_mask)
@test MSA._sequence_mask(seq -> seq[1] == GAP, pfam) ==
MSA._sequence_mask(matrix_mask, pfam)
end
@testset "filtersequences!" begin
for msa in pfam_msas[3:4]
@test filtersequences!(copy(msa), 1:4 .< 3) == filtersequences(msa, 1:4 .< 3)
@test nsequences(msa) == 4
end
for msa in pfam_msas
@test getsequence(filtersequences(msa, [1, 2, 3, 4] .== 3), 1) ==
getsequence(msa, 3)
@test getsequence(filtersequences(msa, [1, 2, 3, 4] .> 2), 2) ==
getsequence(msa, 4)
@test getsequence(filtersequences(msa, Bool[false, false, true, true]), 2) ==
getsequence(msa, 4)
@test_throws AssertionError filtersequences(
msa,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] .> 2,
)
@test_throws AssertionError filtersequences(msa, [1, 2, 3] .> 2)
end
end
@testset "filtercolumns!" begin
for msa in pfam_msas[3:4]
@test filtercolumns!(copy(msa), 1:110 .< 11) == filtercolumns(msa, 1:110 .< 11)
@test ncolumns(msa) == 110
end
for msa in pfam_msas
@test vec(
getresidues(getsequence(filtercolumns(msa, collect(1:110) .<= 10), 4)),
) == res"QTLNSYKMAE"
@test_throws AssertionError filtercolumns(msa, [1, 2, 3] .> 2)
@test_throws AssertionError filtercolumns(msa, collect(1:200) .<= 10)
end
@testset "filtercolumns! for sequences" begin
annseq = getsequence(pfam, 4)
seq = getsequence(pfam_na, 4) # na: not annotated
# Sequences are matrices
@test annseq[1, vec(annseq .== Residue('Q'))] == res"QQQQQQQQQQQQQQ"
@test seq[1, vec(seq .== Residue('Q'))] == res"QQQQQQQQQQQQQQ"
filtered_annseq = filtercolumns!(copy(annseq), annseq .== Residue('Q'))
filtered_seq = filtercolumns!(copy(seq), seq .== Residue('Q'))
@test filtercolumns(seq, seq .== Residue('Q')) ==
filtercolumns(seq, seq .== Residue('Q'))
@test_throws AssertionError filtercolumns(annseq, 1:(length(seq)-10) .> 2)
@test_throws AssertionError filtercolumns(seq, 1:(length(seq)+10) .<= 10)
# Sequences are matrices
@test vec(getresidues(filtered_annseq)) == res"QQQQQQQQQQQQQQ"
@test vec(getresidues(filtered_seq)) == res"QQQQQQQQQQQQQQ"
@test getannotcolumn(filtered_annseq, "SS_cons") == "XHHEXXXXXXXXXX"
@test getannotresidue(filtered_annseq, "SS") == "XHHEXXXXXXXXXX"
end
end
@testset "Reference" begin
@testset "setreference" begin
for msa in pfam_msas[2:4]
copy_msa = copy(msa)
setreference!(copy_msa, 4)
# Sequences are matrices
@test vec(getresidues(getsequence(copy_msa, 1))) ==
res"QTLNSYKMAEIMYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAICERHPDECEVQYKNRKTTFKWIKQEQKEEQKQEQTQDNIAKIFDAQPANFEQTDQGFIKAKQ"
setreference!(copy_msa, "C3N734_SULIY/1-95")
@test_throws ErrorException setreference!(copy_msa, "FALSE_SEQID/1-95")
@test vec(getresidues(getsequence(copy_msa, 4))) ==
res"QTLNSYKMAEIMYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAICERHPDECEVQYKNRKTTFKWIKQEQKEEQKQEQTQDNIAKIFDAQPANFEQTDQGFIKAKQ"
@test copy_msa == msa
end
end
@testset "gapstrip!" begin
for msa in pfam_msas[3:4]
copy_msa = copy(msa)
setreference!(copy_msa, 4)
gapstrip!(copy_msa, gaplimit = 1.0, coveragelimit = 0.0)
@test size(copy_msa) == (4, 110)
setreference!(copy_msa, 1)
gapstrip!(copy_msa)
                @test residuefraction(copy_msa[1, :]) == 1.0
end
for msa in pfam_msas[1:2]
@test size(gapstrip(msa, gaplimit = 1.0, coveragelimit = 0.0)) == (4, 92)
@test residuefraction(gapstrip(msa)[1, :]) == 1.0
end
end
@testset "adjustreference!" begin
for msa in pfam_msas[3:4]
copy_msa = copy(msa)
adjustreference!(copy_msa)
                @test residuefraction(copy_msa[1, :]) == 1.0
end
for msa in pfam_msas[1:2]
@test residuefraction(adjustreference(msa)[1, :]) == 1.0
end
end
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 1147 |
@testset "MSA Stats" begin
msa_types = (
Matrix{Residue},
NamedResidueMatrix{Array{Residue,2}},
MultipleSequenceAlignment,
AnnotatedMultipleSequenceAlignment,
)
pf09645_sto = joinpath(DATA, "PF09645_full.stockholm")
gaoetal2011 = joinpath(DATA, "Gaoetal2011.fasta")
gaoetal_msas = [read_file(gaoetal2011, FASTA, T) for T in msa_types]
pfam_msas = [read_file(pf09645_sto, Stockholm, T) for T in msa_types]
@testset "Gaps" begin
for msa in gaoetal_msas
@test gapfraction(msa) == 0.0
@test gapfraction(getsequence(msa, 1)) == 0.0
@test gapfraction(msa[1, :]) == 0.0
@test residuefraction(msa) == 1.0
@test sum(coverage(msa)) == 6.0
end
for msa in pfam_msas
@test gapfraction(getsequence(msa, 4)) == 0.0
@test gapfraction(msa[:, 1]) == 0.75
@test residuefraction(getsequence(msa, 4)) == 1.0
@test residuefraction(msa[:, 1]) == 0.25
@test coverage(msa)[4] == 1.0
end
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 13618 |
@testset "MultipleSequenceAlignment" begin
@testset "Type Hierarchy" begin
@test AbstractAlignedObject <: AbstractMatrix{Residue}
@test AbstractMultipleSequenceAlignment <: AbstractAlignedObject
@test AbstractAlignedSequence <: AbstractAlignedObject
@test MultipleSequenceAlignment <: AbstractMultipleSequenceAlignment
@test AnnotatedMultipleSequenceAlignment <: AbstractMultipleSequenceAlignment
@test AlignedSequence <: AbstractAlignedSequence
@test AnnotatedAlignedSequence <: AbstractAlignedSequence
@test AnnotatedMultipleSequenceAlignment <: AnnotatedAlignedObject
@test AnnotatedAlignedSequence <: AnnotatedAlignedObject
@test MultipleSequenceAlignment <: UnannotatedAlignedObject
@test AlignedSequence <: UnannotatedAlignedObject
end
@testset "Creation" begin
M = reshape(reinterpret(Residue, collect(1:21)), (3, 7))
m = NamedArray(M)
T = permutedims(M, [2, 1])
S = reshape(reinterpret(Residue, collect(1:21)), (1, 21))
s = NamedArray(S)
@test MultipleSequenceAlignment(M) == MultipleSequenceAlignment(m)
@test AnnotatedMultipleSequenceAlignment(M) == AnnotatedMultipleSequenceAlignment(m)
@test AlignedSequence(S) == AlignedSequence(s)
@test AnnotatedAlignedSequence(S) == AnnotatedAlignedSequence(s)
@test MultipleSequenceAlignment(M) != MultipleSequenceAlignment(T)
@test AnnotatedMultipleSequenceAlignment(M) != AnnotatedMultipleSequenceAlignment(T)
@test_throws AssertionError AlignedSequence(M)
@test_throws AssertionError AnnotatedAlignedSequence(M)
end
@testset "MSA & sequences" begin
M = reshape(reinterpret(Residue, collect(1:21)), (3, 7))
msa = MultipleSequenceAlignment(M)
annotated_msa = AnnotatedMultipleSequenceAlignment(M)
S = reshape(reinterpret(Residue, collect(1:21)), (1, 21))
sequence = AlignedSequence(S)
annotated_sequence = AnnotatedAlignedSequence(S)
@testset "Getters" begin
@test isa(annotations(annotated_msa), Annotations)
@test isa(annotations(annotated_sequence), Annotations)
# unannotated objects return empty annotations
for unannotated_object in
(Matrix{Residue}(M), msa, Matrix{Residue}(S), sequence)
@test isempty(annotations(unannotated_object))
@test isa(annotations(unannotated_object), Annotations)
end
for object in (msa, annotated_msa, sequence, annotated_sequence)
@test isa(namedmatrix(object), NamedResidueMatrix{Array{Residue,2}})
end
end
@testset "Dimension names" begin
for object in (msa, annotated_msa, sequence, annotated_sequence)
@test dimnames(namedmatrix(object)) == ["Seq", "Col"]
end
end
@testset "Convert" begin
msa2annot = AnnotatedMultipleSequenceAlignment(msa)
annot2msa = MultipleSequenceAlignment(annotated_msa)
seq2annot = AnnotatedAlignedSequence(sequence)
annot2seq = AlignedSequence(annotated_sequence)
@test isa(annotations(msa2annot), Annotations)
@test isa(annotations(seq2annot), Annotations)
# unannotated objects return empty annotations
for unannotated_object in (annot2msa, annot2seq)
@test isempty(annotations(unannotated_object))
@test isa(annotations(unannotated_object), Annotations)
end
@test namedmatrix(msa2annot) == namedmatrix(msa)
@test namedmatrix(annot2msa) == namedmatrix(annotated_msa)
@test namedmatrix(seq2annot) == namedmatrix(sequence)
@test namedmatrix(annot2seq) == namedmatrix(annotated_sequence)
end
@testset "AbstractArray Interface" begin
@test size(msa) == (3, 7)
@test size(annotated_msa) == (3, 7)
@test size(sequence) == (1, 21)
@test size(annotated_sequence) == (1, 21)
for object in (msa, annotated_msa, sequence, annotated_sequence)
@test length(object) == 21
end
end
@testset "Indexing" begin
for object in (msa, annotated_msa)
for i = 1:3, j = 1:7
@test object[string(i), string(j)] == object[i, j]
end
end
for object in (sequence, annotated_sequence)
for j = 1:7
@test object["1", string(j)] == object[1, j]
@test object[j] == object[1, j]
# Special sequence indexing:
@test object[string(j)] == object[1, j]
end
end
end
@testset "Show" begin
out = IOBuffer()
show(out, MIME"text/plain"(), msa)
str = String(take!(out))
@test startswith(str, "MultipleSequenceAlignment : ")
@test occursin("Seq", str)
@test occursin("Col", str)
@test length(split(str, '\n')) == 6
show(out, MIME"text/plain"(), annotated_msa)
str = String(take!(out))
@test startswith(
str,
"AnnotatedMultipleSequenceAlignment with 0 annotations : ",
)
@test occursin("Seq", str)
@test occursin("Col", str)
@test length(split(str, '\n')) == 6
show(out, MIME"text/plain"(), sequence)
str = String(take!(out))
@test startswith(str, "AlignedSequence : ")
@test occursin("Seq", str)
@test occursin("Col", str)
@test length(split(str, '\n')) == 4
show(out, MIME"text/plain"(), annotated_sequence)
str = String(take!(out))
@test startswith(str, "AnnotatedAlignedSequence with 0 annotations : ")
@test occursin("Seq", str)
@test occursin("Col", str)
@test length(split(str, '\n')) == 4
end
@testset "Transpose, i.e. permutedims" begin
@test size(permutedims(msa)) == (7, 3)
@test size(permutedims(annotated_msa)) == (7, 3)
@test size(permutedims(sequence)) == (21, 1)
@test size(permutedims(annotated_sequence)) == (21, 1)
end
@testset "Get residues" begin
@test getresidues(msa) == M
@test getresidues(annotated_msa) == M
@test getresidues(sequence) == S
@test getresidues(annotated_sequence) == S
for object in (msa, annotated_msa, sequence, annotated_sequence)
@test isa(getresidues(object), Matrix{Residue})
end
for object in (msa, annotated_msa, sequence, annotated_sequence)
@test getresiduesequences(msa) == [res"ADEIMSY", res"RCGLFTV", res"NQHKPW-"]
end
end
@testset "Size" begin
for object in (M, NamedArray(M), msa, annotated_msa)
@test ncolumns(object) == 7
@test nsequences(object) == 3
end
for object in (S, NamedArray(S), sequence, annotated_sequence)
@test ncolumns(object) == 21
@test nsequences(object) == 1
end
end
@testset "Get sequences" begin
for object in (msa, annotated_msa)
for seq = 1:3
@test getsequence(object, string(seq)) == getsequence(msa, seq)
@test size(getsequence(msa, seq)) == (1, 7)
end
end
end
@testset "Sequence names" begin
for object in (M, NamedArray(M), msa, annotated_msa)
@test sequencenames(object) == ["1", "2", "3"]
# Iterators
iterator = sequencename_iterator(object)
@test first(iterator) == "1"
@test length(iterator) == 3
@test !isempty(iterator)
@test collect(iterator) == ["1", "2", "3"]
@test_throws MethodError iterator[1] # no getindex defined for iterators
end
end
@testset "Rename sequences" begin
# rename_sequences!
for object in (NamedArray(M), msa, annotated_msa)
copied_object = deepcopy(object)
if isa(copied_object, AnnotatedMultipleSequenceAlignment)
setannotsequence!(copied_object, "1", "Name", "One")
setannotresidue!(copied_object, "1", "SS", "HHHHHHH")
end
new_object = rename_sequences!(copied_object, ["I", "II", "III"])
@test new_object == copied_object
@test sequencenames(copied_object) == ["I", "II", "III"]
if isa(copied_object, AnnotatedMultipleSequenceAlignment)
@test getannotsequence(copied_object, "I", "Name") == "One"
@test getannotresidue(copied_object, "I", "SS") == "HHHHHHH"
end
end
# rename_sequences
for object in (NamedArray(M), msa, annotated_msa)
new_object = rename_sequences(object, ["I", "II", "III"])
@test sequencenames(object) == ["1", "2", "3"]
@test sequencenames(new_object) == ["I", "II", "III"]
end
# rename one or two sequences
for object in (NamedArray(M), msa, annotated_msa)
copied_object = deepcopy(object)
if isa(copied_object, AnnotatedMultipleSequenceAlignment)
setannotsequence!(copied_object, "1", "Name", "One")
setannotresidue!(copied_object, "1", "SS", "HHHHHHH")
end
                # Test only rename_sequences with Pairs, since rename_sequences
                # calls rename_sequences! and the Pairs are converted to a Dict
                # and then to Vectors using _newnames
new_object = rename_sequences(copied_object, "1" => "I", "2" => "II")
@test sequencenames(copied_object) == ["1", "2", "3"]
@test sequencenames(new_object) == ["I", "II", "3"]
if isa(new_object, AnnotatedMultipleSequenceAlignment)
@test getannotsequence(new_object, "I", "Name") == "One"
@test getannotresidue(new_object, "I", "SS") == "HHHHHHH"
end
end
end
@testset "Column names" begin
for object in (M, NamedArray(M), msa, annotated_msa)
@test columnnames(object) == ["1", "2", "3", "4", "5", "6", "7"]
# Iterators
iterator = columnname_iterator(object)
@test first(iterator) == "1"
@test length(iterator) == 7
@test !isempty(iterator)
@test collect(iterator) == ["1", "2", "3", "4", "5", "6", "7"]
@test_throws MethodError iterator[1] # no getindex defined for iterators
end
end
@testset "Column mapping" begin
for object in (NamedArray(M), msa, annotated_msa)
@test getcolumnmapping(object) == [1, 2, 3, 4, 5, 6, 7]
end
end
@testset "Sequence as string" begin
for object in (M, NamedArray(M), msa, annotated_msa)
@test stringsequence(object, 1) == "ADEIMSY"
@test stringsequence(getsequence(object, 1)) == "ADEIMSY"
end
for object in (NamedArray(M), msa, annotated_msa)
@test stringsequence(msa, "1") == "ADEIMSY"
end
end
@testset "Copy and setindex!" begin
copy_msa = copy(msa)
deepcopy_msa = deepcopy(msa)
copy_annotated_msa = copy(annotated_msa)
deepcopy_annotated_msa = deepcopy(annotated_msa)
for x in [copy_msa, deepcopy_msa, copy_annotated_msa, deepcopy_annotated_msa]
x[1, :] = res"YSMIEDA"
@test vec(x[1, :]) == res"YSMIEDA"
x["2", :] = res"YSMIEDA"
@test vec(x["2", :]) == res"YSMIEDA"
x[:, 1] = res"YYY"
@test vec(x[:, 1]) == res"YYY"
x[:, "2"] = res"YYY"
@test vec(x[:, "2"]) == res"YYY"
x[end, end] = 'X'
@test x[end, end] == XAA
@test length(unique(x)) != 21
end
copy_seq = copy(sequence)
deepcopy_seq = deepcopy(sequence)
copy_annotated_seq = copy(annotated_sequence)
deepcopy_annotated_seq = deepcopy(annotated_sequence)
for x in [copy_seq, deepcopy_seq, copy_annotated_seq, deepcopy_annotated_seq]
x[1] = XAA
@test x[1] == XAA
x["1", "2"] = XAA
@test x["1", "2"] == XAA
x[end] = 'X'
@test x[end] == XAA
# Special setindex! for sequences:
x["3"] = GAP
@test x["3"] == GAP
@test length(unique(x)) == 19
end
@test length(unique(msa)) == 21
@test length(unique(annotated_msa)) == 21
@test length(unique(sequence)) == 21
@test length(unique(annotated_sequence)) == 21
end
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 4025 |
@testset "Residue" begin
@testset "Comparisons" begin
@test GAP == GAP
@test XAA == XAA
@test GAP != XAA
for i = 1:22, j = 1:22
if j != i
@test Residue(i) != Residue(j)
else
@test Residue(i) == Residue(j)
end
end
end
@testset "Convert" begin
alphabet = "ARNDCQEGHILKMFPSTWYV"
@test Int[Residue(char) for char in alphabet] == Int[i for i = 1:20]
@test Int[Residue(char) for char in lowercase(alphabet)] == Int[21 for i = 1:20]
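        # Summary of the conversion rules exercised here and below: lowercase
        # letters are read as insert states and map to GAP, while ambiguous or
        # non-standard uppercase codes such as U, O, B, Z, J and X map to XAA.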
@testset "GAP" begin
@test Int(GAP) == 21
@test Char(GAP) == '-'
@test GAP == Residue('-')
@test GAP == Residue('*')
@test GAP == Residue('.')
for X in "UOBZJX"
@test GAP != Residue(X)
@test GAP == Residue(lowercase(X))
end
@test GAP != Residue(42)
@test GAP != Residue(-1)
end
@testset "XAA" begin
@test Int(XAA) == 22
@test Char(XAA) == 'X'
for X in "UOBZJX"
@test XAA == Residue(X)
@test XAA != Residue(lowercase(X))
end
@test XAA == Residue(42)
@test XAA == Residue(-1)
end
end
@testset "Valid Residues" begin
for res in res"ARNDCQEGHILKMFPSTWYV-X"
@test isvalid(res)
end
@test isvalid(Residue('ñ'))
@test isvalid(Residue(42))
@test !isvalid(reinterpret(Residue, 42))
end
@testset "Strings" begin
alphabet = "ARNDCQEGHILKMFPSTWYV-X"
@test res"ARNDCQEGHILKMFPSTWYV-X" == Residue[char for char in alphabet]
@test String(res"ARNDCQEGHILKMFPSTWYV-X") == alphabet
end
@testset "String vector" begin
msa = ["DAWAEF", "DAWAED", "DAYCMD"]
badmsa = ["DAWAEF", "DAWAED", "DAM"]
@test convert(Matrix{Residue}, msa) == Residue[
'D' 'A' 'W' 'A' 'E' 'F'
'D' 'A' 'W' 'A' 'E' 'D'
'D' 'A' 'Y' 'C' 'M' 'D'
]
@test_throws ErrorException convert(Matrix{Residue}, String[])
@test_throws ErrorException convert(Matrix{Residue}, badmsa)
end
@testset "Random" begin
for i = 1:5000
res = rand(Residue)
@test res != GAP
@test res != XAA
@test isvalid(res)
@test typeof(res) == Residue
end
end
@testset "Initialized arrays" begin
@test size(rand(Residue, 20)) == (20,)
@test typeof(rand(Residue, 20)) == Array{Residue,1}
@test size(rand(Residue, 20, 30)) == (20, 30)
@test typeof(rand(Residue, 20, 30)) == Array{Residue,2}
@test zero(Residue) == GAP
@test one(Residue) == XAA
@test zeros(Residue, 4) == [GAP, GAP, GAP, GAP]
@test ones(Residue, 4) == [XAA, XAA, XAA, XAA]
@test zeros(Residue, 2, 2) == [
GAP GAP
GAP GAP
]
@test ones(Residue, 2, 2) == [
XAA XAA
XAA XAA
]
end
@testset "Other Base methods" begin
for i = 1:22
@test bitstring(i) == bitstring(Residue(i))
end
@test typemin(Residue) == Residue(1)
@test typemax(Residue) == XAA
end
@testset "Show" begin
io = IOBuffer()
alphabet = "ARNDCQEGHILKMFPSTWYV-X"
for char in alphabet
show(io, Residue(char))
@test String(take!(io)) == String([char])
end
show(io, reinterpret(Residue, -30))
@test String(take!(io)) == "�"
end
@testset "Print" begin
io = IOBuffer()
alphabet = "ARNDCQEGHILKMFPSTWYV-X"
for char in alphabet
print(io, Residue(char))
@test String(take!(io)) == String([char])
end
print(io, reinterpret(Residue, -30))
@test String(take!(io)) == "X"
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 7954 |
@testset "AnnotatedSequence" begin
seq = AnnotatedSequence("id", res"ARNDCQEGHILKMFPSTWYV", Annotations())
seq_from_string = AnnotatedSequence("id", "arn-.DCQEGHILKMFPSTWYV", Annotations())
seq_no_id = AnnotatedSequence(res"ARNDCQEGHILKMFPSTWYV", Annotations())
seq_from_string_no_id = AnnotatedSequence("arn-.DCQEGHILKMFPSTWYV", Annotations())
sequences = [seq, seq_from_string, seq_no_id, seq_from_string_no_id]
@testset "Creation" begin
for sequence in sequences
@test dimnames(sequence) == ["Seq", "Pos"]
@test namedmatrix(sequence) == namedmatrix(seq) # as the ids are different
end
end
@testset "Interfaces : AbstractArray" begin
@test IndexStyle(AnnotatedSequence) == IndexLinear()
@test size(seq) == (1, 20) # A sequence is stored as a 1xN matrix
@test length(seq) == 20
@test getindex(seq, 1) == Residue('A')
@test getindex(seq, 1:3) == res"ARN"
@test join(Char(res) for res in seq) == "ARNDCQEGHILKMFPSTWYV" # Iteration
end
@testset "Sequence ID" begin
@test sequence_id(seq) == "id"
@test sequence_id(seq_from_string) == "id"
@test sequence_id(seq_no_id) == ""
@test sequence_id(seq_from_string_no_id) == ""
end
@testset "Equality" begin
# ==
@test seq == seq_from_string # same id and sequence
@test seq_no_id == seq_from_string_no_id
@test seq !== seq_no_id # different id, same sequence
@test seq_from_string !== seq_from_string_no_id
# hash
@test hash(seq) == hash(seq_from_string)
@test hash(seq_no_id) == hash(seq_from_string_no_id)
@test hash(seq) !== hash(seq_no_id)
@test hash(seq_from_string) !== hash(seq_from_string_no_id)
# isequal
@test isequal(seq, seq_from_string)
@test isequal(seq_no_id, seq_from_string_no_id)
@test !isequal(seq, seq_no_id)
@test !isequal(seq_from_string, seq_from_string_no_id)
end
@testset "String" begin
@test stringsequence(seq) == "ARNDCQEGHILKMFPSTWYV"
@test join(seq) == "ARNDCQEGHILKMFPSTWYV"
end
@testset "From file" begin
pir = read_file(joinpath(DATA, "emboss.pir"), PIRSequences)
@test isa(pir, Vector{AnnotatedSequence})
@test length(pir) == 1 # there is only one sequence in the file
seq = pir[1]
@test join(seq[1:12]) == "GDVEGKGIFTMC"
@test sequence_id(seq) == "AZBR"
@test getannotsequence(seq, "Type") == "P1"
@test getannotsequence(seq, "Title") ==
"finger protein zfpA - turnip fern chloroplast"
# This should show a warning, as the sequence name is given as a feature
@test getannotsequence(seq, "AZBR", "Type") == "Type" # returns the default value
@test getannotsequence(seq, "AZBR", "Title") == "Title"
@testset "ThorAxe's transcripts.pir" begin
# This file contains the first four sequences from the ThorAxe output for POLR3B
# The annotations correspond to s-exon symbols. There is one symbol per residue.
# The list of symbols is at the end of the sequence identifier.
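            # A hypothetical example of the layout described above: an identifier
            # ending in the token "abc" paired with the per-residue annotation
            # "aaabbbccc" would satisfy both checks below, i.e. one symbol per
            # residue, and the unique symbols joined in order equal to the last
            # token of the identifier.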
transcripts = read_file(joinpath(DATA, "transcripts.pir"), PIRSequences)
@test isa(transcripts, Vector{AnnotatedSequence})
@test length(transcripts) == 4
for seq in transcripts
@test !isempty(annotations(seq))
@test length(seq) == length(getannotsequence(seq, "Title"))
@test split(sequence_id(seq))[end] ==
join(unique(getannotsequence(seq, "Title")))
end
end
@testset "UniProt's FASTA (canonical & isoform)" begin
# This file contains the canonical and isoform sequences for the human protein
# POLR3B (Q9NW08) in FASTA format as downloaded from UniProt.
fasta = read_file(joinpath(DATA, "Q9NW08.fasta"), FASTASequences)
@test isa(fasta, Vector{AnnotatedSequence})
@test length(fasta) == 2
Ns = [1_133, 1_075]
for (i, seq) in enumerate(fasta)
@test startswith(sequence_id(seq), "sp|Q9NW08")
@test isempty(getannotsequence(seq))
@test endswith(join(seq), "SKYNE")
@test length(seq) == Ns[i]
end
end
end
@testset "From an MSA" begin
raw = read_file(joinpath(DATA, "gaps.txt"), RawSequences)
fasta = read_file(joinpath(DATA, "ids.fasta"), FASTASequences) # same sequence, different ids
@test isa(raw, Vector{AnnotatedSequence})
@test isa(fasta, Vector{AnnotatedSequence})
@test length(raw) == 10
@test length(fasta) == 4
for (i, seq) in enumerate(raw) # from gaps.txt
n_res = 10 - (i - 1) # the first sequence has 0 gaps, and the last one has 9 gaps
@test size(seq) == (1, n_res)
@test length(seq) == n_res
@test sequence_id(seq) == string(i)
@test isempty(annotations(seq)) # no annotations
end
for i = 2:4
@test join(fasta[i]) == join(fasta[1]) # all sequences have the same residues
end
# but different ids
@test sequence_id.(fasta) == [
"SEQUENCE_1",
" SEQUENCE_2",
"MCHU - Calmodulin - Human, rabbit, bovine, rat, and chicken",
"gi|5524211|gb|AAD44166.1| cytochrome b [Elephas maximus maximus]",
]
end
@testset "Getter Functions" begin
annot = Annotations(
OrderedDict("FileFeature" => "FileAnnotation"),
Dict(("SeqID", "Type") => "P1", ("SeqID", "Title") => "Protein Title"),
Dict("ColFeature" => "HHHHHH"),
Dict(("SeqID", "ResFeature") => "001100"),
)
seq = AnnotatedSequence("SeqID", "ARNDCQ", annot)
@test getannotfile(seq) == OrderedDict("FileFeature" => "FileAnnotation")
@test getannotcolumn(seq) == Dict("ColFeature" => "HHHHHH")
@test getannotsequence(seq, "Type") == "P1"
@test getannotsequence(seq, "Title") == "Protein Title"
@test getannotresidue(seq, "ResFeature") == "001100"
end
@testset "Setter Functions" begin
new_seq = deepcopy(seq)
setannotfile!(new_seq, "NewFileFeature", "NewFileAnnotation")
@test getannotfile(new_seq, "NewFileFeature") == "NewFileAnnotation"
setannotcolumn!(new_seq, "NewColFeature", "::.. ")
@test getannotcolumn(new_seq, "NewColFeature") == "::.. "
setannotsequence!(new_seq, "NewType", "NewTypeAnnotation")
@test getannotsequence(new_seq, "NewType") == "NewTypeAnnotation"
setannotresidue!(new_seq, "NewResFeature", "ARnd..")
@test getannotresidue(new_seq, "NewResFeature") == "ARnd.."
end
@testset "IO" begin
@testset "FASTASequences" begin
in_fasta = read_file(joinpath(DATA, "Q9NW08.fasta"), FASTASequences)
io = IOBuffer()
print_file(io, in_fasta, FASTASequences)
seekstart(io)
out_fasta = parse_file(io, FASTASequences)
@test in_fasta == out_fasta
end
@testset "PIRSequences" begin
in_pir = read_file(joinpath(DATA, "transcripts.pir"), PIRSequences)
io = IOBuffer()
print_file(io, in_pir, PIRSequences)
seekstart(io)
out_pir = parse_file(io, PIRSequences)
@test in_pir == out_pir
end
@testset "RawSequences" begin
in_raw = read_file(joinpath(DATA, "raw_sequences.txt"), RawSequences)
io = IOBuffer()
print_file(io, in_raw, RawSequences)
seekstart(io)
out_raw = parse_file(io, RawSequences)
@test in_raw == out_raw
end
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 6562 |
@testset "Shuffle" begin
msa_types = (
Matrix{Residue},
NamedResidueMatrix{Array{Residue,2}},
MultipleSequenceAlignment,
AnnotatedMultipleSequenceAlignment,
)
N = length(msa_types)
pf09645_sto = joinpath(DATA, "PF09645_full.stockholm")
msas = [read_file(pf09645_sto, Stockholm, T) for T in msa_types]
gaps = [msa .== GAP for msa in msas]
lcol = [mean(msa .== Residue('L'), dims = 1) for msa in msas]
lseq = [mean(msa .== Residue('L'), dims = 2) for msa in msas]
Random.seed!(42)
@testset "General" begin
msa = msas[1]
for dim in [1, 2]
aln = copy(msa)
shuffle_msa!(aln, dims = dim)
@test aln != msa
@test (aln .== GAP) == gaps[1] # default: fixed gaps
aln = copy(msa)
shuffle_msa!(aln, dims = dim, fixedgaps = true)
@test aln != msa
@test (aln .== GAP) == gaps[1]
aln = copy(msa)
shuffle_msa!(aln, dims = dim, fixedgaps = false)
@test aln != msa
@test (aln .== GAP) != gaps[1]
end
@test_throws AssertionError shuffle_msa(msa, dims = 0, fixedgaps = true)
@test_throws AssertionError shuffle_msa(msa, dims = 3, fixedgaps = true)
end
@testset "Columns" begin
for i = 1:N
# Fixed gaps
msa = msas[i]
aln = shuffle_msa(msa, dims = 2, fixedgaps = true)
@test aln != getresidues(msa)
@test (aln .== GAP) == gaps[i]
@test lcol[i] == mean(aln .== Residue('L'), dims = 1)
@test lseq[i] != mean(aln .== Residue('L'), dims = 2)
# Change gap positions
aln = shuffle_msa(msa, dims = 2, fixedgaps = false)
@test aln != getresidues(msa)
@test (aln .== GAP) != gaps[i]
@test lcol[i] == mean(aln .== Residue('L'), dims = 1)
end
end
@testset "Sequences" begin
for i = 1:N
# Fixed gaps
msa = msas[i]
aln = shuffle_msa(msa, dims = 1, fixedgaps = true)
@test aln != getresidues(msa)
@test (aln .== GAP) == gaps[i]
@test lcol[i] != mean(aln .== Residue('L'), dims = 1)
@test lseq[i] == mean(aln .== Residue('L'), dims = 2)
# Change gap positions
aln = shuffle_msa(msa, dims = 1, fixedgaps = false)
@test aln != getresidues(msa)
@test (aln .== GAP) != gaps[i]
@test lseq[i] == mean(aln .== Residue('L'), dims = 2)
end
end
@testset "Reference" begin
for msa in msas
ref = getsequence(msa, 1)
for dims in [1, 2]
for gaps in [true, false]
aln = shuffle_msa(
msa,
dims = dims,
fixedgaps = gaps,
fixed_reference = true,
)
@test getsequence(aln, 1) == ref
end
end
end
msa = msas[end] # AnnotatedMultipleSequenceAlignment
aln = shuffle_msa(msa, dims = 1, 1, fixed_reference = true)
@test msa == aln
end
@testset "Subset" begin
for msa in msas
seqs_to_move = [3, 4]
shuffled_seqs = shuffle_msa(msa, seqs_to_move, dims = 1)
@test msa[1:2, :] == shuffled_seqs[1:2, :]
@test msa[seqs_to_move, :] != shuffled_seqs[seqs_to_move, :]
cols_to_move = [9, 10, 11, 12]
shuffled_cols = shuffle_msa(MersenneTwister(0), msa, cols_to_move, dims = 2)
@test msa[:, 1:8] == shuffled_cols[:, 1:8]
@test msa[:, cols_to_move] != shuffled_cols[:, cols_to_move]
# Annotations
if isa(msa, AnnotatedMultipleSequenceAlignment)
@test isempty(
getannotsequence(shuffled_seqs, "C3N734_SULIY/1-95", "Shuffled", ""),
)
@test isempty(
getannotsequence(shuffled_seqs, "H2C869_9CREN/7-104", "Shuffled", ""),
)
@test getannotsequence(shuffled_seqs, "Y070_ATV/2-70", "Shuffled", "") ==
"true"
@test getannotsequence(shuffled_seqs, "F112_SSV1/3-112", "Shuffled", "") ==
"true"
# 1111
# 1234567890123
@test startswith(
getannotcolumn(shuffled_cols, "Shuffled", ""),
"0000000011110",
)
# MIToS modifications
any(
startswith("2 sequences shuffled."),
values(getannotfile(shuffled_seqs)),
)
any(startswith("4 columns shuffled."), values(getannotfile(shuffled_cols)))
end
end
annot_msa = read_file(
pf09645_sto,
Stockholm,
AnnotatedMultipleSequenceAlignment,
generatemapping = true,
useidcoordinates = true,
)
@testset "Shuffling a single sequence or column" begin
ref = getsequence(annot_msa, 1)
@test getsequence(shuffle_msa(annot_msa, 1, dims = 1), 1) != ref
@test getsequence(shuffle_msa(annot_msa, "C3N734_SULIY/1-95", dims = 1), 1) !=
ref
# 10 == "15" : "EKQE"
shuffled_col_a =
shuffle_msa(MersenneTwister(3210), annot_msa, 10, dims = 2)[:, 10]
shuffled_col_b =
shuffle_msa(MersenneTwister(3210), annot_msa, "15", dims = 2)[:, "15"]
@test shuffled_col_a == shuffled_col_b # as we used the same seed
@test replace(replace(join(shuffled_col_a), "E" => ""), "K" => "") == "Q"
@test replace(replace(join(shuffled_col_b), "E" => ""), "K" => "") == "Q"
@test annot_msa[:, 10] != shuffled_col_a
@test annot_msa[:, "15"] != shuffled_col_b
end
@testset "Delete the SeqMap annotation of shuffled sequences" begin
shuffled_seqs = shuffle_msa(annot_msa, [2, 3], dims = 1)
@test getannotsequence(shuffled_seqs, "H2C869_9CREN/7-104", "SeqMap", "") == ""
@test getannotsequence(shuffled_seqs, "F112_SSV1/3-112", "SeqMap", "") != ""
end
end
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 726 |
@testset "Three letters name" begin
@test three2residue("ALA") == Residue('A')
@test three2residue("Ala") == Residue('A')
@test three2residue("ala") == Residue('A')
@test_throws ErrorException three2residue("AL")
@test_throws ErrorException three2residue("Alanine")
@test residue2three(Residue('A')) == "ALA"
@test_throws ErrorException residue2three(GAP)
@test_throws ErrorException residue2three(reinterpret(Residue, -30))
@test three2residue("XAA") == XAA
@test three2residue("UNK") == XAA
    # Example of a three-letter amino acid sequence
seq = "AlaCysAspGluPheGlyHisIleLysAsxXaaGlx"
@test [three2residue(seq[i:i+2]) for i = 1:3:length(seq)] == res"ACDEFGHIKXXX"
end
 | MIToS | https://github.com/diegozea/MIToS.jl.git |
["MIT"] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 1961 |
@testset "AlphaFoldDB Tests" begin
@testset "query_alphafolddb tests" begin
@testset "Valid UniProt ID" begin
structure_info = query_alphafolddb("A0A0C5B5G6")
@test structure_info["uniprotAccession"] == "A0A0C5B5G6"
@test structure_info["uniprotId"] == "MOTSC_HUMAN"
@test match(r"^https.+\.pdb$", structure_info["pdbUrl"]) !== nothing
end
# @testset "Invalid UniProt Accession" begin
# @test_throws HTTP.Exceptions.StatusError query_alphafolddb("INVALID_ACCESSION")
# end
end
@testset "download_alphafold_structure tests" begin
@testset "Download PDB format" begin
outfile = download_alphafold_structure("A0A0C5B5G6", format = PDBFile)
if isfile(outfile)
try
res = read_file(outfile, PDBFile)
@test length(res) == 16
finally
rm(outfile)
end
end
end
@testset "Download mmCIF format" begin
outfile = download_alphafold_structure("A0A0C5B5G6", format = MMCIFFile)
if isfile(outfile)
try
res = read_file(outfile, MMCIFFile)
@test length(res) == 16
finally
rm(outfile)
end
end
end
@testset "Unsupported format" begin
struct UnsupportedFormat <: FileFormat end
@test_throws ArgumentError download_alphafold_structure(
"A0A0C5B5G6",
format = UnsupportedFormat,
)
end
end
@testset "_extract_filename_from_url tests" begin
@testset "Extract filename" begin
filename =
MIToS.PDB._extract_filename_from_url("https://example.com/structure.pdb")
@test filename == "structure.pdb"
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 15402 | @testset "Contacts" begin
txt(code) = joinpath(DATA, string(uppercase(code), ".pdb"))
@testset "Piccolo" begin
# Using data from http://www-cryst.bioc.cam.ac.uk/~richard/piccolo/piccolo.php?PDB=1IGY (28/Sep/2015)
code = "1IGY"
pdb = read_file(txt(code), PDBFile)
# Modify the next line if ligands are added to AtomsData.jl
@test sum(check_atoms_for_interactions(r) for r in pdb) ==
sum(r.id.group == "ATOM" for r in pdb)
@testset "findheavy" begin
for res in pdb
heavy = findheavy(res)
for i in eachindex(res.atoms)
if i in heavy
@test res.atoms[i].element != "H"
else
@test res.atoms[i].element == "H"
end
end
end
end
@testset "Interface between chain A and D" begin
            # Contacts: Chain A (4 residues) and Chain D (3 residues)
C1 = residuesdict(pdb, model = "1", chain = "A", group = "ATOM")
C2 = residuesdict(pdb, model = "1", chain = "D", group = "ATOM")
TRUE = [
("126", "311"),
("183", "309"),
("183", "310"),
("184", "309"),
("187", "309"),
("187", "310"),
("187", "311"),
("187", "312"),
("187", "319"),
("210", "237"),
("211", "312"),
("212", "237"),
("213", "237"),
("213", "312"),
("213", "313"),
("214", "237"),
]
for (resnum1, resnum2) in TRUE
res1 = C1[resnum1]
res2 = C2[resnum2]
@test contact(res1, res2, 6.5)
if resnum2 == "309" && (resnum1 == "184" || resnum1 == "187")
@test ionic(res1, res2)
else
@test !ionic(res1, res2)
end
if (resnum1 == "211" && resnum2 == "312") ||
(resnum1 == "212" && resnum2 == "237")
@test vanderwaals(res1, res2)
else
@test !vanderwaals(res1, res2)
end
if resnum1 == "212" && resnum2 == "237"
@test hydrophobic(res1, res2)
else
@test !hydrophobic(res1, res2)
end
if resnum1 == "211" && resnum2 == "312"
@test vanderwaalsclash(res1, res2)
else
@test !vanderwaalsclash(res1, res2)
end
@test !aromaticsulphur(res1, res2)
@test !pication(res1, res2)
@test !disulphide(res1, res2)
@test !aromatic(res1, res2)
@test !hydrogenbond(res1, res2)
@test !covalent(res1, res2)
end
end
@testset "Aromatic between chain A and B" begin
C1 = residuesdict(pdb, model = "1", chain = "A", group = "ATOM")
C2 = residuesdict(pdb, model = "1", chain = "B", group = "ATOM")
@test aromatic(C1["36"], C2["103"])
@test aromatic(C1["94"], C2["47"])
@test aromatic(C1["94"], C2["50"])
@test aromatic(C1["96"], C2["47"])
@test aromatic(C1["98"], C2["103"])
@test aromatic(C1["135"], C2["174"])
@test !aromatic(C1["96"], C2["50"])
end
end
@testset "1AKS" begin
pdb = read_file(joinpath(DATA, "1AKS.xml.gz"), PDBML)
CA = residuesdict(pdb, model = "1", chain = "A", group = "ATOM")
CB = residuesdict(pdb, model = "1", chain = "B", group = "ATOM")
@test aromaticsulphur(CA["20"], CB["157"])
@test !aromaticsulphur(CA["29"], CB["157"])
@test pication(CA["20"], CB["159"])
@test pication(CA["40"], CB["151"])
@test pication(CA["91"], CB["234"])
@test pication(CA["91"], CB["237"])
@test !pication(CA["57"], CB["215"])
@test disulphide(CA["22"], CB["157"])
@test disulphide(CA["128"], CB["232"])
@test disulphide(CA["136"], CB["201"])
@test aromatic(CA["91"], CB["234"])
@test aromatic(CA["91"], CB["237"])
@test !aromatic(CA["141"], CB["151"])
@test !aromatic(CA["141"], CB["152"])
@test !aromatic(CA["57"], CB["215"])
@test !aromatic(CA["90"], CB["237"])
@test ionic(CA["87"], CB["245"]) # 1aksAB A 87 LYS NZ B 245 ASN OXT
@test ionic(CA["107"], CB["245"])
@test ionic(CA["135"], CB["159"])
@test covalent(CA["22"], CB["157"])
@test covalent(CA["128"], CB["232"])
@test covalent(CA["136"], CB["201"])
for (res1, res2) in Tuple{String,String}[
("136", "160"),
("17", "189"),
("20", "157"),
("140", "156"),
("138", "158"),
("144", "150"),
("137", "200"),
]
@test hydrogenbond(CA[res1], CB[res2])
end
for (res1, res2) in Tuple{String,String}[
("17", "221A"),
("138", "160"),
("87", "245"),
("89", "245"),
("16", "194"),
("17", "189"),
("17", "191"),
("17", "220"),
("20", "157"),
("22", "157"),
("124", "232"),
("128", "232"),
("134", "201"),
("136", "201"),
("137", "157"),
("143", "191"),
("16", "156"),
("124", "204"),
("124", "210"),
("129", "210"),
("143", "192"),
("144", "156"),
("47", "238"),
("47", "242"),
("48", "242"),
("51", "242"),
("53", "212"),
("53", "238"),
("55", "212"),
("103", "212"),
("103", "238"),
("105", "238"),
("105", "242"),
("123", "238"),
("16", "158"),
("21", "154"),
("22", "155"),
("27", "155"),
("30", "155"),
("47", "209"),
("53", "209"),
("72", "154"),
("121", "209"),
("123", "209"),
("138", "158"),
("141", "155"),
("20", "159"),
("135", "159"),
("137", "159"),
("99", "180"),
("100", "180"),
("29", "198"),
("141", "152"),
("144", "152"),
("51", "241"),
("89", "241"),
("100", "177"),
("103", "229"),
("105", "241"),
("89", "237"),
("91", "237"),
("99", "215"),
("103", "237"),
("105", "237"),
("101", "234"),
("103", "234"),
("138", "228"),
("143", "151"),
("29", "200"),
("121", "200"),
("123", "231"),
("123", "235"),
("124", "231"),
("124", "235"),
("129", "162"),
("134", "162"),
("136", "183"),
("136", "199"),
("138", "183"),
("138", "199"),
("138", "213"),
("51", "245"),
]
@test hydrophobic(CA[res1], CB[res2])
end
for (res1, res2) in Tuple{String,String}[
("136", "160"),
("107", "245"),
("16", "194"),
("17", "189"),
("124", "232"),
("22", "156"),
("142", "192"),
("22", "155"),
("140", "155"),
("45", "198"),
("57", "195"),
("143", "149"),
("140", "194"),
("142", "194"),
("19", "157"),
("20", "157"),
("22", "157"),
("128", "232"),
("134", "201"),
("136", "201"),
("138", "157"),
("16", "156"),
("124", "210"),
("125", "204"),
("127", "210"),
("139", "156"),
("140", "156"),
("143", "192"),
("55", "196"),
("142", "193"),
("103", "212"),
("16", "158"),
("53", "209"),
("72", "154"),
("122", "209"),
("124", "209"),
("138", "158"),
("141", "155"),
("136", "159"),
("100", "180"),
("29", "198"),
("30", "198"),
("134", "161"),
("142", "152"),
("144", "152"),
("42", "195"),
("43", "195"),
("57", "214"),
("72", "153"),
("73", "153"),
("74", "153"),
("102", "214"),
("143", "150"),
("144", "150"),
("145", "146"),
("145", "150"),
("145", "147"), # OXT
("102", "229"),
("91", "237"),
("101", "234"),
("143", "151"),
("121", "200"),
("134", "162"),
("137", "200"),
("100", "177"),
("133", "162"),
]
@test vanderwaalsclash(CA[res1], CB[res2])
end
for (res1, res2) in Tuple{String,String}[
("17", "221A"),
("135", "160"),
("89", "245"),
("100", "179"),
("101", "179"),
("107", "245"),
("134", "202"),
("16", "189"),
("16", "194"),
("20", "157"),
("22", "157"),
("136", "201"),
("137", "157"),
("44", "196"),
("40", "193"),
("134", "161"),
("124", "209"),
("135", "159"),
("44", "198"),
("51", "245"),
("102", "214"),
("29", "200"),
("17", "189"),
("140", "194"),
("141", "194"),
("142", "194"),
("19", "157"),
("27", "157"),
("124", "232"),
("128", "232"),
("134", "201"),
("135", "201"),
("138", "157"),
("143", "191"),
("16", "156"),
("20", "156"),
("21", "156"),
("22", "156"),
("122", "204"),
("139", "156"),
("140", "156"),
("43", "196"),
("124", "204"),
("124", "210"),
("125", "204"),
("127", "210"),
("129", "210"),
("142", "192"),
("143", "192"),
("144", "156"),
("17", "188A"),
("18", "188A"),
("44", "197"),
("54", "196"),
("55", "196"),
("122", "203"),
("142", "193"),
("145", "148"),
("47", "238"),
("51", "242"),
("137", "158"),
("138", "158"),
("53", "212"),
("54", "212"),
("55", "212"),
("103", "212"),
("103", "238"),
("105", "238"),
("123", "238"),
("16", "158"),
("136", "159"),
("137", "159"),
("21", "154"),
("21", "155"),
("22", "155"),
("27", "155"),
("30", "155"),
("45", "209"),
("47", "209"),
("53", "209"),
("45", "198"),
("133", "161"),
("71", "154"),
("71", "155"),
("72", "154"),
("121", "209"),
("122", "209"),
("140", "155"),
("141", "154"),
("141", "155"),
("17", "188"),
("128", "230"),
("98", "180"),
("99", "180"),
("100", "180"),
("29", "198"),
("30", "198"),
("135", "161"),
("138", "198"),
("139", "198"),
("141", "152"),
("142", "152"),
("144", "152"),
("16", "190"),
("17", "146"),
("42", "195"),
("72", "153"),
("43", "195"),
("57", "195"),
("57", "214"),
("58", "195"),
("71", "153"),
("132", "164"),
("141", "153"),
("143", "149"),
("143", "150"),
("144", "150"),
("145", "146"),
("145", "147"),
("145", "149"),
("73", "153"),
("74", "153"),
("145", "150"),
("89", "241"),
("100", "177"),
("102", "229"),
("105", "241"),
("91", "237"),
("92", "237"),
("99", "215"),
("138", "228"),
("143", "151"),
("103", "237"),
("105", "237"),
("91", "234"),
("101", "234"),
("103", "234"),
("121", "200"),
("124", "231"),
("124", "235"),
("132", "162"),
("133", "162"),
("134", "162"),
("136", "162"),
("136", "199"),
("136", "160"),
("138", "160"),
("136", "200"),
("137", "199"),
("137", "200"),
("138", "183"),
("138", "199"),
]
@test vanderwaals(CA[res1], CB[res2])
end
end
@testset "Vectorized contact/distance" begin
code = "2VQC"
pdb = read_file(txt(code), PDBFile)
for criteria in ["All", "CA", "CB", "Heavy"]
dist = distance(pdb, criteria = criteria)
sq_d = squared_distance(pdb, criteria = criteria)
cont = contact(pdb, 6.05, criteria = criteria)
@test all(diag(cont))
@test all(diag(dist) .== 0.0)
@test all(diag(sq_d) .== 0.0)
@test all((dist .<= 6.05) .== cont)
@test all((sq_d .<= 36.6025) .== cont) # 6.05^2
end
end
@testset "Proximity Mean" begin
code = "2VQC"
pdb = read_file(txt(code), PDBFile)
residues = select_residues(
pdb,
model = "1",
chain = "A",
group = "ATOM",
residue = x -> x in ["62", "64", "65"],
)
@test contact(residues, 6.05) == ([
1 1 0
1 1 1
0 1 1
] .== 1) # All == Heavy (2VQC doesn't have Hs)
@test proximitymean(residues, [1.0, 2.0, 3.0], 6.05) == [2.0, 2.0, 2.0]
@test proximitymean(residues, [10.0, 15.0, 30.0], 6.05) == [15.0, 20.0, 15.0]
@test proximitymean(residues, [1.0, 2.0, 3.0], 6.05, include = true) ==
[3, 12 / 3, 5] ./ 2.0
@test proximitymean(residues, [10.0, 15.0, 30.0], 6.05, include = true) ==
[25, 110 / 3, 45] ./ 2.0
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 1148 | @testset "AtomsData internals" begin
@test length(PDB._3_letter_aa) == 20
s = Set{Tuple{String,String}}()
PDB._add_CTER_O!(s)
@test length(s) == 20 * 3
for aa in PDB._3_letter_aa
@test (aa, "OXT") in s
@test (aa, "OT2") in s
@test (aa, "OT1") in s
end
d = Dict{Tuple{String,String},Float64}()
PDB._add_CTER_O!(d, 1.0)
@test length(d) == 20 * 3
for aa in PDB._3_letter_aa
@test d[(aa, "OXT")] == 1.0
@test d[(aa, "OT2")] == 1.0
@test d[(aa, "OT1")] == 1.0
end
value = Set(String["OXT", "OT2", "OT1"])
gd = Dict{String,Set{String}}()
PDB._generate_dict!(gd, d)
@test length(gd) == 20
for aa in PDB._3_letter_aa
@test gd[aa] == value
end
gs = Dict{String,Set{String}}()
PDB._generate_dict!(gs, d)
@test length(gs) == 20
for aa in PDB._3_letter_aa
@test gs[aa] == value
end
as = PDB._generate_interaction_keys(d, s, s, s, s)
@test length(as) == 20
for aa in PDB._3_letter_aa
@test as[aa] == value
end
@test isa(PDB._interaction_keys, Dict{String,Set{String}})
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 7788 | @testset "Kabsch algorithm" begin
@testset "Test I" begin
a = [
1 1 0
2 1 0
1+cos(pi / 4) 1-sin(pi / 4) 0
1 0 0
]
b = [
1 1 0
1 2 0
1+cos(pi / 4) 1+sin(pi / 4) 0
] # mean(b,1) 1.2357 1.56904 0.0
sa = a[1:3, :]
ma = mean(sa, dims = 1) # mean x, y, z for a[1:3,:]
# Center b, sa, a
center!(b)
sa[:, :] = sa .- ma
a[:, :] = a .- ma # center != 0
@test isapprox(vec(mean(b, dims = 1)), [0.0, 0.0, 0.0], atol = 1e-13)
@test isapprox(vec(mean(sa, dims = 1)), [0.0, 0.0, 0.0], atol = 1e-13)
F = kabsch(b, sa) # Reference: b
R = sa * F
RR = a * F
@test R ≈ b
@test RR[1:3, :] ≈ b
@test RR[4, :] .- RR[1, :] ≈ [1.0, 0.0, 0.0]
@test sqrt((RR[1, 1] - RR[4, 1])^2 + (RR[1, 2] - RR[4, 2])^2) ≈ 1.0
@test isapprox(PDB.rmsd(R, b), 0.0, atol = 1e-13)
@test isapprox(PDB.rmsd(RR[1:3, :], b), 0.0, atol = 1e-13)
end
@testset "BiomolecularStructures' test" begin
P = [
51.65 -1.90 50.07
50.40 -1.23 50.65
50.68 -0.04 51.54
50.22 -0.02 52.85
]
Q = [
51.30 -2.99 46.54
51.09 -1.88 47.58
52.36 -1.20 48.03
52.71 -1.18 49.38
] # P and Q are from BiomolecularStructures.jl' kabsch tests
Qdistances = Float64[sqrt(sum(abs2, Q[j, :] .- Q[i, :])) for i = 1:4, j = 1:4]
center!(P)
center!(Q)
@test isapprox(vec(mean(P, dims = 1)), zeros(3), atol = 1e-13)
@test isapprox(vec(mean(Q, dims = 1)), zeros(3), atol = 1e-13)
rotationmatrix = kabsch(P, Q)
rotated = Q * rotationmatrix
@test PDB.rmsd(P, rotated) ≈ 0.0030426652601371583
# Internal distances mustn't change
@test Float64[sqrt(sum(abs2, Q[j, :] .- Q[i, :])) for i = 1:4, j = 1:4] ≈ Qdistances # Translated
@test Float64[
sqrt(sum(abs2, rotated[j, :] .- rotated[i, :])) for i = 1:4, j = 1:4
] ≈ Qdistances # Translated and rotated
end
@testset "Test II" begin
P = [
-1.0 0.0 0.0
0.0 2.0 0.0
0.0 1.0 0.0
0.0 1.0 1
]
Q = [
0.0 -1.0 -1.0
0.0 -1.0 0
0.0 0.0 0.0
-1.0 0.0 0.0
]
center!(P)
center!(Q)
rotated = Q * kabsch(P, Q)
@test isapprox(PDB.rmsd(P, rotated), 0.695, atol = 0.001)
end
@testset "RMSF" begin
A = [
-1.0 -1.0 -1.0
-1.0 0.0 1.0
]
B = [
1.0 1.0 1.0
-1.0 0.0 1.0
]
w = [0.25, 0.75]
@test_throws ArgumentError mean_coordinates(Matrix{Float64}[A[1:1, :], B])
@test_throws ArgumentError mean_coordinates(Matrix{Float64}[A[:, 1:2], B])
@test_throws ArgumentError mean_coordinates(Matrix{Float64}[A])
@test_throws ArgumentError mean_coordinates(Matrix{Float64}[A, B'])
@test_throws ArgumentError mean_coordinates(Matrix{Float64}[A', B'])
@test mean_coordinates(Matrix{Float64}[A, B]) == [
0.0 0.0 0.0
-1.0 0.0 1.0
]
@test mean_coordinates(Matrix{Float64}[A, B], w) == [
0.5 0.5 0.5
-1.0 0.0 1.0
]
@test mean_coordinates(Matrix{Float64}[A, B], reverse(w)) == [
-0.5 -0.5 -0.5
-1.0 0.0 1.0
]
@test rmsf(Matrix{Float64}[A, B]) == [sqrt(3), 0.0]
@test rmsf(Matrix{Float64}[A, B], w) == [sqrt(0.25 * 6.75 + 0.75 * 0.75), 0.0]
end
@testset "Superimpose PDBs" begin
hemoglobin =
read_file(joinpath(DATA, "2hhb.pdb.gz"), PDBFile, group = "ATOM", model = "1")
α1 = select_residues(hemoglobin, model = "1", chain = "A", group = "ATOM")
α2 = select_residues(hemoglobin, model = "1", chain = "C", group = "ATOM")
β1 = select_residues(hemoglobin, model = "1", chain = "B", group = "ATOM")
β2 = select_residues(hemoglobin, model = "1", chain = "D", group = "ATOM")
a1, a2, rα = superimpose(α1, α2)
@test isapprox(rα, 0.230, atol = 0.001) # Bio3D RMSD
_, _, rα2 = superimpose(α1, α2, zip(eachindex(α1), eachindex(α2)))
@test rα2 == rα
_, _, rα2 = superimpose(α1, α2, zip(1:2, 1:2))
@test rα2 < rα / 10 # aligning only 2 points is accurate
b1, b2, rβ = superimpose(β1, β2)
@test isapprox(rβ, 0.251, atol = 0.001) # Bio3D & Chimera's MatchMaker RMSD
@test length(a1) == length(α1)
@test length(a2) == length(α2)
@test length(b1) == length(β1)
@test length(b2) == length(β2)
@test a1 != α1
@test a2 != α2
@test b1 != β1
@test b2 != β2
@testset "Vector{PDBResidue}" begin
@test isapprox(PDB.rmsd(α1, α2), 0.230, atol = 0.001)
@test isapprox(PDB.rmsd(β1, β2), 0.251, atol = 0.001)
@test isapprox(PDB.rmsd(a1, a2, superimposed = true), 0.230, atol = 0.001)
@test isapprox(PDB.rmsd(b1, b2, superimposed = true), 0.251, atol = 0.001)
ca_a1 = CAmatrix(a1)
ca_a2 = CAmatrix(a2)
ca_b1 = CAmatrix(b1)
ca_b2 = CAmatrix(b2)
@testset "Centered" begin
@test isapprox(mean(ca_a1, dims = 1), zeros(1, 3), atol = 1e-13)
@test isapprox(mean(ca_a2, dims = 1), zeros(1, 3), atol = 1e-13)
@test isapprox(mean(ca_b1, dims = 1), zeros(1, 3), atol = 1e-13)
@test isapprox(mean(ca_b2, dims = 1), zeros(1, 3), atol = 1e-13)
@test PDB._iscentered(ca_a1)
@test PDB._iscentered(ca_a2)
@test PDB._iscentered(ca_b1)
@test PDB._iscentered(ca_b2)
end
@testset "RMSD" begin
@test rα == PDB.rmsd(ca_a1, ca_a2)
@test rβ == PDB.rmsd(ca_b1, ca_b2)
end
end
@testset "coordinatesmatrix and centered..." begin
@test coordinatesmatrix(centeredresidues(α1)) ≈ coordinatesmatrix(a1)
@test coordinatesmatrix(centeredresidues(β1)) ≈ coordinatesmatrix(b1)
@test centeredcoordinates(α1) ≈ coordinatesmatrix(a1)
@test centeredcoordinates(β1) ≈ coordinatesmatrix(b1)
end
@testset "Superimpose with centered PDBs" begin
b12, b22, rβ2 = superimpose(b1, b2)
@test rβ2 ≈ rβ
end
@testset "RMSF" begin
@test rmsf(Vector{PDBResidue}[α1, α2]) ==
rmsf(Vector{PDBResidue}[α1, α2], [0.5, 0.5])
@test rmsf(Vector{PDBResidue}[β1, β2]) ==
rmsf(Vector{PDBResidue}[β1, β2], [0.5, 0.5])
end
end
@testset "PDBResidue without alpha-carbon" begin
small_2WEL = read_file(joinpath(DATA, "2WEL_D_region.pdb"), PDBFile)
small_6BAB = read_file(joinpath(DATA, "6BAB_D_region.pdb"), PDBFile)
aln_2WEL, aln_6BAB, RMSD = superimpose(small_2WEL, small_6BAB)
@test length(aln_2WEL) == 3
@test length(aln_6BAB) == 3
@test small_2WEL[2].atoms[1].coordinates ≉ aln_2WEL[2].atoms[1].coordinates
@test small_6BAB[2].atoms[1].coordinates ≉ aln_6BAB[2].atoms[1].coordinates
@test RMSD < 1.0e-14 # e.g. 6.9e-15, 3.7e-15 & 1.6e-15
@testset "PDB without CA" begin
filter!(atom -> atom.atom != "CA", small_2WEL[1].atoms)
filter!(atom -> atom.atom != "CA", small_6BAB[3].atoms)
@test_throws ArgumentError superimpose(small_2WEL, small_6BAB)
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 18104 | @testset "Parse PDB and PDBML" begin
txt(code) = joinpath(DATA, string(uppercase(code), ".pdb"))
xml(code) = joinpath(DATA, string(uppercase(code), ".xml"))
@testset "2VQC: Missings" begin
code = "2VQC"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
@test findfirst(x -> x.id.number == "4", pdb) ==
findfirst(x -> x.id.number == "4", pdbml)
@test findfirst(x -> x.id.number == "73", pdb) ==
findfirst(x -> x.id.number == "73", pdbml)
end
@testset "Test downloadpdb" begin
code = "2VQC"
pdb = read_file(txt(code), PDBFile) # reference
# PDBML
filename = downloadpdb(code, format = PDBML)
try
pdbml = read_file(filename, PDBML)
@test findfirst(x -> x.id.number == "4", pdb) ==
findfirst(x -> x.id.number == "4", pdbml)
@test findfirst(x -> x.id.number == "73", pdb) ==
findfirst(x -> x.id.number == "73", pdbml)
finally
rm(filename)
end
# mmCIF (the default format)
filename = downloadpdb(code)
try
mmcif = read_file(filename, MMCIFFile)
@test findfirst(x -> x.id.number == "4", pdb) ==
findfirst(x -> x.id.number == "4", mmcif)
@test findfirst(x -> x.id.number == "73", pdb) ==
findfirst(x -> x.id.number == "73", mmcif)
finally
rm(filename)
end
# PDB
filename = downloadpdb(
code,
headers = Dict(
"User-Agent" => "Mozilla/5.0 (compatible; MSIE 7.01; Windows NT 5.0)",
),
format = PDBFile,
)
try
d_pdb = read_file(filename, PDBFile)
@test findfirst(x -> x.id.number == "4", pdb) ==
findfirst(x -> x.id.number == "4", d_pdb)
@test findfirst(x -> x.id.number == "73", pdb) ==
findfirst(x -> x.id.number == "73", d_pdb)
finally
rm(filename)
end
end
@testset "1H4A: Chain A (auth) == Chain X (label)" begin
file = string(xml("1H4A"), ".gz")
auth = read_file(file, PDBML, label = false)
label = read_file(file, PDBML)
@test unique([res.id.chain for res in auth]) == ["X"]
@test unique([res.id.chain for res in label]) == ["A", "B"]
end
@testset "1SSX: Residues with insert codes, 15A 15B" begin
code = "1SSX"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
for residue_list in [pdb, pdbml]
@test findall(res -> res.id.number == "15A", residue_list) == [1]
@test findall(res -> isresidue(res, residue = "15A"), residue_list) == [1]
@test findall(res -> res.id.number == "15B", residue_list) == [2]
@test findall(res -> isresidue(res, residue = "15B"), residue_list) == [2]
end
@testset "Occupancy != 1.0" begin
@test sum(
map(
a -> (a.atom == "HH22" ? a.occupancy : 0.0),
filter(r -> r.id.number == "141", pdbml)[1].atoms,
),
) == 1.0
@test sum([
a.occupancy for a in select_atoms(
pdbml,
model = "1",
chain = "A",
group = All,
residue = "141",
atom = "HH22",
)
]) == 1.0
end
@testset "Best occupancy" begin
atoms_141 = select_atoms(
pdbml,
model = "1",
chain = "A",
group = All,
residue = "141",
atom = "HH22",
)
resid_141 = select_residues(
pdbml,
model = "1",
chain = "A",
group = All,
residue = "141",
)
@test bestoccupancy(atoms_141)[1].occupancy == 0.75
@test bestoccupancy(reverse(atoms_141))[1].occupancy == 0.75
@test bestoccupancy(PDBAtom[atoms_141[2]])[1].occupancy == 0.25
@test length(resid_141[1]) == 48
@test selectbestoccupancy(resid_141[1], collect(1:48)) == 1
@test selectbestoccupancy(resid_141[1], [1, 2]) == 1
@test_throws AssertionError selectbestoccupancy(resid_141[1], Int[])
@test_throws AssertionError selectbestoccupancy(resid_141[1], collect(1:100))
end
@testset "select_atom with All" begin
# ATOM 2 CA ALA A 15A 22.554 11.619 6.400 1.00 6.14 C
@test select_atoms(
pdb,
model = "1",
chain = "A",
group = "ATOM",
residue = All,
atom = r"C.+",
)[1].atom == "CA"
end
end
@testset "read_file with occupancyfilter=true" begin
# `read_file` only atoms with the best occupancy
code = "1SSX"
pdb = read_file(txt(code), PDBFile, occupancyfilter = true)
pdbml = read_file(xml(code), PDBML, occupancyfilter = true)
res_pdb =
select_residues(pdb, model = "1", chain = "A", group = All, residue = "141")
res_pdbml =
select_residues(pdbml, model = "1", chain = "A", group = All, residue = "141")
atm_pdbml = select_atoms(
pdbml,
model = "1",
chain = "A",
group = All,
residue = "141",
atom = "HH22",
)
@test length(atm_pdbml) == 1
@test atm_pdbml[1].occupancy == 0.75
@test length(
select_atoms(
pdb,
model = "1",
chain = "A",
group = All,
residue = "141",
atom = "HH22",
),
) == 1
@test length(res_pdb[1]) == 24
@test length(res_pdbml[1]) == 24
end
@testset "CBN: Identical PDBe ResNum for Residue 22" begin
# <residue dbSource="PDBe" dbCoordSys="PDBe" dbResNum="22" dbResName="SER">
# <crossRefDb dbSource="PDB" dbCoordSys="PDBresnum" dbAccessionId="1cbn" dbResNum="22" dbResName="SER" dbChainId="A"/>
# <crossRefDb dbSource="UniProt" dbCoordSys="UniProt" dbAccessionId="P01542" dbResNum="22" dbResName="P"/>
# ....
# <residue dbSource="PDBe" dbCoordSys="PDBe" dbResNum="22" dbResName="PRO">
# <crossRefDb dbSource="PDB" dbCoordSys="PDBresnum" dbAccessionId="1cbn" dbResNum="22" dbResName="PRO" dbChainId="A"/>
# <crossRefDb dbSource="UniProt" dbCoordSys="UniProt" dbAccessionId="P01542" dbResNum="22" dbResName="P"/>
# ...
code = "1CBN"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
@test length(
select_residues(pdb, model = "1", chain = "A", group = All, residue = "22"),
) == 2
@test length(
select_residues(pdbml, model = "1", chain = "A", group = All, residue = "22"),
) == 2
@test [
r.id.name for
r in select_residues(pdb, model = "1", chain = "A", group = All, residue = "22")
] == ["SER", "PRO"]
@test [
r.id.name for r in
select_residues(pdbml, model = "1", chain = "A", group = All, residue = "22")
] == ["SER", "PRO"]
end
@testset "1AS5: NMR" begin
code = "1AS5"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
@test length(select_residues(pdbml, model = "1", chain = "A")) == 25
@test length(select_residues(pdbml, model = "14", chain = "A")) == 25
@test length(select_residues(pdbml, model = All, chain = "A")) == 25 * 14
end
@testset "1DPO: Inserted residues lack insertion letters" begin
        # Single unnamed chain in 1DPO contains insertions at positions 184 (Gly, Phe),
# 188 (Gly, Lys), and 221 (Ala, Leu) but no insertion letters.
code = "1DPO"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
# Single "A" chain for PDB (auth_asym_id in PDBML)
@test unique([r.id.chain for r in pdb]) == ["A"]
# But 'A':'H' chains for PDBML (label_asym_id)
@test unique([r.id.chain for r in pdbml]) == [string(chain) for chain = 'A':'H']
@test [
r.id.name for
r in select_residues(pdb, model = "1", chain = "A", residue = r"^184[A-Z]?$")
] == ["GLY", "PHE"]
@test [
r.id.name for
r in select_residues(pdbml, model = "1", chain = "A", residue = r"^184[A-Z]?$")
] == ["GLY", "PHE"]
@test [
r.id.name for
r in select_residues(pdb, model = "1", chain = "A", residue = r"^188[A-Z]?$")
] == ["GLY", "LYS"]
@test [
r.id.name for
            r in select_residues(pdbml, model = "1", chain = "A", residue = r"^188[A-Z]?$")
] == ["GLY", "LYS"]
@test [
r.id.name for
r in select_residues(pdb, model = "1", chain = "A", residue = r"^221[A-Z]?$")
] == ["ALA", "LEU"]
@test [
r.id.name for
r in select_residues(pdbml, model = "1", chain = "A", residue = r"^221[A-Z]?$")
] == ["ALA", "LEU"]
end
@testset "1IGY: Insertions" begin
# Insertions have more than one copy of the same amino acid in a single insertion block.
# For example, chain B in 1IGY contains a block of four residues inserted at sequence position 82.
# The block contains Leu-Ser-Ser-Leu.
code = "1IGY"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
@test [
r.id.name for r in select_residues(
pdb,
model = "1",
chain = "B",
group = All,
residue = r"^82[A-Z]?$",
)
] == ["LEU", "SER", "SER", "LEU"]
@test [
r.id.name for r in select_residues(
pdbml,
model = "1",
chain = "B",
group = All,
residue = r"^82[A-Z]?$",
)
] == ["LEU", "SER", "SER", "LEU"]
@test sum(
[
r.id.group for r in
select_residues(pdb, model = "1", chain = "D", group = All, residue = All)
] .== "HETATM",
) == length(
select_residues(pdb, model = "1", chain = "D", group = "HETATM", residue = All),
)
@test sum(
[
r.id.group for r in
select_residues(pdb, model = "1", chain = "D", group = All, residue = All)
] .== "ATOM",
) == length(
select_residues(pdb, model = "1", chain = "D", group = "ATOM", residue = All),
)
end
@testset "1HAG" begin
# Chain E begins with 1H, 1G, 1F, ... 1A, then 1 (in reverse alphabetic order)
code = "1HAG"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
@test unique([res.id.chain for res in pdb]) == ["E", "I"]
@test unique([res.id.chain for res in pdbml]) == ["A", "B", "C", "D", "E"]
# The chain E of PDB is the chain A of PDBML
@test [
r.id.number for r in select_residues(
pdb,
model = "1",
chain = "E",
group = All,
residue = r"^1[A-Z]?$",
)
] == [string(1, code) for code in vcat(collect('H':-1:'A'), "")]
@test [
r.id.number for r in select_residues(
pdbml,
model = "1",
chain = "A",
group = All,
residue = r"^1[A-Z]?$",
)
] == [string(1, code) for code in vcat(collect('H':-1:'A'), "")]
end
@testset "1NSA" begin
# Contains a single (unnamed) protein chain with sequence 7A-95A that continues 4-308.
code = "1NSA"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
# Single "A" chain for PDB (auth_asym_id in PDBML)
@test unique([r.id.chain for r in pdb]) == ["A"]
# But 'A':'F' chains for PDBML (label_asym_id)
@test unique([r.id.chain for r in pdbml]) == [string(chain) for chain = 'A':'F']
ind = findall(r -> r.id.number == "95A", pdbml)[1]
@test pdbml[ind+1].id.number == "4"
ind = findall(r -> r.id.number == "95A", pdb)[1]
@test pdb[ind+1].id.number == "4"
end
@testset "1IAO" begin
# Contains in chain B (in this order) 1S, 2S, 323P-334P, 6-94, 94A, 95-188, 1T, 2T
code = "1IAO"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
pdb_B = select_residues(pdb, model = "1", chain = "B")
pdbml_B = select_residues(pdbml, model = "1", chain = "B")
for B in [pdb_B, pdbml_B]
@test B[findall(r -> r.id.number == "2S", B)[1]+1].id.number == "323P"
@test B[findall(r -> r.id.number == "334P", B)[1]+1].id.number == "6"
@test B[findall(r -> r.id.number == "94", B)[1]+1].id.number == "94A"
@test B[findall(r -> r.id.number == "94A", B)[1]+1].id.number == "95"
@test B[findall(r -> r.id.number == "188", B)[1]+1].id.number == "1T"
@test B[findall(r -> r.id.number == "1T", B)[1]+1].id.number == "2T"
end
end
@testset "3BTT" begin
# ASN 115 from chain E has no CA
code = "3BTT"
pdb = read_file(txt(code), PDBFile)
pdbml = read_file(xml(code), PDBML)
res = pdb[97]
res_ml = pdbml[97]
        @test res.id.name == "ASN"
        @test res_ml.id.name == "ASN"
@test getCA(res) === missing
@test getCA(res_ml) === missing
end
@testset "Foldseek" begin
# PDB files generated by Foldseek have only 66 columns; element identifiers
# are missing (represented as empty strings). Those files only contain CA atoms.
pdb_file = joinpath(DATA, "foldseek_example.pdb")
pdb = read_file(pdb_file, PDBFile)
# The example file has only 8 residues and one chain (A)
for (i, res) in zip(1:8, pdb)
@test res.id.group == "ATOM"
@test res.id.number == string(i)
@test res.id.chain == "A"
@test length(res.atoms) == 1
atom = res.atoms[1]
@test atom.atom == "CA"
@test isempty(atom.element) # ""
# All residues in the example have 1.0 occupancy and 0.00 temperature factor
@test atom.occupancy == 1.0
@test atom.B == "0.00"
end
end
end
@testset "RESTful PDB Interface" begin
@testset "Percent-encoding" begin
@test PDB._escape_url_query("name=John Snow") == "name%3DJohn%20Snow"
unreserved = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~"
@test PDB._escape_url_query(unreserved) == unreserved
@test PDB._escape_url_query("£") == "%C2%A3"
@test PDB._escape_url_query("€") == "%E2%82%AC"
end
@test getpdbdescription("4HHB")["rcsb_entry_info"]["resolution_combined"][1] == 1.74
@test getpdbdescription("104D")["rcsb_entry_info"]["resolution_combined"] === nothing
mktemp() do path, io
filename = downloadpdbheader("4HHB", filename = path)
@test isfile(filename)
@test filename == path
file_content = read(filename, String)
@test occursin("resolution_combined", file_content)
@test occursin("1.74", file_content)
end
end
@testset "Write PDB files" begin
txt(code) = joinpath(DATA, string(uppercase(code), ".pdb"))
@testset "2VQC" begin
code = "2VQC"
io = IOBuffer()
pdb = read_file(txt(code), PDBFile)
print_file(io, pdb, PDBFile)
printed = split(String(take!(io)), '\n')
@test length(printed) == 609 # Only ATOM, HETATM & END + 1 because the trailing \n
@test printed[1] ==
"ATOM 1 N THR A 4 2.431 19.617 6.520 1.00 24.37 N "
@test printed[607] ==
"HETATM 607 O HOH A2025 13.807 38.993 2.453 1.00 33.00 O "
end
@testset "NMR" begin
code = "1AS5"
io = IOBuffer()
pdb = read_file(txt(code), PDBFile)
print_file(io, pdb, PDBFile)
printed = split(String(take!(io)), '\n')
@test sum(map(x -> startswith(x, "MODEL "), printed)) == 14 # 14 models
@test sum(map(x -> x == "ENDMDL", printed)) == 14 # 14 models
end
@testset "2 Chains" begin
code = "1IAO"
io = IOBuffer()
pdb = read_file(txt(code), PDBFile)
print_file(io, pdb, PDBFile)
printed = split(String(take!(io)), '\n')
# MIToS only prints TER for the ATOM group if the chain changes.
# Some modified residues are annotated as HETATM in the middle of the ATOM chain:
        # TER cannot be printed between ATOM and HETATM records if the chain doesn't change.
        # Only prints TER between chains A and B
@test sum(map(x -> startswith(x, "TER "), printed)) == 1
@test filter!(s -> occursin(r"TER ", s), printed)[1] ==
"TER 1418 TRP A 178 "
end
@testset "read_file/write consistency" begin
io = IOBuffer()
for code in ["2VQC", "1IAO", "1NSA", "1HAG", "1IGY", "1DPO", "1AS5", "1CBN", "1SSX"]
readed = read_file(txt(code), PDBFile)
print_file(io, readed, PDBFile)
readed_writed_readed = parse_file(String(take!(io)), PDBFile)
@test readed_writed_readed == readed
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 3105 | @testset "Check amino acid residues" begin
@test PDB._is_aminoacid("MET") # methionine
@test PDB._is_aminoacid("MSE") # selenomethionine
@test !PDB._is_aminoacid("HOH") # water
@test !PDB._is_aminoacid("SO4") # sulfate
@test !PDB._is_aminoacid("MG") # magnesium
@test !PDB._is_aminoacid("CA") # calcium
@test !PDB._is_aminoacid("A") # adenine
@test PDB._is_aminoacid("ALA") # alanine
end
@testset "Extract protein sequences from PDB" begin
file(code) = joinpath(DATA, string(uppercase(code), ".pdb"))
@testset "2VQC: Missings & selenomethionines" begin
res = read_file(file("2VQC"), PDBFile)
# seq length : 118
# missing residues : 1-9, 80-118
# number of modelled residues : 70
seqs = modelled_sequences(res) # default group : All (ATOM + HETATM)
key = (model = "1", chain = "A")
seq = seqs[key]
@test length(seq) == 70
@test seq ==
"TLNSYKMAEIMYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAICERHPDECEVQYKNRKTTFKWIK"
# the first methionine is in fact a selenomethionine (MSE)
# "HETATM 52 CA MSE A 10"
seq_atom = modelled_sequences(res, group = "ATOM")[key]
@test length(seq_atom) == 70 - 2 # there are two missing selenomethionines in the
# sequence because we only selected ATOM
# TLNSYK M AEI M YKI
@test seq_atom ==
"TLNSYKAEIYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAICERHPDECEVQYKNRKTTFKWIK"
@test modelled_sequences(res, group = "HETATM")[key] == "MM"
end
@testset "1AS5: NMR" begin
res = read_file(file("1AS5"), PDBFile)
seqs = modelled_sequences(res)
@test length(seqs) == 14
for i = 1:14
seq = seqs[(model = string(i), chain = "A")]
@test length(seq) == 24
@test seq == "HPPCCLYGKCRRYPGCSSASCCQR"
end
# test the order
@test join(collect(k.model for k in keys(seqs))) == "1234567891011121314"
seqs_model_1 = modelled_sequences(res, model = "1")
@test length(seqs_model_1) == 1
@test seqs_model_1[(model = "1", chain = "A")] == seqs[(model = "1", chain = "A")]
end
@testset "1IGY: Insertions & chains" begin
res = read_file(file("1IGY"), PDBFile)
seqs = modelled_sequences(res)
# chains
@test length(seqs) == 4
@test seqs[(model = "1", chain = "A")] == seqs[(model = "1", chain = "C")]
@test seqs[(model = "1", chain = "B")] == seqs[(model = "1", chain = "D")]
# insertions in chain B
seqs_chain_B = modelled_sequences(res, chain = "B")
# do not include the chain A sequence when only chain B is demanded
@test !haskey(seqs_chain_B, (model = "1", chain = "A"))
# check the sequence of chain B
@test haskey(seqs_chain_B, (model = "1", chain = "B"))
seq_B = seqs_chain_B[(model = "1", chain = "B")]
@test seqs[(model = "1", chain = "B")] == seq_B
@test occursin("LSSL", seq_B)
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 6609 | @testset "Download" begin
pfam_code = "PF11591"
@test_throws ErrorException downloadpfam("2vqc")
filename = downloadpfam(pfam_code, filename = tempname() * ".gz")
try
aln = read_file(filename, Stockholm)
if size(aln) == (6, 34)
@test getannotfile(aln, "ID") == "2Fe-2S_Ferredox"
end
finally
rm(filename)
end
end
@testset "PDB code" begin
msa = read_file(joinpath(DATA, "PF09645_full.stockholm"), Stockholm)
@test getseq2pdb(msa)["F112_SSV1/3-112"] == [("2VQC", "A")]
end
@testset "Mapping PDB/Pfam" begin
msa_file = joinpath(DATA, "PF09645_full.stockholm")
sifts_file = joinpath(DATA, "2vqc.xml.gz")
pdb_file = joinpath(DATA, "2VQC.xml")
msa = read_file(msa_file, Stockholm, generatemapping = true, useidcoordinates = true)
cmap = msacolumn2pdbresidue(msa, "F112_SSV1/3-112", "2VQC", "A", "PF09645", sifts_file)
res = residuesdict(read_file(pdb_file, PDBML), model = "1", chain = "A", group = "ATOM")
# -45 20 pdb
#.....QTLNSYKMAEIMYKILEK msa seq
# 123456789012345678 msa col
# 345678901234567890 uniprot 3-20
# **** *
#12345678901234567890123 ColMap
@test_throws KeyError cmap[5] # insert
@test cmap[6] == "" # missing
@test cmap[7] == "4"
@test cmap[8] == "5"
@test cmap[23] == "20"
#.....QTLNSYKMAEIMYKILEKKGELTLEDILAQFEISVPSAYNIQRALKAICERHPDECEVQYKNRKTTFKWIKQEQKEEQKQEQTQDNIAKIFDAQPANFEQTDQGFIKAKQ..... msa seq
#.....X---HHHHHHHHHHHHHHHSEE-HHHHHHHH---HHHHHHHHHHHHHHHHH-TTTEEEEE-SS-EEEEE--XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX..... pdb ss
# 111111111111111111111 ColMap hundreds
# 111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999000000000011111111112 ColMap tens
#123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 ColMap ones
# ** **
@test_throws KeyError cmap[116] # insert
@test cmap[115] == "" # missing
@test cmap[77] == "" # missing
@test cmap[76] == "73"
@testset "Residues" begin
msares = msaresidues(msa, res, cmap)
@test_throws KeyError msares[5] # insert
@test_throws KeyError msares[6] # missing
@test msares[7].id.name == "THR" # T
@test msares[8].id.name == "LEU" # L
@test msares[23].id.name == "LYS" # K
@test_throws KeyError msares[116] # insert
@test_throws KeyError msares[117] # missing
@test_throws KeyError msares[77] # missing
@test msares[76].id.name == "LYS" # K
end
@testset "Contacts" begin
@test msacolumn2pdbresidue(
msa,
"F112_SSV1/3-112",
"2VQC",
"A",
"PF09645",
sifts_file,
strict = true,
checkpdbname = true,
) == cmap
@test length(res) == 70
@test length(unique(values(cmap))) == 71
# -45 20 pdb
#.....QTLNSYKMAEIMYKILEK msa seq
# 123456789012345678 msa col
#12345678901234567890123 ColMap
# 345678901234567890 uniprot 3-20
# *** *
@test_throws KeyError res["3"]
@test res[cmap[7]].id.number == "4"
contacts = msacontacts(msa, res, cmap, 6.05)
missings = sum(Int.(isnan.(contacts)), dims = 1)
@test size(contacts) == (110, 110)
@test missings[1] == 110 # missing
@test missings[2] == (110 - (70 - 1)) # 70 residues, 1 diagonal NaN
@test missings[3] == (110 - (70 - 1))
@test missings[18] == (110 - (70 - 1))
@test missings[110] == 110 # missing
@test missings[72] == 110 # missing
@test missings[71] == (110 - (70 - 1))
ncontacts = sum(Int.(contacts .== 1.0), dims = 1)
@test ncontacts[1] == 0
@test ncontacts[2] == 2
@test ncontacts[3] == 6
@testset "using msaresidues" begin
# Test MSA contact map using PDBResidues from the MSA
msares = msaresidues(msa, res, cmap)
ncol = ncolumns(msa)
colmap = getcolumnmapping(msa)
for i = 1:(ncol-1), j = (i+1):ncol
if (colmap[i] in keys(msares)) && (colmap[j] in keys(msares))
@test (contacts[i, j] .== 1.0) ==
contact(msares[colmap[i]], msares[colmap[j]], 6.05)
end
end
@testset "hasresidues" begin
mask = hasresidues(msa, cmap)
@test mask[1] == false
@test mask[2] == true
@test sum(mask) == length(msares)
end
end
@testset "AUC" begin
@test round(
AUC(
buslje09(msa, lambda = 0.05, threshold = 62.0, samples = 0)[2],
contacts,
),
digits = 4,
) == 0.5291
end
end
end
@testset "AUC" begin
ntru = 90
nfal = 100
score_tru = Float16[2 + 2x for x in randn(ntru)]
score_fal = Float16[-2 + 2x for x in randn(nfal)]
msacontacts = NamedArray(
PairwiseListMatrix{Float16,false,Vector{Float16}}(
vcat(ones(Float16, ntru), zeros(Float16, nfal)),
Float16[1.0 for x = 1:20],
20,
),
)
score = NamedArray(
PairwiseListMatrix{Float16,false,Vector{Float16}}(
vcat(score_tru, score_fal),
Float16[NaN for x = 1:20],
20,
),
)
correct = 1.0 - auc(roc(score_tru, score_fal))
@test AUC(score, msacontacts) == correct
score[1:end, 2] .= NaN
msacontacts[1:end, 3] .= NaN
list_score = getlist(getarray(score))
list_contact = getlist(getarray(msacontacts))
n_values = length(list_score)
@test n_values == length(list_contact)
tar = [
list_score[i] for i = 1:n_values if
        !isnan(list_score[i]) & !isnan(list_contact[i]) & (list_contact[i] == 1.0)
]
non = [
list_score[i] for i = 1:n_values if
        !isnan(list_score[i]) & !isnan(list_contact[i]) & (list_contact[i] == 0.0)
]
@test AUC(score, msacontacts) == AUC(roc(tar, non))
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 11862 | @testset "SIFTS Mappings" begin
sifts_file = joinpath(DATA, "2vqc.xml.gz")
@testset "parse" begin
dbs = [
(dbUniProt, "P20220"),
(dbPfam, "PF09645"),
(dbNCBI, "244589"),
(dbPDB, "2vqc"),
(dbSCOP, "153426"),
(dbPDBe, "2vqc"),
]
for (to, toid) in dbs, (from, fromid) in dbs
map = siftsmapping(sifts_file, from, fromid, to, toid)
for (k, v) in map
to == from && @test k == v
end
end
end
@testset "missings = false" begin
map = siftsmapping(
sifts_file,
dbPDBe,
"2vqc",
dbPDB,
"2vqc",
chain = "A",
missings = false,
)
@test_throws KeyError map["9"] # Missing
@test_throws KeyError map["80"] # Missing
@test_throws KeyError map["1"] # Missing
@test map["10"] == "4"
@test map["79"] == "73"
end
@testset "missings = true" begin
map = siftsmapping(sifts_file, dbPDBe, "2vqc", dbPDB, "2vqc", chain = "A")
@test map["9"] == "3" # Missing
@test map["80"] == "74" # Missing
@test map["1"] == "-5" # Missing # Negative Resnum
@test map["10"] == "4"
@test map["79"] == "73"
end
@testset "Insert codes" begin
# 1SSX : Residues with insert codes: 15A 15B
_1ssx_file = joinpath(DATA, "1ssx.xml.gz")
map = siftsmapping(_1ssx_file, dbPDBe, "1ssx", dbPDB, "1ssx", chain = "A")
residue_A = map["1"]
residue_B = map["2"]
residue_C = map["3"]
@test residue_A == "15A"
@test residue_B == "15B"
@test residue_C == "16"
mapII = siftsmapping(_1ssx_file, dbPDB, "1ssx", dbUniProt, "P00778", chain = "A")
@test mapII["15A"] == "200"
@test mapII["15B"] == "201"
@test mapII["16"] == "202"
end
@testset "Multiple InterProt annotations" begin
# 1CBN : Multiple InterProt annotations, the last is used.
# Identical PDBe ResNum for Residue 22:
#
# <residue dbSource="PDBe" dbCoordSys="PDBe" dbResNum="22" dbResName="SER">
# <crossRefDb dbSource="PDB" dbCoordSys="PDBresnum" dbAccessionId="1cbn" dbResNum="22" dbResName="SER" dbChainId="A"/>
# <crossRefDb dbSource="UniProt" dbCoordSys="UniProt" dbAccessionId="P01542" dbResNum="22" dbResName="P"/>
# ....
# <residue dbSource="PDBe" dbCoordSys="PDBe" dbResNum="22" dbResName="PRO">
# <crossRefDb dbSource="PDB" dbCoordSys="PDBresnum" dbAccessionId="1cbn" dbResNum="22" dbResName="PRO" dbChainId="A"/>
# <crossRefDb dbSource="UniProt" dbCoordSys="UniProt" dbAccessionId="P01542" dbResNum="22" dbResName="P"/>
# ...
_1cbn_file = joinpath(DATA, "1cbn.xml.gz")
map = siftsmapping(_1cbn_file, dbPDBe, "1cbn", dbInterPro, "IPR001010", chain = "A")
@test_throws KeyError map["1"] # Without InterPro
@test map["2"] == "2" # Same ResNum for different InterPros
mapII = read_file(_1cbn_file, SIFTSXML, chain = "A")
@test length(mapII[1].InterPro) == 0 # Without InterPro
@test length(mapII[2].InterPro) == 4 # Same ResNum for different InterPros
end
@testset "None" begin
# 4CPA : It has "None"
# <residue dbSource="PDBe" dbCoordSys="PDBe" dbResNum="2" dbResName="GLX">
# <crossRefDb dbSource="PDB" dbCoordSys="PDBresnum" dbAccessionId="4cpa" dbResNum="2" dbResName="GLX" dbChainId="J"/>
# <crossRefDb dbSource="SCOP" dbCoordSys="PDBresnum" dbAccessionId="118683" dbResNum="2" dbResName="GLX" dbChainId="J"/>
# <crossRefDb dbSource="InterPro" dbCoordSys="UniProt" dbAccessionId="IPR011052" dbResNum="None" dbResName="None" dbEvidence="SSF57027"/>
# <crossRefDb dbSource="InterPro" dbCoordSys="UniProt" dbAccessionId="IPR021142" dbResNum="None" dbResName="None" dbEvidence="PD884054"/>
# <crossRefDb dbSource="InterPro" dbCoordSys="UniProt" dbAccessionId="IPR004231" dbResNum="None" dbResName="None" dbEvidence="PF02977"/>
# <residueDetail dbSource="PDBe" property="codeSecondaryStructure">T</residueDetail>
# <residueDetail dbSource="PDBe" property="nameSecondaryStructure">loop</residueDetail>
# </residue>
_4cpa_file = joinpath(DATA, "4cpa.xml.gz")
map = read_file(_4cpa_file, SIFTSXML, chain = "J")
res = filter(r -> r.PDBe.number == "2", map)[1]
@test length(res.InterPro) == 3
for i = 1:3
@test res.InterPro[i].name == ""
@test res.InterPro[i].number == ""
end
end
@testset "NMR" begin
# 1AS5 : NMR
_1as5_file = joinpath(DATA, "1as5.xml.gz")
map = siftsmapping(_1as5_file, dbPDBe, "1as5", dbUniProt, "P56529", chain = "A")
# missings=true : NMR there are not missing residues
@test map["23"] == "73"
@test_throws KeyError map["24"] # Without UniProt
mapII = siftsmapping(_1as5_file, dbPDBe, "1as5", dbPDB, "1as5", chain = "A")
@test mapII["24"] == "24"
end
@testset "Inserted residues lack insertion code" begin
# 1DPO : Inserted residues lack insertion letters
        # Single unnamed chain in 1DPO contains insertions at positions 184 (Gly, Phe),
# 188 (Gly, Lys), and 221 (Ala, Leu) but no insertion letters.
_1dpo_file = joinpath(DATA, "1dpo.xml.gz")
map = siftsmapping(_1dpo_file, dbPDBe, "1dpo", dbPDB, "1dpo", chain = "A")
# Unnamed chain is "A" in SIFTS
@test map["164"] == "184"
@test map["165"] == "184A" # Has insertion code in SIFTS
@test map["169"] == "188"
@test map["170"] == "188A" # Has insertion code in SIFTS
@test map["198"] == "221"
@test map["199"] == "221A" # Has insertion code in SIFTS
end
@testset "Insertion block" begin
# 1IGY : Insertions have more than one copy of the same amino acid in a single
# insertion block. For example, chain B in 1IGY contains a block of four residues
# inserted at sequence position 82. The block contains Leu-Ser-Ser-Leu.
_1igy_file = joinpath(DATA, "1igy.xml.gz")
map = siftsmapping(_1igy_file, dbPDBe, "1igy", dbCATH, "2.60.40.10", chain = "B")
@test map["82"] == "82"
@test map["83"] == "82A"
@test map["84"] == "82B"
@test map["85"] == "82C"
end
@testset "1HAG" begin
# 1HAG : Chain E begins with 1H, 1G, 1F, ... 1A, then 1 (in reverse alphabetic order)
_1hag_file = joinpath(DATA, "1hag.xml.gz")
map = siftsmapping(_1hag_file, dbPDBe, "1hag", dbPDB, "1hag", chain = "E")
@test map["1"] == "1H"
@test map["2"] == "1G"
@test map["3"] == "1F"
@test map["4"] == "1E"
@test map["5"] == "1D"
@test map["6"] == "1C"
@test map["7"] == "1B"
@test map["8"] == "1A"
@test map["9"] == "1"
end
end
@testset "1NSA" begin
# 1NSA : Contains a single (unnamed) protein chain with sequence 7A-95A that continues 4-308.
_1nsa_file = joinpath(DATA, "1nsa.xml.gz")
mapping = read_file(_1nsa_file, SIFTSXML)
@testset "findall & read" begin
four = findall(db -> db.id == "1nsa" && db.number == "4", mapping, dbPDB)[1]
@test findall(db -> db.id == "1nsa" && db.number == "95A", mapping, dbPDB)[1] + 1 ==
four
@test mapping[findall(
db -> db.id == "1nsa" && db.number == "95A",
mapping,
dbPDB,
)][1].PDB.number == "95A"
end
@testset "capture fields" begin
cap = map(mapping) do res
number = get(res, dbPDB, :number, "")
if get(res, dbPDB, :id, "") == "1nsa" && number == "95A"
number
else
missing
end
end
@test cap[[!ismissing(x) for x in cap]][1] == "95A"
end
end
@testset "find & filter" begin
# 1IAO : Contains in chain B (in this order) 1S, 323P-334P, 6-94, 94A, 95-188, 1T, 2T
mapp = read_file(joinpath(DATA, "1iao.xml.gz"), SIFTSXML)
@test filter(
db -> db.id == "1iao" && db.number == "1S" && db.chain == "B",
mapp,
dbPDB,
)[1].PDBe.number == "1"
i =
findall(db -> db.id == "1iao" && db.number == "1S" && db.chain == "B", mapp, dbPDB)[1]
res = mapp[i+2]
@test get(res, dbPDB, :id, "") == "1iao" &&
get(res, dbPDB, :chain, "") == "B" &&
get(res, dbPDB, :number, "") == "323P"
end
@testset "MIToS 1.0 error" begin
sf = joinpath(DATA, "4gcr.xml.gz")
mapping = siftsmapping(sf, dbPfam, "PF00030", dbPDB, "4gcr")
@test mapping["3"] == "2"
end
@testset "download" begin
pdb = "2vqc"
mapping = read_file(joinpath(DATA, "$(pdb).xml.gz"), SIFTSXML)
@test_throws AssertionError downloadsifts(pdb, source = "http")
@test_throws AssertionError downloadsifts(pdb, filename = "bad_name.txt")
@test_throws ErrorException downloadsifts("2vqc_A")
filename_https = downloadsifts(pdb, filename = tempname() * ".xml.gz")
@test length(read_file(filename_https, SIFTSXML)) == length(mapping)
filename_ftp = downloadsifts(pdb, filename = tempname() * ".xml.gz", source = "ftp")
@test length(read_file(filename_ftp, SIFTSXML)) == length(mapping)
end
@testset "Ensembl" begin
# 18GS has multiple Ensembl annotations for each residue
# 18GS also has EC and GO annotations
mapping = read_file(joinpath(DATA, "18gs.xml.gz"), SIFTSXML)
last_res = mapping[end]
@test length(last_res.Ensembl) == 2
@test last_res.Ensembl[1].id == last_res.Ensembl[2].id
@test last_res.Ensembl[1].transcript == "ENST00000398606"
@test last_res.Ensembl[2].transcript == "ENST00000642444"
@test last_res.Ensembl[1].translation == "ENSP00000381607"
@test last_res.Ensembl[2].translation == "ENSP00000493538"
@test last_res.Ensembl[1].exon == "ENSE00001921020"
@test last_res.Ensembl[2].exon == "ENSE00003822108"
end
@testset "SCOP2B" begin
# 1IVO has residues mapped into SCOP2B (05/08/2020)
mapping = read_file(joinpath(DATA, "1ivo.xml.gz"), SIFTSXML)
# First residue with SCOP2B annotation:
# <crossRefDb dbSource="SCOP2B" dbCoordSys="PDBresnum" dbAccessionId="SF-DOMID:8038760" dbResNum="312" dbResName="VAL" dbChainId="A"/>
pos = findfirst(res -> !ismissing(res.SCOP2B), mapping)
first = mapping[pos]
@test length(first.SCOP2) == 0
@test first.SCOP2B.id == "SF-DOMID:8038760"
@test first.SCOP2B.number == "312"
@test first.SCOP2B.name == "VAL"
@test first.SCOP2B.chain == "A"
end
@testset "SCOP2" begin
# 1XYZ has residues mapped to two different SCOP2 domains (05/08/2020)
mapping = read_file(joinpath(DATA, "1xyz.xml.gz"), SIFTSXML)
# First residue with SCOP2 annotations:
# <crossRefDb dbAccessionId="FA-DOMID:8030967" dbCoordSys="PDBresnum" dbSource="SCOP2" dbResName="ASN" dbResNum="516" dbChainId="A"/>
# <crossRefDb dbAccessionId="SF-DOMID:8043346" dbCoordSys="PDBresnum" dbSource="SCOP2" dbResName="ASN" dbResNum="516" dbChainId="A"/>
pos = findfirst(res -> length(res.SCOP2) > 0, mapping)
first = mapping[pos]
@test ismissing(first.SCOP2B)
@test length(first.SCOP2) == 2
@test first.SCOP2[1].id == "FA-DOMID:8030967"
@test first.SCOP2[1].number == "516"
@test first.SCOP2[1].name == "ASN"
@test first.SCOP2[1].chain == "A"
@test first.SCOP2[2].id == "SF-DOMID:8043346"
@test first.SCOP2[2].number == "516"
@test first.SCOP2[2].name == "ASN"
@test first.SCOP2[2].chain == "A"
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | code | 4706 | @testset "get_n_words!" begin
line = "#=GF AC PF00571"
@test get_n_words(line, 1) == String[line]
@test get_n_words(line, 2) == String["#=GF", "AC PF00571"]
@test get_n_words(line, 3) == String["#=GF", "AC", "PF00571"]
@test get_n_words(line, 4) == String["#=GF", "AC", "PF00571"]
@test get_n_words("\n", 1) == String["\n"]
@test get_n_words("#", 1) == String["#"]
# ASCII
str = "#=GR O31698/18-71 SS CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH"
@test get_n_words(str, 3) ==
String["#=GR", "O31698/18-71", "SS CCCHHHHHHHHHHHHHHHEEEEEEEEEEEEEEEEHHH"]
# UTF-8
str = "#=GF CC (Römling U. and Galperin M.Y. “Bacterial cellulose"
@test get_n_words(str, 3) ==
String["#=GF", "CC", "(Römling U. and Galperin M.Y. “Bacterial cellulose"]
str = "#=GF CC not present in all SecA2–SecY2 systems. This family of Asp5 is"
@test get_n_words(str, 3) == String[
"#=GF",
"CC",
"not present in all SecA2–SecY2 systems. This family of Asp5 is",
]
end
@testset "hascoordinates" begin
@test hascoordinates("O83071/192-246")
@test !hascoordinates("O83071")
end
@testset "select_element" begin
@test select_element([1]) == 1
@test_throws ErrorException select_element([])
end
@testset "Matrices to and from lists" begin
@testset "matrix2list" begin
mat = [
1 2 3
4 5 6
7 8 9
]
@test matrix2list(mat) == [2, 3, 6]
@test matrix2list(mat, diagonal = true) == [1, 2, 3, 5, 6, 9]
@test matrix2list(mat, part = "lower") == [4, 7, 8]
@test matrix2list(mat, part = "lower", diagonal = true) == [1, 4, 7, 5, 8, 9]
end
@testset "list2matrix" begin
mat = [
1 2 3
2 5 6
3 6 9
]
@test triu(list2matrix([2, 3, 6], 3), 1) == triu(mat, 1)
@test list2matrix([1, 2, 3, 5, 6, 9], 3, diagonal = true) == mat
end
end
@testset "General IO" begin
@testset "lineiterator" begin
        # Julia 0.6: eachline returns lines without line endings by default
ppap = "pen\npineapple\napple\npen\n"
@test collect(lineiterator(ppap)) == collect(eachline(IOBuffer(ppap)))
@test collect(lineiterator("Hola")) == ["Hola"]
@test collect(lineiterator("Hola\n")) == ["Hola"]
@test collect(lineiterator("\n")) == [""]
@test collect(lineiterator("Hola\nMundo")) == ["Hola", "Mundo"]
@test collect(lineiterator("Hola\nMundo\n")) == ["Hola", "Mundo"]
@test collect(lineiterator("Hola\nMundo\n\n")) == ["Hola", "Mundo", ""]
end
@testset "File checking" begin
file_path = joinpath(DATA, "simple.fasta")
@test isnotemptyfile(file_path)
@test !isnotemptyfile(joinpath(DATA, "emptyfile"))
@test check_file(file_path) == file_path
@test_throws ErrorException check_file("nonexistentfile")
end
@testset "Download file" begin
try
@test ".tmp" == download_file(
"http://www.uniprot.org/uniprot/P69905.fasta",
".tmp",
headers = Dict(
"User-Agent" => "Mozilla/5.0 (compatible; MSIE 7.01; Windows NT 5.0)",
),
)
finally
if isfile(".tmp")
rm(".tmp")
end
end
end
@testset "Download file: redirect" begin
try
# Use https://httpbin.io/ and example.com to test redirection
download_file("https://httpbin.io/redirect-to?url=https://example.com", ".tmp")
@test occursin("Example Domain", read(".tmp", String))
finally
isfile(".tmp") && rm(".tmp")
end
end
@testset "Test _check_gzip_file" begin
for file in readdir(DATA)
filename = joinpath(DATA, file)
if file != "2vqc.xml.gz" # is a decompressed file that has a wrong .gz extension
@test MIToS.Utils._check_gzip_file(filename) == filename
else
@test_throws ErrorException MIToS.Utils._check_gzip_file(filename)
end
end
end
@testset "Download a gz file" begin
# Use https://www.rcsb.org/pdb/files/3NIR.pdb.gz to test downloading a gz file
# without a filename
filename = ""
try
filename = download_file("https://www.rcsb.org/pdb/files/3NIR.pdb.gz")
@test endswith(filename, ".gz")
@test MIToS.Utils._check_gzip_file(filename) == filename
finally
if isfile(filename)
rm(filename)
end
end
end
end
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | docs | 1386 | # Contributing
MIToS is and **Open Source** project and there are different ways to contribute.
Please, use [GitHub issues](https://github.com/diegozea/MIToS.jl/issues) to
**report errors/bugs** or to **ask for new features**.
We welcome contributions in the form of **pull requests**. For your code to be considered
it must meet the following guidelines.
- By making a pull request, you're agreeing to license your code under a MIT license.
- Types and functions must be documented using Julia's docstrings.
- All significant code must be tested.
## Style
- Type names are camel case, with the first letter capitalized.
E.g. `MultipleSequenceAlignment`.
- Function names, apart from constructors, are all lowercase. Include underscores between
words only if the name would be hard to read without. E.g. `covalentradius`,
`check_atoms_for_interactions`.
- Names of private (unexported) functions begin with underscore.
- Separate logical blocks of code with blank lines.
- Generally try to keep lines below 92 columns, unless splitting a long line onto multiple
  lines makes it harder to read.
- Try to use 4-space indentation.
- Do not include headers/titles in the docstrings
- Please include examples whenever possible
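
For instance, here is a minimal sketch of a function documented following these guidelines.
The function `residuefraction` is purely illustrative and is not part of the MIToS API:

```julia
"""
`residuefraction(sequence, residue)`

Return the fraction of positions in `sequence` that are equal to `residue`.
For example, `residuefraction("MIAMI", 'M')` returns `0.4`.
"""
function residuefraction(sequence::AbstractString, residue::Char)
    count(==(residue), sequence) / length(sequence)
end
```
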
## Conduct
We adhere to the [Julia community standards](http://julialang.org/community/standards/).
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | docs | 34224 | ## MIToS.jl Release Notes
### Changes from v2.22.0 to v3.0.0
**MIToS v3.0.0** requires Julia v1.9 or higher, dropping support for older versions. This
release introduces several breaking changes to improve the usability of the package.
When possible, deprecation warnings are used to inform you of the changes.
#### MIToS.MSA
The MSA module now includes ways to read, write, and work with unaligned protein sequences:
- The `MSA` module now exports the `AnnotatedSequence` type to represent a single protein
sequence with annotations. This type is a subtype of the new `AbstractSequence` type,
a subtype of the new `AbstractResidueMatrix` type.
- The `MSA` module now exports the `sequence_id` function to get the identifier of a
sequence object.
- The `MSA` module now defines the `FASTASequences`, `PIRSequences`, and `RawSequences`
file formats to read and write (unaligned) protein sequences in FASTA, PIR, and raw
formats, respectively.
- *[Breaking change]* The behavior of the `getannotresidue`, `getannotsequence`,
`setannotresidue!`, and `setannotsequence!` functions have changed for sequences objects,
such as `AnnotatedSequence`, `AnnotatedAlignedSequence`, and `AlignedSequence`. Now, these
functions take the feature name, rather than the sequence name, as the second
positional argument. As an example of migration,
`getannotsequence(sequence, "sequence_name", "feature_name")` should be replaced by
`getannotsequence(sequence, "feature_name")`. You still need to specify the sequence name
when working with MSA objects.
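  A minimal sketch of that migration, assuming `seq` is an `AnnotatedSequence` and `msa` an
  annotated MSA (the names and the `"feature_name"` annotation are illustrative only):

  ```julia
  # MIToS < 3.0: sequence objects also required the sequence name
  # value = getannotsequence(seq, "sequence_name", "feature_name")

  # MIToS >= 3.0: sequence objects take the feature name directly
  value = getannotsequence(seq, "feature_name")

  # MSA objects still take the sequence name and then the feature name
  value = getannotsequence(msa, "sequence_name", "feature_name")
  ```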
Other changes in the MSA module are:
- *[Breaking change]* The `join` function for `AnnotatedMultipleSequenceAlignment` objects
is deprecated in favor of the `join_msas` function.
- *[Breaking change]* The `Clusters` type is no longer a subtype of `ClusteringResult` from
the `Clustering.jl` package. Instead, the `Clusters` type is now a subtype of the new
`AbstractCluster` type. Support for the `Clustering.jl` interface is still available
through package extensions. You now need to load the `Clustering.jl` package to use the
`assignments`, `nclusters`, and `counts` functions.
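For example, a brief sketch of using that interface through the package extension; the MSA file name is hypothetical, and `hobohmI` is used for clustering as elsewhere in the MIToS documentation:

```julia
using MIToS.MSA
using Clustering # loading Clustering.jl activates the MIToS package extension

msa = read_file("msa.sto", Stockholm) # hypothetical file name
clusters = hobohmI(msa, 62)           # Hobohm I clustering at 62% identity

nclusters(clusters)   # number of clusters
assignments(clusters) # cluster assignment of each sequence
counts(clusters)      # number of sequences in each cluster
```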
#### MIToS.PDB
The PDB module now depends on the `BioStructures` package. The main changes in the PDB
module are:
- The `PDB` module now exports the `MMCIFFile` file format to read and write PDB files in
the mmCIF format (using `BioStructures` under the hood).
- *[Breaking change]* The `download_alphafold_structure` function can now download the
predicted structures from the *AlphaFold Protein Structure Database* using the mmCIF
format (`format=MMCIFFile`). This is the new default format. Therefore, you should use
`format=PDBFile` to get a PDB file like before. For example,
`download_alphafold_structure("P00520")` in previous versions is the same as
`download_alphafold_structure("P00520", format=PDBFile)` in this version.
- *[Breaking change]* The `downloadpdb` function now returns a mmCIF file by default.
Therefore, you should use `format=PDBML` to get a PDBML file. As an example of migration,
`downloadpdb("1IVO")` should be replaced by `downloadpdb("1IVO", format=PDBML)`, unless
you want to get a mmCIF file.
- *[Breaking change]* The `PDBAtom` type now adds two extra fields: `alt_id` and `charge`
to represent the alternative location indicator and the atom's charge, respectively.
This improves the compatibility with the mmCIF format and the `BioStructures` package.
- *[Breaking change]* The `query_alphafolddb` function now returns the EntrySummary object
of the returned JSON response instead of the Root list. Therefore, there is no need to
take the first element of the list to get the required information. For example,
`query_alphafolddb("P00520")[1]["uniprotId"]` would be replaced by
`query_alphafolddb("P00520")["uniprotId"]`.
#### MIToS.Utils.Scripts
- *[Breaking change]* The `MIToS.Utils.Scripts` module and the MIToS scripts have been
moved to their package at [MIToS_Scripts.jl](https://github.com/MIToSOrg/MIToS_Scripts.jl).
Therefore, the `MIToS.Utils.Scripts` module is no longer exported. This allows for a
reduction in the number of MIToS dependencies and improved load time.
### Changes from v2.21.0 to v2.22.0
This version introduces several breaking changes to improve the usability of the
`Information` module. The main changes are:
- *[Breaking change]* The `Information` module deprecates the `Counts` type in favor of
  the new `Frequencies` type. The new type has the same signature and behavior as the old one.
- *[Breaking change]* The `count` function on sequences has been deprecated in favor of the
`frequencies` function, which has the same signature and behavior as the old one.
- *[Breaking change]* The `count!` function is deprecated in favor of `frequencies!`.
  The new function uses keyword arguments to define the weights and pseudocounts. As an
example of migration, `count!(table, weights, pseudocounts, seqs...)` should be replaced
by `frequencies!(table, seqs..., weights=weights, pseudocounts=pseudocounts)`.
- *[Breaking change]* The `probabilities!` method using positional arguments for the
  weights, pseudocounts, and pseudofrequencies is deprecated in favor of the one that uses
keyword arguments. As an example of migration,
`probabilities!(table, weights, pseudocounts, pseudofrequencies, seqs...)`
should be replaced by
`probabilities!(table, seqs..., weights=weights, pseudocounts=pseudocounts, pseudofrequencies=pseudofrequencies)`.
- *[Breaking change]* The `Information` module has deprecated the `entropy` methods on
  `Frequencies` and `Probabilities` in favor of the `shannon_entropy` function. The
definition of the base is now done using the `base` keyword argument. As an example of
migration, `entropy(p, 2)` should be replaced by `shannon_entropy(p, base=2)`.
- *[Breaking change]* The `marginal_entropy` methods based on positional arguments are
deprecated in favor of a method relying on the `margin` and `base` keyword arguments.
As an example of migration, `marginal_entropy(p, 2, 2.0)` should be replaced by
`marginal_entropy(p, margin=2, base=2.0)`.
- *[Breaking change]* The `mutual_information` method based on positional arguments is
deprecated in favor of a method relying on the `base` keyword argument. As an example of
migration, `mutual_information(p, 2)` should be replaced by `mutual_information(p, base=2)`.
- *[Breaking change]* The `mapcolpairfreq!` and `mapseqpairfreq!` functions now use the
  boolean `usediagonal` keyword argument to indicate if the function should be applied to
  the diagonal elements of the matrix (the default is `true`). Before, this was done by passing
`Val{true}` or `Val{false}` as the last positional argument.
- The `mapcolfreq!`, `mapseqfreq!`, `mapcolpairfreq!`, and `mapseqpairfreq!` methods using
keyword arguments, now pass the extra keyword arguments to the mapped function.
- The `Information` module now exports the `mapfreq` function that offers a more high-level
interface to the `mapcolfreq!`, `mapseqfreq!`, `mapcolpairfreq!`, and `mapseqpairfreq!`
functions. This function allows the user to map a function to the residue frequencies or
probabilities of the columns or sequences of an MSA. When `rank = 2`, the function is
  applied to pairs of sequences or columns (see the sketch after this list).
- The `Information` module now exports methods of the `shannon_entropy`, `kullback_leibler`,
`mutual_information`, and `normalized_mutual_information` functions that take an
`AbstractArray{Residue}` as input, e.g. an MSA. Those methods use the `mapfreq` function
under the hood to ease the calculation of the information measures on MSAs.
- The `frequencies!`, `frequencies`, `probabilities!`, and `probabilities` functions now
accept arrays of `Residue`s of any dimension. Therefore, there is no need to use the
`vec` function to convert the arrays to vectors.
- The `MSA` module now exports the `WeightType` union type to represent `weights`.
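A hedged sketch of this new high-level interface; the MSA file name is hypothetical, and the keyword defaults are assumed to work column-wise on probabilities:

```julia
using MIToS.MSA
using MIToS.Information

msa = read_file("msa.sto", Stockholm) # hypothetical file name

# Per-column Shannon entropy using the new methods on arrays of Residues
H = shannon_entropy(msa)

# The same machinery through mapfreq; rank = 2 applies the function to pairs of columns
MI = mapfreq(mutual_information, msa, rank = 2)
```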
### Changes from v2.20.0 to v2.21.0
- *[Breaking change]* The `buslje09` and `BLMI` functions from the `Information` module no
  longer accept a filename and a file format as arguments. You should explicitly read
the MSA using the `read_file` function and then run the `buslje09` or `BLMI` functions
on the returned MSA object. As an example of migration, `buslje09("msa.sto", "Stockholm")`
should be replaced by `buslje09(read_file("msa.sto", Stockholm))`.
### Changes from v2.19.0 to v2.20.0
- *[Breaking change]* The PDB module has deprecated `residues` and `@residues` in favor of
the `select_residues` function that uses keyword arguments.
So, `residues(pdb, "1", "A", "ATOM", All)` or `@residues pdb "1" "A" "ATOM" All` should be
replaced by `select_residues(pdb, model="1", chain="A", group="ATOM")`.
- *[Breaking change]* The PDB module has deprecated `atoms` and `@atoms` in favor of
the `select_atoms` function that uses keyword arguments.
So, `atoms(pdb, "1", "A", "ATOM", All, "CA")` or `@atoms pdb "1" "A" "ATOM" All "CA"` should be
replaced by `select_atoms(pdb, model="1", chain="A", group="ATOM", atom="CA")`.
- *[Breaking change]* The PDB module has deprecated the methods of the `isresidue` and
`residuesdict` functions that rely on positional arguments in favor of the keyword arguments.
So, `isresidue(pdb, "1", "A", "ATOM", "10")` should be replaced by
`isresidue(pdb, model="1", chain="A", group="ATOM", residue="10")`. Similarly,
`residuesdict(pdb, "1", "A", "ATOM", All)` should be replaced by
`residuesdict(pdb, model="1", chain="A", group="ATOM")`.
### Changes from v2.18.0 to v2.19.0
- *[Breaking change]* The `shuffle` and `shuffle!` functions are deprecated in favor of the
`shuffle_msa` and `shuffle_msa!` functions. The new functions take `dims` and
`fixedgaps` as keyword arguments instead of taking them as positional ones. The new
functions add a last positional argument to allow the selection of specific sequences
or columns to shuffle. Also, it adds the `fixed_reference` keyword argument to keep the
residues in the reference sequence fixed during the shuffling. As an example of migration,
`shuffle!(msa, 1, false)` should be replaced by `shuffle_msa!(msa, dims=1, fixedgaps=false)`.
### Changes from v2.17.0 to v2.18.0
- *[Breaking change]* The `read`, `parse`, `write`, and `print` functions for different
`FileFormat`s have been deprecated in favor of the `read_file`, `parse_file`,
`write_file`, and `print_file` functions. The new functions keep the same signature and
behavior as the old ones.
### Changes from v2.16.0 to v2.17.0
- *[Breaking change]* The `download_file` now uses the `Downloads.jl` module instead of
`HTTP.jl`. Therefore, the `download_file` function now accepts the `Downloads.download`
keyword arguments. In particular, the `redirect` and `proxy` keyword arguments are no
longer needed.
- The `MSA` module now exports the `A2M` and `A3M` file formats, to allow reading and
writing MSA files in these formats.
### Changes from v2.15.0 to v2.16.0
MIToS v2.16.0 drops support for *Julia 1.0*. This release requires *Julia 1.6* or higher.
- *[Breaking change]* The `transpose` function is now deprecated for MSA and sequences
(`AbstractAlignedObject`s). Use `permutedims` instead.
- *[Breaking change]* MIToS is now using `JSON3.jl` instead of `JSON.jl`. That changes the
returned type of `getpdbdescription` from `Dict{String, Any}` to `JSON3.Object`.
Since the `JSON3.Object` supports the `Dict` interface, the change should not cause any
issues. If you want to convert the returned `JSON3.Object` to a `Dict{String, Any}`
you can use the `MIToS.PDB.JSON3.copy` function.
- The `PDB` module now defines the `query_alphafolddb` and `download_alphafold_structure`
functions to query the *AlphaFold Protein Structure Database* and download the
predicted structures.
- This version solves a bug when reading MSA files with `|` in the sequence names.
- MIToS is now using `Format.jl` instead of `Formatting.jl`.
### Changes from v2.14.1 to v2.15.0
- The `MSA` module now exports the `rename_sequences!` and `rename_sequences` functions to
rename the sequences of an MSA object.
### Changes from v2.14.0 to v2.14.1
- The `modelled_sequences` function now returns only the selected chains, thus avoiding
the inclusion of empty sequences in the output.
### Changes from v2.13.1 to v2.14.0
- The `MSA` module now defines `join` for MSA objects, allowing one to join or merge two
  `AnnotatedMultipleSequenceAlignment` objects based on a list of matching sequences or columns.
- The `MSA` module now defines `hcat` and `vcat` for MSA objects, taking care of sequence
  and column names, and MSA annotations.
- The `MSA` module now exports the `sequencename_iterator` and `columnname_iterator` functions
  to return an iterator over the sequence or column names of an MSA.
- The `MSA` module now exports the `sequence_index` and `column_index` functions to return the
  integer position of a sequence or column name in an MSA (see the sketch after this list).
- `merge` and `merge!` are now defined for `Annotations` objects in the `MSA` module.
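A brief sketch of these additions; the MSA file name is hypothetical:

```julia
using MIToS.MSA

msa = read_file("msa.sto", Stockholm) # hypothetical file name

name = first(sequencename_iterator(msa)) # iterate over the sequence names
sequence_index(msa, name)                # integer position of that sequence in the MSA

concatenated = hcat(msa, msa) # column-wise concatenation keeping names and annotations
```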
### Changes from v2.13.0 to v2.13.1
- The `PDB` module can now parse the 66-character-wide lines of the PDB files created
by *Foldseek*. These structures contain only the alpha carbons and do not have the column
determining the element symbol.
### Changes from v2.12.0 to v2.13.0
- The `PDB` module now includes the `modelled_sequences` function, allowing extraction of
protein sequences from a specified structure.
- The `PDB` module exports the `is_aminoacid` function to determine whether
a `PDBResidue` represents an amino acid residue. This function is utilized by
the `modelled_sequences` function.
- The `Utils` module now exports the `THREE2ONE` constant, which is a dictionary mapping
three-letter amino acid residue codes to their corresponding one-letter codes.
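For example, a small sketch using the exported constant:

```julia
using MIToS.Utils

THREE2ONE["ALA"] # the one-letter code for alanine
```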
### Changes from v2.11.1 to v2.12.0
- The `downloadsifts` function now downloads the SIFTS files from the PDBe HTTPS server
instead of the previous FTP server. This improves error handling during the download
process, making it more robust by relying on the `download_file` function. If you prefer
the previous behavior, you can set the new keyword argument `source` to `"ftp"`.
- It resolves an issue with the representation of Multiple Sequence Alignments and
ContingencyTables in the `show` methods by always using explicit MIME types.
- *[Breaking change]* The `show` methods that accept only two elements without an explicit
MIME type are now deprecated.
### Changes from v2.11.0 to v2.11.1
- MIToS now checks the magic number of gzip files immediately after download. If
the gzip file does not have the correct header, MIToS will attempt to download
it again. In Julia versions below 1.2, it will retry the download once. In
Julia 1.2 or higher, it will retry the download five times, using an
ExponentialBackOff.
### Changes from v2.10.0 to v2.11.0
- *[breaking change]* `getCA` returns `missing` if a `PDBResidue` has no CA atom
  (previously, it threw an `AssertionError`).
### Changes from v2.9.0 to v2.10.0
- *[breaking change]* `downloadsifts` now uses `Base.download` instead of `download_file` as HTTP (1.7 or lower) doesn't support FTP. Because of that, it doesn't accept the same keyword arguments as `download_file`, besides `filename`.
- MIToS now supports HTTP 1.0 and has migrated from using `HTTP.request` to using `HTTP.download` for `MIToS.Utils.download_file`, dropping support for HTTP 0.8. Thanks, @kool7d!
- The `downloadpfam` function now uses the InterPro API, as the [Pfam website has been discontinued](https://xfam.wordpress.com/2022/08/04/pfam-website-decommission/). Thanks, @timholy!
- The `downloadpfam` function now has an `alignment` keyword argument for choosing which Pfam alignment to download. The options are `"full"` (the default), `"seed"` and `"uniprot"`.
- MIToS switched to GitHub Actions for CI. Thanks, @timholy!
### Changes from v2.8.6 to v2.9.0
- New `matches` keyword argument in the `superimpose` function to determine the residues to be aligned. Thanks, @timholy!
### Changes from v2.8.1 to v2.8.6
- You can pass keyword arguments from `downloadsifts` to `download_file`.
### Changes from v2.8.1 to v2.8.5
- Fix bugs when concatenating concatenated MSAs using `hcat`.
### Changes from v2.8.1 to v2.8.4
- Ensure that `gaussdca` uses the correct project file.
### Changes from v2.8.1 to v2.8.3
- Increase `PairwiseListMatrices` required version.
- Fix bugs when concatenating concatenated MSAs using `hcat`.
### Changes from v2.8.0 to v2.8.1
Fix bug when `read`ing an `hcat`-generated MSA in `Stockholm` format.
### Changes from v2.7.0 to v2.8.0
Multiple bug fixes and improvements related to `getindex` and `hcat`.
- *[breaking change]* MSA `getindex` can now change the order of the columns
in an `AnnotatedMultipleSequenceAlignment`.
- *[breaking change]* `convert` to MSA and sequence objects is now deprecated;
use the corresponding constructor.
- `gethcatmapping` to get the mapping to the concatenated MSAs.
### Changes from v2.6.1 to v2.7.0
- *[breaking change]* MSA `getindex` with `:` or arrays now returns an object of
the same type. The annotations of an `AnnotatedMultipleSequenceAlignment` are
modified according to the selection.
- *[breaking change]* MSA `getindex` can now change the order of the sequences
in an `AnnotatedMultipleSequenceAlignment`.
- It adds `hcat` support for MSA objects, taking care of the MSA annotations.
### Changes from v2.6.0 to v2.6.1
- `download_file` and other `download...` functions now use the proxy settings
  declared with the `HTTP_PROXY` and `HTTPS_PROXY` environment variables.
### Changes from v2.5.0 to v2.6.0
- The RESTful API of PDB has changed, and the Legacy Fetch API Web Service was shut down on
December 9th, 2020. To adapt to the new changes, `PDBMLHeader` has been deprecated, and the
`downloadpdbheader` and `getpdbdescription` functions now return different objects.
### Changes from v2.4.0 to v2.5.0
MIToS v2.5.0 drops support for *Julia 0.7* and adds support for *Julia 1.5* and
includes several bug fixes.
- `Cookbook` section added to the docs using [Literate](https://github.com/fredrikekre/Literate.jl)
- The `SIFTS` module now includes the `dbSCOP2` and `dbSCOP2B` databases.
- `siftsmapping` now returns an `OrderedDict` instead of a `Dict`.
- `msacolumn2pdbresidue` now returns an `OrderedDict` instead of a `Dict`.
### Changes from v2.3.0 to v2.4.0
MIToS v2.4 uses `Project.toml` and includes several bug fixes.
- The `SIFTS` module includes the `dbEnsembl` database and `warn`s again about unused databases.
### Changes from v2.2.0 to v2.3.0
MIToS v2.3 requires Julia v0.7 or v1.0. This release drops Julia 0.6 support.
- `Formatting.jl` is used in place of `Format.jl`.
- `SIFTS.get` returns the desired object or `missing` instead of `Nullable`s.
- `SIFTS` function doesn't `warn` about unused databases.
#### Julia 0.7/1.0 deprecations
- `bits` was deprecated to `bitstring`.
- `'` and `.'` are deprecated for alignments and sequences, use `transpose` or
  `permutedims` instead. `ctranspose` is no longer available for matrices of `Residue`s.
### Changes from v2.1.2 to v2.2
- `PIR` `FileFormat` is included to read and write alignments in PIR/NBRF format.
- `Utils.Format` was renamed to `Utils.FileFormat`.
- `HTTP.jl` is used in place of `FTPClient.jl` and the deprecated `Requests.jl` in
`Utils.download_file` to download files.
- `Format.jl` is used in place of `Formatting.jl`.
- Solve bug in the printing of matrices of `Residue`s using `FileFormat`s.
### Changes from v2.1.1 to v2.1.2
- `FTPClient.jl` is used in `Utils.download_file` to download files from FTP.
- `CodecZlib.jl` is used in place of `GZip.jl` speeding up the parsing of compressed files.
- Improvements in MSA and PDB parsing speed.
- Improvement in `MSA.percentidentity` speed.
- `Information.gaussdca` now uses Julia's `serialize` and `deserialize` instead of `JLD`.
- `ROCAnalysis.jl` is no longer a dependency and it's now used with `@require` from
`Requires.jl`. To use the `AUC` function you need to do `using ROCAnalysis`.
### Changes from v2.1 to v2.1.1
- The script `Conservation.jl` was added to measure residue conservation of MSA columns.
- The script `SplitStockholm.jl` now has a progress bar thanks to Ellis Valentiner
@ellisvalentiner.
### Changes from v2.0 to v2.1
MIToS v2.1 requires Julia v0.6. This release drops Julia 0.5 support.
- `get_n_words(...` doesn't remove the last newline character, use `get_n_words(chomp(...`
to get the previous behaviour.
### Changes from v1.2.3 to v2.0
**MIToS 2.0** is the first MIToS version with **Julia 0.5** support
(It drops Julia 0.4 support). The last Julia version introduces new awesome features like
native multi-threading support, fast anonymous functions, generator expressions and more.
Also, the Julia package ecosystem has grown. So, MIToS was slightly redesigned to take
advantage of the new Julia capabilities. As a consequence, this version introduces several
breaking changes and new features.
##### Utils module
- `deleteitems!(vector::Vector, items)` is deprecated in favor of
`filter!(x -> x ∉ items, vector)`.
- `All` is used instead of MIToS 1.0 `"all"` or `"*"`, because it's possible to dispatch on it.
###### Vectorized queries are deprecated
Previous versions of Utils included methods and types to overcome the performance
cost of functional programming in older Julia versions. In particular, vectorized queries
were performed using subtypes of `AbstractTest`, namely the `TestType`s `Is` and
`In` and the `TestOperation` `Not`. These types were used as arguments to the query methods
`capture` and `isobject`. These operations were fused and vectorized with the methods
`findobjects`, `collectobjects` and `collectcaptures`. All these functions and types are
deprecated in MIToS 2.0. Functional programming in Julia 0.5 is fast, so these methods
can be easily replaced by Julia higher-order functions like `find` and `filter` and lambda
expressions (anonymous functions).
##### MSA module
- `Residue` is now encoded as `Int` instead of being encoded as `UInt8`, allowing faster
  indexing using `Int(res::Residue)`. More memory is used, since the residues are encoded
using 32 or 64 bits instead of 8 bits.
- `XAA` is now used to indicate unknown, ambiguous and non-standard residues instead of `GAP`.
- Conversions to and from `UInt8` are no longer supported.
- More `Base` methods are extended to work with `Residue`: `bits`, `zero`, `one`
and `isvalid`.
- `empty(Annotations)` was deprecated, use `Annotations()` instead.
- `msa["seq_name",:]` now returns a `NamedArray{Residue,1}` instead of an aligned sequence,
use `getsequence(msa,"seqname")` to get an aligned sequence with annotations.
- The `names` function was replaced by the `sequencenames` function. A `columnnames`
function was also added.
- Aligned sequences don't drop dimensions, so they are matrices instead of vectors. You can
use `vec(...)` or `squeeze(...,1)` to get a vector instead of the matrix.
- Indexing MSA objects with only one string is deprecated, use `msa["seqname",:]` instead
of `msa["seqname"]`.
- `empty!` doesn't take MSA objects anymore.
- `asciisequence` was replaced by `stringsequence`.
- `deletenotalphabetsequences` and the parse/read keyword argument `checkalphabet` are
deprecated since MIToS 2.0 uses Residue('X') to represent residues outside the alphabet. You
can use `filtersequences!(msa, vec(mapslices(seq -> !in(XAA, seq), msa, 2)))` to delete
sequences with unknown, ambiguous or non standard residues.
- `parse`/`read` and MSA file returns an `AnnotatedMultipleSequenceAlignment` by default.
- `shuffle_...columnwise!` and `shuffle_...sequencewise!` functions were deprecated in
favor of `shuffle!` and `shuffle` functions.
- `SequenceClusters` was renamed to `Clusters`.
- Residue alphabet types were added. All alphabet types are subtypes of `ResidueAlphabet`.
In particular, three types are exported: `GappedAlphabet`, `UngappedAlphabet` and
`ReducedAlphabet`. The last type allows the creation of custom reduced alphabets.
- In order to keep the sequence name, `AlignedSequence` and `AnnotatedAlignedSequence` are
now matrices instead of vectors.
##### PDB module
- The keyword argument `format` of `downloadpdb` should be a type (`PDBFile` or `PDBML`)
instead of a string (`pdb` or `xml`) as in MIToS 1.0.
- `read` and `parse` now has the `occupancyfilter` keyword argument.
- `read` and `parse` now has the `label` keyword argument for `PDBML` files.
- `residues`, `atoms` and similar functions don't take vectors or sets anymore. Use an
anonymous function instead, e.g.: `x -> x in set_of_residue_numbers`.
- The functions `isresidue`, `isatom` and `residuepairsmatrix` were added.
##### SIFTS module
- The `get` function has a more complex signature for `SIFTSResidue`s to make accessing
  data simpler.
- `find` and `filter` now take a database type as a third parameter when a vector
  of `SIFTSResidue`s is the second parameter. This allows using a function that directly
operates over the database type if it's available.
- `SIFTSResidue`s now also store secondary structure data in the `sscode` and `ssname` fields.
##### Information module
- `ResidueProbability` and `ResidueCount` were deprecated in favor of `ContingencyTable`.
  `Probabilities` and `Counts` were added as wrappers of `ContingencyTable` to allow dispatch
  in some functions, e.g. `entropy`.
- The last parameter of contingency tables is now a subtype of `ResidueAlphabet` instead
of a `Bool`, i.e.: `UngappedAlphabet`, `GappedAlphabet` or `ReducedAlphabet`.
- Creation of empty contingency tables changed,
e.g. `zeros(ResidueProbability{Float64, 2, false})` changed to
`ContingencyTable(Float64, Val{2}, UngappedAlphabet())` and
`ResidueProbability{Float64, 2, false}()` changed to
`ContingencyTable{Float64, 2, UngappedAlphabet}(UngappedAlphabet())`.
- `count!` and `probabilities!` signatures changed. The first argument is always a
`ContingencyTable`, the second positional argument a clustering weight object
(use `NoClustering()` to skip it), the third positional argument is a pseudocount object
(use `NoPseudocount()` to avoid the use of pseudocounts) and `probabilities!` takes also a
`Pseudofrequencies` object (use `NoPseudofrequencies()` to avoid pseudofrequencies). The
last positional arguments are the vector of residues used to fill the contingency table.
- `count` and `probabilities` now take the sequences as the only positional arguments. The
output is always a table of `Float64`. Both functions take the keyword arguments
`alphabet`, `weights` and `pseudocounts`. `probabilities` also has a `pseudofrequencies`
keyword argument.
- `apply_pseudofrequencies!` changed its signature. Now it takes a `ContingencyTable` and
a `Pseudofrequencies` object.
- The function `blosum_pseudofrequencies!` was deprecated in favor of introducing a
`BLOSUM_Pseudofrequencies` type as subtype of `Pseudofrequencies` to be used in
`probabilities`, `probabilities!` and `apply_pseudofrequencies!`.
- Because higher-order functions are fast in Julia 0.5, measure types
(i.e. subtypes of `AbstractMeasure`) were deprecated in favor of functions. In particular,
`MutualInformation` was replaced with the `mutual_information` function,
`MutualInformationOverEntropy` was replaced with `normalized_mutual_information`,
`KullbackLeibler` was replaced with `kullback_leibler` and `Entropy` was replaced with
`entropy`.
- The functions `estimate`, `estimate_on_marginal`, `estimateincolumns` and
  `estimateinsequences` were deprecated because measure types are no longer used.
- `estimate_on_marginal(Entropy...` was deprecated in favor of the `marginal_entropy`
function.
- `estimateincolumns` and `estimateinsequences` were deprecated in favor of `mapcolfreq!`,
  `mapseqfreq!`, `mapcolpairfreq!` and `mapseqpairfreq!`.
- Keyword argument `usegaps` is deprecated in `buslje09` and `BLMI` in favor of `alphabet`.
- `cumulative` function was added to calculate cumulative MI (cMI).
* * *
### Changes from v1.1 to v1.2.2
- `using Plots` to use `plot` with `AbstractVector{PDBResidue}` to visualize coordinates
of the C alpha of each residue.
- Re-exports `swap!` from **IndexedArrays.jl**.
- *[breaking change]* **Distances.jl** now uses `--inter` instead of `--intra`.
- *docs* and *cookbook* are now in [MIToSDocumentation](https://github.com/diegozea/MIToSDocumentation)
* * *
### Changes from v1.0 to v1.1
- **RecipesBase** is used to generate plot recipes for MIToS’ objects. MSA objects can be
visualized `using Plots` (thanks to Thomas Breloff @tbreloff ).
- Functions to perform structural superimposition were added to the `PDB` module
  (thanks to Jorge Fernández de Cossío Díaz @cossio): `center!`, `kabsch`, `rmsd`.
- The `PDB` module adds the following functions to make easier structural comparison:
`getCA`, `CAmatrix`, `coordinatesmatrix`, `centeredcoordinates`, `centeredresidues`,
`change_coordinates`, `superimpose`, `mean_coordinates` and `rmsf`.
- When PDB or PDBML files are being parsed, it’s possible to indicate if only atoms with
the best occupancy should be loaded (`occupancyfilter=true`, `false` by default).
- When `PDBML` files are being parsed, it is possible to use the new `label` keyword argument
to indicate if "auth" (`false`) or "label" (`true`) attributes should be used.
- `bestoccupancy!` was deprecated in favor of `bestoccupancy`.
- The `MSA` module exports the function `percentsimilarity` to calculate the similarity
  percentage between aligned sequences.
- `msacolumn2pdbresidue` has two new keyword arguments, `strict` and `checkpdbname`, to
perform extra tests during the mapping between PDB and MSA residues.
- `msacolumn2pdbresidue` has a new `missings` keyword argument to indicate if missing
residues should be included in the mapping (default: `true`).
- The `MSA` module now exports the `residue2three` and `three2residue` functions to convert
  `Residue`s to and from their three-letter names.
- The `MSA` module now exports `sequencepairsmatrix`, `columnpairsmatrix`, `columnlabels`,
and `sequencelabels` to help in the construction of matrices for MSA sequences or columns
pairwise comparisons.
- The `Information` module, if `GaussDCA` is installed, allows calling its `gDCA` function
from MIToS through the `gaussdca` function.
- The `Information` module now exports the `KullbackLeibler` measure.
- It is now possible to `print` and `write` `PDBResidue`s as `PDBFile`s.
- The function `proximitymean` now has a keyword argument `include` to indicate if the
residue score should be included in the mean.
- The module `Scripts` inside the `Utils` module has a new function `readorparse` to help
parsing `STDIN` in MIToS’ scripts.
**MIToS v1.1** also includes several **bug fixes**, some **performance improvements** and a
more complete **documentation**.
* * *
### Changes from v0.1 to v1.0
- `Pfam` module for working with *Pfam* alignments and useful parameter optimization
functions (i.e. `AUC`).
- *[breaking change]* The `Clustering` module was deleted and its functions moved to the
`MSA` module.
- `MSA` uses `ClusteringResult` from the `Clustering.jl` package instead of `AbstractClusters`.
+ `Clusters` was renamed to `SequenceClusters`
+ `MSA` adds the `counts` and `assignments` functions from the `Clustering.jl` interface.
  + *[breaking change]* The `getnclusters` function is now `nclusters` in the `Clustering` module.
- *[breaking change]* All the MSA `...percentage` functions were renamed to `...fraction`
and `percent...` functions now return real percentages (not fractions) values.
Functions taking identity thresholds, now also take real percentages
(values between 0.0 and 100.0).
- *[breaking change]* Script command line arguments changed to: define the number of
workers, use STDIN and STDOUT (pipelines), get better output names, use real flag arguments.
- `InformationMeasure` renamed to `AbstractMeasure`.
- New functions added to `MSA` module.
+ `annotations`, `names`.
  + `meanpercentidentity` allows fast estimation of the mean percent identity between the sequences of an MSA.
- New function and type added to `Information` module.
+ `cumulative` to calculate cMI (cumulative mutual information) and similar cumulative scores.
+ `KullbackLeibler` to estimate conservation.
- `proximitymean` is defined in the `PDB` module to calculate pMI
(proximity mutual information) and other proximity scores.
- `contact` and `distance` have a vectorized form to create contact/distance maps.
- `NCol` file annotation with the number of columns in the original MSA.
- `BLMI` has `lambda` as a keyword argument for using additive smoothing.
- `BLMI` and `buslje09` accepts `samples=0` to avoid the Z score estimation.
- `read`/`parse` added the keyword argument `checkalphabet` for deleting sequences with
  non-standard amino acids.
- `read`/`parse` added the keyword argument `keepinserts` for keeping insert columns
  (it creates an `Aligned` column annotation).
**MIToS v1.0** also includes several **bug fixes** and a more complete **documentation**.
### Quick DEV Guide
If you are not very familiar with development in *Julia*, you can start with
this simple approach.
1. Clone the repo from *GitHub* and enter the repo directory
2. Start *Julia REPL*
3. Change to *Pkg* mode in *Julia* (press `]`) and activate the environment for
the repo:
```
pkg> activate .
```
4. Go back to normal REPL mode (press backspace) and load
[*Revise*](https://github.com/timholy/Revise.jl)
```
julia> using Revise
```
5. Load *MIToS*
```
julia> using MIToS
```
6. (optional) Check that *Revise* is tracking the correct files
```
julia> Revise.watched_files
```
Edit the code, and the changes should be automatically loaded into the current
session.
Happy coding!

## 🐉 MIToS: Mutual Information Tools for protein Sequence analysis
*A Julia Package to Analyze Protein Sequences, Structures, and Evolutionary Information*
<br>
**DOCUMENTATION:** [](https://diegozea.github.io/MIToS.jl/stable) [](https://diegozea.github.io/MIToS.jl/latest)
Linux, OSX & Windows: [](https://github.com/diegozea/MIToS.jl/actions?query=workflow%3A%22CI%22+branch%3Amaster) Code Coverage:
[](https://coveralls.io/github/diegozea/MIToS.jl?branch=master) [](http://codecov.io/github/diegozea/MIToS.jl?branch=master)
> **NOTE:** Some **breaking changes** were introduced between **MIToS 2.15** and **MIToS 3.0**, inclusive. See the [NEWS.md](https://github.com/diegozea/MIToS.jl/blob/master/NEWS.md) file to migrate code from an old version
of MIToS. Most breaking changes will show a deprecation warning with a hint on how to perform the migration. If you need more help migrating code towards MIToS v3, you can write an email to diegozea at gmail dot com asking for assistance.
MIToS provides a comprehensive suite of tools for the analysis of protein sequences and structures.
It allows working with **Multiple Sequence Alignments (MSAs)** to obtain evolutionary information in the Julia language [1].
In particular, it eases the analysis of coevolving positions in an MSA using **Mutual Information (MI)**, a measure of covariation.
MI-derived scores are good predictors of inter-residue contacts in a protein structure and functional sites in proteins [2,3].
To allow such analysis, MIToS also implements several useful tools for working with protein structures, such as those available in the **Protein Data Bank (PDB)** or predicted by AlphaFold 2.
The starting point of MIToS was an improvement of the algorithm published by Buslje et al. [2].
A BLOSUM62-based pseudo-count strategy, similar to Altschul et al. [4], was implemented to improve performance on MSAs with a low number of sequences [1].
**MIToS** offers all the tools for using, developing, and testing MI-based scores—in fact, any measure based on residue frequencies in an MSA—in different modules.
### Modules
MIToS tools are separated into different modules for different tasks.
- **MSA** This module defines multiple functions and types for dealing with MSAs and
their annotations. It also includes facilities for sequence clustering and shuffling, among others.
- **PDB** This module defines types and methods to work with protein structures from
different sources, such as PDB or AlphaFold DB. It includes functions to superpose structures,
measure the distance between residues, and much more.
- **Information** This module defines residue contingency tables and methods on them to
  estimate information measures. This allows measuring evolutionary information on MSA
positions. It includes functions to estimate corrected mutual information (ZMIp, ZBLMIp)
between MSA columns, as well as conservation estimations using Shannon entropy and the
Kullback-Leibler divergence.
- **SIFTS** This module allows access to SIFTS residue-level mapping of UniProt, Pfam, and
other databases with PDB entries.
- **Pfam** This module uses the previous modules to work with Pfam MSAs. It also offers
useful functions for parameter optimization using Pfam alignments.
- **Utils** It exports common utility functions and types used in different modules of this package.
### Installation
To install MIToS, you need to execute the following code in Julia:
```julia
using Pkg; Pkg.add("MIToS")
```
To update your installed version, you can execute:
```julia
using Pkg; Pkg.update("MIToS")
```
### Scripts
The [MIToS_Scripts](https://github.com/MIToSOrg/MIToS_Scripts.jl) package offers a set of easy-to-use scripts to access some functionalities MIToS offers from the terminal. These scripts are designed for researchers familiar with command-line interfaces (CLI) but without experience coding in Julia. The available scripts include:
* **Buslje09.jl**: Calculates corrected Mutual Information (MI/MIp) based on Buslje et al., 2009.
* **BLMI.jl**: Computes corrected mutual information using BLOSUM62-based pseudo-counts, as described in the MIToS publication [1].
* **Conservation.jl**: Calculates Shannon entropy and Kullback-Leibler divergence for each MSA column.
* **Distances.jl**: Computes inter-residue distances in a PDB file.
* **PercentIdentity.jl**: Calculates the percentage identity between all sequences in an MSA and provides statistical summaries.
* **MSADescription.jl**: Provides statistics for a given Stockholm file, including clustering information and sequence coverage.
This list is not exhaustive; more scripts are available in the [MIToS_Scripts.jl repository](https://github.com/MIToSOrg/MIToS_Scripts.jl). Visit the repository for more details and to access these scripts.
### Older versions
MIToS 3.0 requires Julia 1.9 or higher. It is recommended that you use these versions to get the best experience coding with Julia and MIToS.
If you need to use MIToS in a Julia version lower than 1.0, you will need to look at the [older MIToS v1 documentation](https://diegozea.github.io/mitosghpage-legacy/).
### Citation
If you use MIToS, please cite:
Diego J. Zea, Diego Anfossi, Morten Nielsen, Cristina Marino-Buslje; **MIToS.jl: mutual information tools for protein sequence analysis in the Julia language**, Bioinformatics, Volume 33, Issue 4, 15 February 2017, Pages 564–565, [https://doi.org/10.1093/bioinformatics/btw646](https://doi.org/10.1093/bioinformatics/btw646)
### References
1. Zea, Diego Javier, et al. "MIToS. jl: mutual information tools for protein sequence
analysis in the Julia language." Bioinformatics 33, no. 4 (2016): 564-565.
2. Buslje, Cristina Marino, et al. "Correction for phylogeny, small number of
observations and data redundancy improves the identification of coevolving amino acid
pairs using mutual information." Bioinformatics 25.9 (2009): 1125-1131.
3. Buslje, Cristina Marino, et al. "Networks of high mutual information define the
structural proximity of catalytic sites: implications for catalytic residue
identification." PLoS Comput Biol 6.11 (2010): e1000978.
4. Altschul, Stephen F., et al. "Gapped BLAST and PSI-BLAST: a new generation of protein
database search programs." Nucleic acids research 25.17 (1997): 3389-3402.
### Acknowledgments
MIToS was initially developed at the *Structural Bioinformatics Unit* of the
[*Fundación Instituto Leloir*](https://www.leloir.org.ar/) (*FIL*) in Argentina.
Its development now continues at the [*Molecular Assemblies and Genome Integrity*](https://www.i2bc.paris-saclay.fr/molecular-assemblies-and-genome-integrity/)
group of the [*Institute for Integrative Biology of the Cell*](https://www.i2bc.paris-saclay.fr/)
(*I2BC*) in France.
We want to thank all [**contributors**](https://github.com/diegozea/MIToS.jl/graphs/contributors)
who have helped improve MIToS. We also thank the Julia community and all the MIToS users
for their feedback and support.


To run the benchmark suite, you need to have the `PkgBenchmark` package installed.
Then, you can run the following code in the Julia REPL:
```julia
import PkgBenchmark, MIToS; PkgBenchmark.benchmarkpkg(MIToS)
```
```@setup log
@info "Example"
```
# Example
In this simple demonstration, you will see how to calculate **ZBLMIp** (**Z** score of the
corrected **MIp** using BLOSUM62 pseudo frequencies) for a
[Pfam](https://www.ebi.ac.uk/interpro/entry/pfam/#table)
MSA from the [Julia REPL](@ref juliarepl) or using a
[MIToS script in the system command line](@ref commandline).
## [MIToS in the Julia REPL](@id juliarepl)
If you load the `Pfam` module from `MIToS`, you will get access to a set of functions that
work with Pfam MSAs. In this case, we are going to use it to download a
[Stockholm](https://en.wikipedia.org/wiki/Stockholm_format)
MSA from the Pfam website and read it into Julia.
```@example juliarepl
using MIToS.Pfam
pfam_file = downloadpfam("PF10660")
msa = read_file(pfam_file, Stockholm, generatemapping = true, useidcoordinates = true)
```
!!! note "Generation of sequence and column mappings"
    The keyword argument `generatemapping` of `read_file` allows generating sequence and column
    mappings for the MSA. *Column mapping* is the map between each column in the MSA
    object and the column number in the file. *Sequence mappings* use the start and
    end coordinates in the sequence ids to enumerate each residue in the sequence if
    `useidcoordinates` is `true`.
You can plot this MSA and other MIToS’ objects using the [Plots](https://juliaplots.github.io/) package. The installation of *Plots* is described in the *Installation* section of this site:
```@example juliarepl
using Plots
plot(msa)
png("msa.png") # hide
nothing # hide
```

The `Information` module of `MIToS` has functions to calculate measures from the
[Information Theory](https://en.wikipedia.org/wiki/Information_theory),
such as Shannon Entropy and Mutual Information (MI), on an MSA. In this example, we will estimate
covariation between columns of the MSA with a corrected **MI** that uses the BLOSUM62 matrix
to calculate pseudo frequencies (`BLMI`).
```@example juliarepl
using MIToS.Information
ZBLMIp, BLMIp = BLMI(msa)
ZBLMIp # shows ZBLMIp scores
```
Once the *Plots* package is installed and loaded, you can use its capabilities to visualize
these results:
```@example juliarepl
heatmap(ZBLMIp, yflip = true, c = :grays)
png("blmi.png") # hide
nothing # hide
```

```@setup juliarepl
rm(pfam_file) # clean up
```
## [MIToS in system command line](@id commandline)
Calculating ZBLMIp from the system shell is easy using the script called `BLMI.jl` from the
[MIToS_Scripts.jl](https://github.com/MIToSOrg/MIToS_Scripts.jl)
package. This script reads an MSA file and writes a file with the same base name as the
input but with the `.BLMI.csv` extension.
```
julia BLMI.jl PF14972.stockholm.gz
```
```@meta
CurrentModule = MIToS.Information
```
```@setup log
@info "Information docs"
```
# [Information](@id Module-Information)
The `Information` module of MIToS defines types and functions useful to calculate
information measures (e.g. *Mutual Information* (MI) and *Entropy*) over a Multiple
Sequence Alignment (MSA). This module was designed to count `Residue`s
(defined in the `MSA` module) in special contingency tables (as fast as possible) and to
derive probabilities from these counts. Also, it includes methods for applying corrections
to those tables, e.g. pseudocounts and pseudo frequencies. Finally, `Information` allows
using these probabilities and counts to estimate information measures and other
frequency-based values.
```julia
using MIToS.Information # to load the Information module
```
## Features
- Estimate multidimensional frequencies and probability tables from sequences, MSAs, etc...
- Correction for a small number of observations
- Correction for data redundancy on a MSA
- Estimate information measures
- Calculate corrected mutual information between residues
## Contents
```@contents
Pages = ["Information.md"]
Depth = 4
```
## Counting residues
MIToS Information module defines a multidimensional `ContingencyTable` type and two types
wrapping it, `Frequencies` and `Probabilities`, to store occurrences or probabilities.
The `ContingencyTable` type stores the contingency matrix, its marginal values and total.
These types are parametric, taking three ordered parameters:
- `T` : The type used for storing the counts or probabilities, e.g. `Float64`. It's possible to use `BigFloat` if more precision is needed.
- `N` : It's the dimension of the table and should be an `Int`.
- `A` : This should be a type, subtype of `ResidueAlphabet`, i.e.: `UngappedAlphabet`, `GappedAlphabet` or `ReducedAlphabet`.
!!! note
`ContingencyTable` can be used for storing probabilities or counts. The wrapper types
`Probabilities` and `Frequencies` are mainly intended to dispatch in methods that need to
know if the matrix has probabilities or counts, e.g. `shannon_entropy`. In general,
the use of `ContingencyTable` is recommended over the use of `Probabilities` and `Frequencies`.
In this way, a matrix for storing pairwise probabilities of residues (without gaps) can be
initialized using:
```@example inf_zeros
using MIToS.Information
Pij = ContingencyTable(Float64, Val{2}, UngappedAlphabet())
```
**[High level interface]** It is possible to use the functions `frequencies` and `probabilities`
to easily calculate the frequencies of sequences or columns of an MSA, where the number of
sequences/columns determines the dimension of the resulting table.
```@example inf_count
using MIToS.Information
using MIToS.MSA # to use res"..." to create Vector{Residue}
column_i = res"AARANHDDRDC-"
column_j = res"-ARRNHADRAVY"
# Nij[R,R] = 1 1 = 2
Nij = frequencies(column_i, column_j)
```
You can use `sum` to get the stored total:
```@example inf_count
sum(Nij) # There are 12 Residues, but 2 are gaps
```
Contingency tables can be indexed using `Int` or `Residue`s:
```@example inf_count
Nij[2, 2] # Use Int to index the table
```
```@example inf_count
Nij[Residue('R'), Residue('R')] # Use Residue to index the table
```
!!! warning
    The number refers to the specific index in the table, e.g. `[2,2]` references
    the second row and the second column. Using the number that encodes a residue
    to index the table is dangerous. The index corresponding to a residue depends on
    the alphabet used, and `Int(Residue('X'))` will always be out of bounds.
Indexing with `Residue`s works as expected. It uses the alphabet of the contingency table
to find the index of the `Residue`.
```@example inf_reduced
using MIToS.Information
using MIToS.MSA
alphabet = ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP")
column_i = res"AARANHDDRDC-"
column_j = res"-ARRNHADRAVY"
# Fij[R,R] = 1 1 1 = 3 # RHK
Fij = frequencies(column_i, column_j, alphabet = alphabet)
```
```@example inf_reduced
Fij[Residue('R'), Residue('R')] # Use Residue to index the table
```
The function `getcontingencytable` allows accessing the wrapped `ContingencyTable` in a
`Frequencies` object. You can use it, in combination with `normalize`, to get a contingency table
of probabilities. The result can be wrapped inside a `Probabilities` object:
```@example inf_reduced
Probabilities(normalize(getcontingencytable(Fij)))
```
#### Example: Plotting the probabilities of each residue in a sequence
Similar to the `frequencies` function, the `probabilities` function can take at least one
sequence (vector of residues) and returns the probabilities of each residue. Optionally,
the keyword argument `alphabet` could be used to count some residues in the same cell
of the table.
```@example inf_reduced
probabilities(res"AARANHDDRDC", alphabet = alphabet)
```
Here, we are going to use the `probabilities` function to get the residue probabilities of a
particular sequence from *UniProt*.
We can use the `getsequence` function, from the `MSA` module, to get the sequence from a FASTA file downloaded from UniProt.
```@repl
using MIToS.Information # to use the probabilities function
using MIToS.MSA # to use getsequence on the one sequence FASTA (canonical) from UniProt
seq = read_file("http://www.uniprot.org/uniprot/P29374.fasta", FASTA) # Small hack: read the single sequence as a MSA
probabilities(seq[1, :]) # Select the single sequence and calculate the probabilities
```
```@setup inf_plotfreq
@info "Information: Plots"
using Plots
gr(size=(600,300))
using MIToS.Information # to use the probabilities function
using MIToS.MSA # to use getsequence on the one sequence FASTA (canonical) from UniProt
seq = read_file("http://www.uniprot.org/uniprot/P29374.fasta", FASTA) # Small hack: read the single sequence as a MSA
Pa = probabilities(seq[1,:]) # Select the single sequence and calculate the probabilities
```
```@example inf_plotfreq
using Plots # We choose Plots because it's intuitive, concise and backend independent
gr(size = (600, 300))
```
You can plot the probabilities of each residue in a given sequence together with the
probabilities of each residue estimated with the BLOSUM62 substitution matrix. That matrix
is exported as a constant by the `Information` module as `BLOSUM62_Pi`.
```@example inf_plotfreq
bar(1:20, [Pa BLOSUM62_Pi], lab = ["Sequence" "BLOSUM62"], alpha = 0.5)
png("inf_plotfreq.png") # hide
nothing # hide
```

## Low count corrections
A low number of observations can lead to sparse contingency tables, which lead to wrong
probability estimations. It is shown in [buslje2009correction](@citet)
that low-count corrections can improve the contact prediction capabilities
of Mutual Information. The Information module provides two low-count corrections:
1. [Additive Smoothing](https://en.wikipedia.org/wiki/Additive_smoothing); the constant value pseudocount described in [buslje2009correction](@citet).
2. BLOSUM62 based pseudo frequencies of residues pairs, similar to [altschul1997gapped](@citet).
```@example inf_msa
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF18883.stockholm.gz",
Stockholm,
)
filtercolumns!(msa, columngapfraction(msa) .< 0.5) # delete columns with 50% gaps or more
column_i = msa[:, 1]
column_j = msa[:, 2]
```
If you have a preallocated `ContingencyTable`, you can use `frequencies!` to fill it; this avoids
creating a new table as `frequencies` does. However, you should note that `frequencies!` **adds the
new counts to the pre-existing values**, so in this case, we want to start with a table
initialized with zeros.
```@example inf_msa
using MIToS.Information
const alphabet = ReducedAlphabet("(AILMV)(NQST)(RHK)(DE)(FWY)CGP")
Nij = ContingencyTable(Float64, Val{2}, alphabet)
```
```@example inf_msa
frequencies!(Nij, column_i, column_j)
```
In cases like the above, where there are few observations, it is possible to apply a
constant pseudocount to the counting table. This module defines the type
`AdditiveSmoothing` and the corresponding `fill!` and `apply_pseudocount!` methods to
efficiently add a constant value to each element of the table, or fill the table with it.
```@example inf_msa
apply_pseudocount!(Nij, AdditiveSmoothing(1.0))
```
**[High level interface.]** The `frequencies` and `frequencies!` functions have a
`pseudocounts` keyword argument that can take an `AdditiveSmoothing` value to easily
calculate occurrences with pseudocounts. Also, their `alphabet` keyword argument can be
used to change the default alphabet.
```@example inf_msa
frequencies(column_i, column_j, pseudocounts = AdditiveSmoothing(1.0), alphabet = alphabet)
```
To use the conditional probability matrix `BLOSUM62_Pij` in the calculation of the pseudo
frequencies $G$ for the pair of residues $a$, $b$, the real
frequencies/probabilities $p_{a,b}$ should be calculated first. The observed probabilities are then used to estimate
the pseudo frequencies.
$$G_{ab} = \sum_{cd} p_{cd} \cdot BLOSUM62( a | c ) \cdot BLOSUM62( b | d )$$
Finally, the probability $P$ of each pair of residues $a$, $b$ between the columns
$i$, $j$ is the weighted mean between the observed frequency $p$ and BLOSUM62-based
pseudo frequency $G$, where α is generally the number of clusters or the number of
sequences of the MSA and β is an empirical weight value. β was determined to be close
to `8.512`.
$$P_{ab} = \frac{\alpha \cdot p_{ab} + \beta \cdot G_{ab} }{\alpha + \beta}$$
This could be easily achieved using the `pseudofrequencies` keyword argument of the
`probabilities` function. That argument can take a `BLOSUM_Pseudofrequencies` object that
is created with α and β as first and second argument, respectively.
```@example inf_msa
Pij = probabilities(
column_i,
column_j,
pseudofrequencies = BLOSUM_Pseudofrequencies(nsequences(msa), 8.512),
)
```
You can also use `apply_pseudofrequencies!` in a previously filled probability contingency
table. i.e. `apply_pseudofrequencies!(Pij, BLOSUM_Pseudofrequencies(α, β))`
!!! warning
    `BLOSUM_Pseudofrequencies` can only be applied to **normalized/probability** tables
with `UngappedAlphabet`.
## Correction for data redundancy in a MSA
A simple way to reduce redundancy in an MSA without losing sequences is clustering and
sequence weighting. The weight of each sequence should be 1/N, where N is the number of
sequences in its cluster. The `Clusters` type of the `MSA` module stores the
weights. This vector of weights can be extracted (with the `getweight` function) and used
by the `frequencies` and `probabilities` functions with the keyword argument `weights`. Also it's
possible to use the `Clusters` as second argument of the function `frequencies!`.
```@example inf_msa
clusters = hobohmI(msa, 62) # from MIToS.MSA
```
```@example inf_msa
frequencies(msa[:, 1], msa[:, 2], weights = clusters)
```
## Estimating information measures on an MSA
The `Information` module has a number of functions defined to calculate information
measures from `Frequencies` and `Probabilities`:
- `shannon_entropy` : Shannon entropy (H)
- `marginal_entropy` : Shannon entropy (H) of the marginals
- `kullback_leibler` : Kullback-Leibler (KL) divergence
- `mutual_information` : Mutual Information (MI)
- `normalized_mutual_information` : Normalized Mutual Information (nMI) by Entropy
- `gap_intersection_percentage`
- `gap_union_percentage`
Information measure functions take optionally the base as a keyword argument (default: `ℯ`).
You can set `base=2` to measure information in bits.
```@example inf_information
using MIToS.Information
using MIToS.MSA
Ni = frequencies(res"PPCDPPPPPKDKKKKDDGPP") # Ni has the count table of residues in this low complexity sequence
H = shannon_entropy(Ni) # returns the Shannon entropy in nats (base e)
```
```@example inf_information
H = shannon_entropy(Ni, base = 2) # returns the Shannon entropy in bits (base 2)
```
The Information module defines special iteration functions to easily and efficiently compute a
measure over an MSA. In particular, `mapcolfreq!` and `mapseqfreq!` map a function that takes
a table of `Frequencies` or `Probabilities`. The table is filled in place with the counts or
probabilities of each column or sequence of a MSA, respectively. `mapcolpairfreq!` and
`mapseqpairfreq!` are similar, but they fill the table using pairs of columns or sequences,
respectively.
These functions take three positional arguments: the function `f` to be applied, the
`msa`, and a `table` of `Frequencies` or `Probabilities`.
In addition, these functions take the following keyword arguments:
- `weights` (default: `NoClustering()`) : Weights to be used for table counting.
- `pseudocounts` (default: `NoPseudocount()`) : `Pseudocount` object to be applied to table.
- `pseudofrequencies` (default: `NoPseudofrequencies()`) : `Pseudofrequencies` to be applied to the normalized (probabilities) table.
- `usediagonal` (default: `true`) : Indicates if the function should be applied to pairs containing the same sequence or column.
- `diagonalvalue` (default: zero) : The value used to fill the diagonal elements of the table if `usediagonal` is `false`.
#### Example: Estimating *H(X)* and *H(X, Y)* over an MSA
In this example, we are going to use `mapcolfreq!` and `mapcolpairfreq!` to estimate
the Shannon entropy (`shannon_entropy`) of MSA columns *H(X)* and the joint entropy *H(X, Y)* of
column pairs, respectively.
```@setup inf_entropy
@info "Information: Entropy"
using Plots
gr()
```
```@example inf_entropy
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF18883.stockholm.gz",
Stockholm,
)
```
We are going to count residues to estimate the Shannon entropy. The `shannon_entropy`
estimation is performed over a reused `Frequencies` object. The result will be a vector
containing the values estimated over each column without counting gaps (`UngappedAlphabet`).
```@example inf_entropy
using MIToS.Information
Hx = mapcolfreq!(
shannon_entropy,
msa,
Frequencies(ContingencyTable(Float64, Val{1}, UngappedAlphabet())),
)
```
If we want the **joint entropy** between column pairs, we need to use a two-dimensional
table of `Frequencies` and `mapcolpairfreq!`.
```@example inf_entropy
Hxy = mapcolpairfreq!(
shannon_entropy,
msa,
Frequencies(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
)
```
In the above examples, we indicate the type of each occurrence in the counting and the probability table to use. Also, for some measures, such as **entropy** and **mutual information**, it is possible to estimate the values using only the count table (without calculating the probability table). Estimating measures using only a count table (`Frequencies`), when this is possible, should be faster than using a probability table.
```@example inf_entropy
Time_Pab = map(1:100) do x
time = @elapsed mapcolpairfreq!(
shannon_entropy,
msa,
Probabilities(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
)
end
Time_Nab = map(1:100) do x
time = @elapsed mapcolpairfreq!(
shannon_entropy,
msa,
Frequencies(ContingencyTable(Float64, Val{2}, UngappedAlphabet())),
)
end
using Plots
gr()
histogram(
[Time_Pab Time_Nab],
labels = ["Using ResidueProbability" "Using ResidueCount"],
xlabel = "Execution time [seconds]",
)
png("inf_entropy.png") # hide
nothing # hide
```

## Corrected Mutual Information
MIToS ships with two methods to easily calculate corrected mutual information.
The first is the algorithm described in [buslje2009correction](@citet).
This algorithm can be accessed through the `buslje09` function and includes:
1. Low count correction using `AdditiveSmoothing`
2. Sequence weighting after a `hobohmI` clustering [hobohm1992selection](@cite)
3. Average Product Correction (APC) proposed by [dunn2008mutual](@citet),
through the `APC!` function that takes a MI matrix.
4. Z score correction using the functions `shuffle_msa!` from the MSA module and `zscore`
from the `PairwiseListMatrices` package.
```@docs
buslje09
```
The second, implemented in the `BLMI` function, applies the same corrections as the above
algorithm, but uses BLOSUM62-based pseudofrequencies. This function is **slower** than
`buslje09` (at the same number of samples), but gives **better performance**
(for structural contact prediction) when the MSA has **fewer than 400 clusters** after a
Hobohm I clustering at 62% identity.
```@docs
BLMI
```
#### Example: Estimating corrected MI from an MSA
```@setup inf_buslje09
@info "Information: MI"
using Plots
gr()
```
```@example inf_buslje09
using MIToS.MSA
using MIToS.Information
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF18883.stockholm.gz",
Stockholm,
)
ZMIp, MIp = buslje09(msa)
ZMIp
```
```@example inf_buslje09
ZBLMIp, BLMIp = BLMI(msa)
ZBLMIp
```
## Visualize Mutual Information
You can use the functions of the `Plots` package to visualize the Mutual Information (MI)
network between residues. As an example, we are going to visualize the MI between residues
of the Pfam domain *PF18883*. The `heatmap` is the simplest way to visualize the values of
the Mutual Information matrix.
```@example inf_buslje09
using Plots
gr()
heatmap(ZMIp, yflip = true)
png("inf_heatmap.png") # hide
nothing # hide
```

ZMIp is a Z score of the corrected MIp against its distribution on a random MSA
(shuffling the residues in each sequence), so pairs with the highest values are more likely
to co-evolve. Here, we are going to use the top 1% of MSA column pairs.
```@example inf_buslje09
using PairwiseListMatrices # to use getlist
using Statistics # to use quantile
threshold = quantile(getlist(ZMIp), 0.99)
```
```@example inf_buslje09
ZMIp[ZMIp.<threshold] .= NaN
heatmap(ZMIp, yflip = true)
png("inf_heatmap_top.png") # hide
nothing # hide
```

We are going to calculate the cMI (cumulative mutual information) value of each node,
where cMI is a mutual information score per position that characterizes the extent of
mutual information "interactions" in its neighbourhood. This score is calculated as the
sum of MI values above a certain threshold for every amino acid pair in which the particular
residue appears. This value defines to what degree a given amino acid takes part in a
mutual information network; we are going to indicate it using the node color.
To calculate cMI, we are going to use the `cumulative` function:
```@example inf_buslje09
cMI = cumulative(ZMIp, threshold)
```
```@setup comment_block
# # Setup block to hide this until PlotRecipes get fixed
# The nodes have an order, because they are columns in a MSA. So, the **arc diagram** it's
# useful to visualize long and short association between MSA positions. In general, long
# interactions has more interest.
# ` ` `@example inf_buslje09
# using PlotRecipes
# graphplot(ZMIp, size=(600,250), method=:arcdiagram) # , zcolor=cMI)
# png("inf_arcdiagram.png") # hide
# nothing # hide
# ` ` `
# 
# You can also use a **chord diagram** to see the same pattern.
# ` ` `@example inf_buslje09
# graphplot(ZMIp, size=(600,600), method=:chorddiagram)
# png("inf_chorddiagram.png") # hide
# nothing # hide
# ` ` `
# 
```
```@setup log
@info "Information API docs"
```
# Information
```@docs
MIToS.Information
```
## Contents
```@contents
Pages = ["Information_API.md"]
Depth = 2
```
## Types
```@autodocs
Modules = [MIToS.Information]
Private = false
Order = [:type]
```
## Constants
```@autodocs
Modules = [MIToS.Information]
Private = false
Order = [:constant]
```
## Macros
```@autodocs
Modules = [MIToS.Information]
Private = false
Order = [:macro]
```
## Methods and functions
```@autodocs
Modules = [MIToS.Information]
Private = false
Order = [:function]
```
```@setup log
@info "Installation docs"
```
# Installation
First you need to install [**Julia.**](https://julialang.org/downloads/)
MIToS' stable version can be installed by typing on the Julia REPL:
```julia
using Pkg
Pkg.add("MIToS")
```
If everything goes well with the installation, MIToS will be loaded without errors by typing:
```julia
using MIToS
```
To update MIToS to the latest version, you can run:
```julia
using Pkg
Pkg.update("MIToS")
```
!!! tip "Ways to run Julia"
- **[Julia REPL ](https://docs.julialang.org/en/v1/stdlib/REPL/):** Built-in Julia command line. Start a Julia interactive session (REPL) by double-clicking the Julia executable or running `julia` from the system command line.
- **[IJulia ](https://github.com/JuliaLang/IJulia.jl):** *Jupyter/IPython notebook* for Julia.
- **[Pluto ](https://github.com/fonsp/Pluto.jl):** A simple reactive notebook for Julia.
- **[VS Code Extension for Julia ](https://www.julia-vscode.org/):** Julia's Integrated Development Environment (IDE).
!!! info "Running the test suite"
**Optionally**, you can run the test suite to ensure everything works as expected.
The test suite is extensive and can take several minutes to run. It is the same test
suite used for MIToS' continuous integration (CI), so everything should pass. To run
the test suite, execute `using Pkg; Pkg.test("MIToS")` in the Julia REPL.
## Plots installation
Julia plotting capabilities are available through external packages. MIToS makes use of
*RecipesBase* to define plot recipes, which can be plotted using
[Plots](http://docs.juliaplots.org/latest/) and its different
backends. You need to [install Plots](http://docs.juliaplots.org/latest/install/)
to plot MIToS objects:
```julia
using Pkg
Pkg.add("Plots")
```
Once it is installed, you need to load Plots in order to use the `plot` function. There is
more information about it in the [Plots documentation](http://docs.juliaplots.org/latest/).
```julia
using Plots
```
To generate **graph** (network), **arc** and **chord** (circo) **plots**, you also need to
install and load [GraphRecipes](https://github.com/JuliaPlots/GraphRecipes.jl).
```julia
using Pkg
Pkg.add("GraphRecipes")
using GraphRecipes
```
You can look for examples in the [GraphRecipes documentation](https://docs.juliaplots.org/stable/GraphRecipes/examples/).
```@setup log
@info "MSA docs"
```
# [MSA](@id Module-MSA)
The MSA module of MIToS has utilities for working with Multiple Sequence Alignments of
protein sequences (MSA).
```julia
using MIToS.MSA # to load the MSA module
```
## Features
- [**Read**](@ref Reading-MSA-files) and [**write**](@ref Writing-MSA-files) MSAs in `Stockholm`, `FASTA`, `A3M`, `A2M`, `PIR` or `Raw` format.
- Handle [**MSA annotations**](@ref MSA-Annotations).
- [**Edit the MSA**](@ref Editing-your-MSA), e.g. delete columns or sequences, change sequence order, shuffling...
- [**Keep track of positions**](@ref Column-and-sequence-mappings) and annotations after modifications on the MSA.
- [**Describe an MSA**](@ref Describing-your-MSA), e.g. mean percent identity, sequence coverage, gap percentage...
- [**Sequence clustering**](@ref Sequence-clustering) with a fast implementation of the Hobohm I algorithm.
## Contents
```@contents
Pages = ["MSA.md"]
Depth = 4
```
## [MSA IO](@id MSA-IO)
### [Reading MSA files](@id Reading-MSA-files)
The main function for reading MSA files in MIToS is `read_file` and it is defined in the `Utils`
module. This function takes a filename/path as a first argument followed by other
arguments. It opens the file and uses the arguments to call the `parse_file` function.
`read_file` decides how to open the file, using the prefixes (e.g. https) and suffixes
(i.e. extensions) of the file name, while `parse_file` does the actual parsing of
the file. You can use `read_file` with **gzipped files** if they have the `.gz` extension, and
also with URLs pointing to a **web file**.
The second argument of `read_file` and `parse_file` is the file `FileFormat`. The supported MSA formats
at the moment are `Stockholm`, `FASTA`, `PIR` (NBRF), `A3M`, `A2M`, and `Raw`.
For example, reading with MIToS the full Stockholm MSA of the Pfam family *PF09645* from
the MIToS test data will be:
```@example msa_read
using MIToS.MSA
read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.stockholm",
Stockholm,
)
```
The third (and optional) argument of `read_file` and `parse_file` is the output MSA type:
- `Matrix{Residue}` : It only contains the aligned sequences.
- `MultipleSequenceAlignment` : It contains the aligned sequences and their names/identifiers.
- `AnnotatedMultipleSequenceAlignment` : It's the richest MIToS' MSA format and it's the default. It includes the aligned sequences, their names and the MSA annotations.
Example of `Matrix{Residue}` output using a `Stockholm` file as input:
```@example msa_read
read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.stockholm",
Stockholm,
Matrix{Residue},
)
```
Because `read_file` calls `parse_file`, you should look into the documentation of `parse_file`
to know the available keyword arguments. The optional keyword arguments of
those functions are:
- `generatemapping` : If `generatemapping` is `true` (default: `false`), sequences and columns mappings are generated and saved in the MSA annotations. **The default is `false` to not overwrite mappings by mistake when you read an annotated MSA file saved with MIToS.**
- `useidcoordinates` : If `useidcoordinates` is `true` (default: `false`) and the names have the form *seqname/start-end*, MIToS uses these coordinates to generate sequence
  mappings. This is safe and useful with unmodified Pfam MSAs. **Do not use it when reading an MSA saved with MIToS. MIToS deletes unaligned insert columns, and therefore it disrupts sequences that have them.**
- `deletefullgaps` : Given that lowercase characters and dots are converted to gaps, unaligned insert columns in the MSA (derived from a HMM profile) are converted into full
gap columns. `deletefullgaps` is `true` by default, deleting full gaps columns and therefore insert columns.
!!! note
**If you want to keep the insert columns...** Use the keyword argument `keepinserts`
to `true` in `read_file`/`parse_file`. This only works with an `AnnotatedMultipleSequenceAlignment`
output. A column annotation (`"Aligned"`) is stored in the annotations, where insert
columns are marked with `0` and aligned columns with `1`.
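For example, a minimal sketch of reading a local copy of a Pfam alignment (the file name here is hypothetical) keeping the mappings and the insert columns would be:
```julia
using MIToS.MSA
# "PF09645_full.stockholm" is a hypothetical local copy of the Pfam alignment
msa = read_file(
    "PF09645_full.stockholm",
    Stockholm,
    generatemapping = true,  # store the sequence and column mappings in the annotations
    useidcoordinates = true, # use the start-end coordinates of the sequence names
    keepinserts = true,      # keep insert columns, marked in the "Aligned" column annotation
)
```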
When `read_file` returns an `AnnotatedMultipleSequenceAlignment`, it uses the MSA `Annotations`
to keep track of performed modifications. To access these notes, use `printmodifications`:
```@example msa_read
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.stockholm",
Stockholm,
)
printmodifications(msa)
```
### [Writing MSA files](@id Writing-MSA-files)
The Julia REPL shows MSAs as matrices. If you want to print them in another format, you should
use the `print_file` function with an MSA object as the first argument and the `FileFormat` (`FASTA`,
`Stockholm`, `PIR` or `Raw`) as the second argument.
```@example msa_write
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.stockholm",
Stockholm,
) # reads a Stockholm MSA file
print_file(msa, FASTA) # prints msa in FASTA format
```
To save an MSA object to a file, use the `write_file` function. This function takes a filename
as a first argument. If the filename ends with `.gz`, the output will be a compressed
(gzipped) file. The next two arguments of `write_file` are passed to `print_file`,
so `write_file` behaves as `print_file`.
```@example msa_write
write_file("msa.gz", msa, FASTA) # writes msa in FASTA format in a gzipped file
```
## [MSA Annotations](@id MSA-Annotations)
MSA annotations are based on the Stockholm format mark-ups. There are four types of
annotations stored as dictionaries. All the annotations have a feature name as part of the
key, which should be a single "word" (without spaces) and less than 50 characters long.
- **File annotations** : The annotations can contain either file or MSA information. They have feature names as keys and the values are strings (free text). Lines starting with
`#=GF` in Stockholm format.
- **Column annotations** : They have feature names as keys and strings with exactly 1 char per column as values. Lines starting with `#=GC` in Stockholm format.
- **Sequence annotations** : The keys are tuples with the sequence name and the feature name. The values are free text (strings). Lines starting with `#=GS` in Stockholm format.
Annotations in the `PIR`/NBRF format are also stored as sequence annotations. In particular, we use the names `"Type"` and `"Title"` to name the sequence type in the
identifier line and the first comment line before the sequence in PIR files, respectively.
- **Residue annotations** : The keys are tuples with the sequence name and the feature name. The values are strings with exactly 1 char per column/residues. `#=GR` lines in Stockholm format.
The Julia REPL shows the `Annotations` type as it is represented in the [Stockholm format](https://en.wikipedia.org/wiki/Stockholm_format).
You can get the `Annotations` inside an annotated MSA or sequence using the `annotations`
function.
```@example msa_annot
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF16996.alignment.full",
Stockholm,
)
annotations(msa)
```
Particular annotations can be accessed using the functions `getannot...`. These functions
take the MSA/sequence as first argument and the feature name of the desired annotation as
the last. In the case of `getannotsequence` and `getannotresidue`, the second argument
should be the sequence name.
```@example msa_annot
getannotsequence(msa, "A8AWV6_STRGC/3-57", "AC") # ("A8AWV6_STRGC/3-57", "AC") is the key in the dictionary
```
If you want to add new annotations, you should use the `setannot…!` functions. These
functions take the same arguments as the `getannot...` functions, plus an
extra argument used to indicate the new annotation value.
```@example msa_annot
setannotsequence!(msa, "A8AWV6_STRGC/3-57", "New_Feature_Name", "New_Annotation")
```
A `getannot...` function called without the key (the last arguments) returns the particular
annotation dictionary. As you can see, the new sequence annotation is now part of our
MSA annotations.
```@example msa_annot
getannotsequence(msa)
```
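The other annotation types can be accessed in the same way. For example, continuing with the same `msa` object, the following sketch returns the file-level and column-level annotation dictionaries:
```julia
getannotfile(msa)   # dictionary with the file-level (#=GF) annotations
getannotcolumn(msa) # dictionary with the column-level (#=GC) annotations
```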
## [Editing your MSA](@id Editing-your-MSA)
MIToS offers functions to edit your MSA. Because these functions modify the MSA, their
names end with a bang `!`, following the Julia convention. Some of these functions have an
`annotate` keyword argument (in general, it's `true` by default) to indicate if the
modification should be recorded in the MSA/sequence annotations.
One common task is to delete sequences or columns of the MSA. This can be done using the
functions `filtersequences!` and `filtercolumns!`. These functions take the MSA or sequence
(when possible) as the first argument and a `BitVector` or `Vector{Bool}` mask as the second
argument. They delete all the sequences or columns where the mask is `false`. These functions
are also defined for `Annotations`, which allows MIToS to automatically update (modify) the
annotations (and therefore, the sequence and column mappings) in the MSA.
These two deleting operations are used in the second and third mutating
functions of the following list:
- `setreference!` : Sets one of the sequences as the first sequence of the MSA (query or reference sequence).
- `adjustreference!` : Deletes columns with gaps in the first sequence of the MSA (reference).
- `gapstrip!` : This function first calls `adjustreference!`, then deletes sequences with low (user defined) MSA coverage and finally, columns with user defined % of gaps.
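As a rough sketch of how these three functions could be chained (using the default thresholds of `gapstrip!` and assuming `msa` is an already loaded annotated MSA):
```julia
using MIToS.MSA
setreference!(msa, 2) # use the second sequence as the new reference (first row)
adjustreference!(msa) # delete columns with gaps in the reference sequence
gapstrip!(msa)        # delete low coverage sequences and then gappy columns
```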
There is also the `shuffle_msa!` function, which generates random alignments by scrambling
the sequences or columns within a multiple sequence alignment (MSA). This function
randomly permutes the residues along sequences (`dims=1`) or columns
(`dims=2`). The optional `subset` argument allows you to shuffle only a subset of them.
Additionally, the `fixedgaps` keyword argument specifies whether gaps should remain in
their positions, and the `fixed_reference` keyword argument indicates if the residues in
the first sequence should remain in their positions. This function is pretty useful to
generate the null distribution of a statistic. For example, the `Information` module of
MIToS uses it to calculate the Z scores of the MI values.
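A minimal sketch of shuffling the residues within each column of an already loaded MSA, keeping the gaps in their positions, could be:
```julia
using MIToS.MSA
shuffled = deepcopy(msa) # keep the original MSA untouched
shuffle_msa!(shuffled, dims = 2, fixedgaps = true) # scramble residues within each column; gaps stay in place
```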
#### [Example: Deleting sequences](@id Example:-Deleting-sequences)
For example, if you want to delete all proteins from *Sulfolobus islandicus* in the
*PF09645* MSA, you can delete all the sequences that have `_SULIY` in their
UniProt entry names:
```@example msa_edit
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.stockholm",
Stockholm,
)
sequencenames(msa) # the function sequencenames returns the sequence names in the MSA
```
```@example msa_edit
mask = map(x -> !occursin(r"_SULIY", x), sequencenames(msa)) # an element of mask is true if "_SULIY" is not in the name
```
```@example msa_edit
filtersequences!(msa, mask) # deletes all the sequences where mask is false
sequencenames(msa)
```
#### [Example: Exporting a MSA for freecontact (part I)](@id Example:-Exporting-a-MSA-for-freecontact-(part-I))
The simplest input for the command line tool [freecontact](https://rostlab.org/owiki/index.php/FreeContact)
(if you don't want to set `--mincontsep`) is a `Raw` MSA file with a reference sequence
without insertions or gaps. This is easy to get with MIToS using `read_file` (deletes the insert
columns), `setreference!` (to choose a reference), `adjustreference!` (to delete columns
with gaps in the reference) and `write_file` (to save it in `Raw` format) functions.
```@repl
using MIToS.MSA
file_name = "https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.stockholm"
msa = read_file(file_name, Stockholm)
msa_coverage = coverage(msa)
maxcoverage, maxindex = findmax(msa_coverage)
setreference!(msa, maxindex[1]) # the sequence with the highest coverage
adjustreference!(msa)
write_file("tofreecontact.msa", msa, Raw)
print(read_file("tofreecontact.msa", String)) # display output file
```
## [Column and sequence mappings](@id Column-and-sequence-mappings)
Inserts in a Stockholm MSA allow access to the full fragment of the aligned sequences.
Using this, combined with the sequence names that contain the coordinates used in Pfam, you
can know the UniProt residue number of each residue in the MSA.
```julia
"PROT_SPECI/3-15 .....insertALIGNED"
# 3456789111111
# 012345
```
The MIToS `read_file` and `parse_file` functions delete the insert columns, but when `generatemapping`
is `true`, they map each residue to its residue number before deleting those columns.
If you don't set `useidcoordinates` to `true`, the number of the first residue will
be 1 instead of 3 in the previous example.
```@example msa_mapping
using MIToS.MSA
msa = parse_file(
"PROT_SPECI/3-15 .....insertALIGNED",
Stockholm,
generatemapping = true,
useidcoordinates = true,
)
```
MIToS also keeps the column number of the input MSA and its total number of columns. All
this data is stored in the MSA annotations using the `SeqMap`, `ColMap` and `NCol` feature
names.
```@example msa_mapping
annotations(msa)
```
To have an easy access to mapping data, MIToS provides the `getsequencemapping` and
`getcolumnmapping` functions.
```@example msa_mapping
getsequencemapping(msa, "PROT_SPECI/3-15")
```
```@example msa_mapping
getcolumnmapping(msa)
```
#### [Example: Exporting a MSA for freecontact (part II)](@id Example:-Exporting-a-MSA-for-freecontact-(part-II))
If we want to use the `--mincontsep` argument of `freecontact` to calculate scores between
distant residues, we will need to add a header to the MSA. This header should contain the
residue number of the first residue of the sequence and the full fragment of that sequence
(with the inserts). This data is used by FreeContact to calculate the residue number of
each residue in the reference sequence.
We are going to use MIToS mapping data to create this header, so we read the MSA with
`generatemapping` and `useidcoordinates` set to `true`.
```@example freecontact_ii
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF18883.stockholm.gz",
Stockholm,
generatemapping = true,
useidcoordinates = true,
)
```
Here, we are going to choose the sequence with the highest coverage of the MSA as our
reference sequence.
```@example freecontact_ii
msa_coverage = coverage(msa)
maxcoverage, maxindex = findmax(msa_coverage)
setreference!(msa, maxindex[1])
adjustreference!(msa)
```
MIToS deletes the residues in insert columns, so we are going to use the
sequence mapping to generate the whole fragment of the reference sequence
(filling the missing regions with `'x'`).
```@example freecontact_ii
seqmap = getsequencemapping(msa, 1) # seqmap will be a vector with the residue numbers of the first sequence (reference)
seq = collect(stringsequence(msa, 1)) # seq will be a Vector of Chars with the reference sequence
sequence = map(seqmap[1]:seqmap[end]) do seqpos # for each position in the whole fragment
if seqpos in seqmap # if that position is in the MSA
popfirst!(seq) # the residue is taken from seq
else # otherwise
'x' # 'x' is included
end
end
sequence = join(sequence) # join the Chars on the Vector to create a string
```
Once we have the whole fragment of the sequence, we create the file and write the header in
the required format (as in the man page of freecontact).
```@example freecontact_ii
open("tofreecontact.msa", "w") do fh
println(fh, "# querystart=", seqmap[1])
println(fh, "# query=", sequence)
end
```
As the last (optional) argument, `write_file` takes the mode in which the file is opened. We use
`"a"` here to append the MSA to the header.
```@example freecontact_ii
write_file("tofreecontact.msa", msa, Raw, "a")
```
```@example freecontact_ii
print(join(first(readlines("tofreecontact.msa"), 5), '\n')) # It displays the first five lines
```
## [Get sequences from a MSA](@id Get-sequences-from-a-MSA)
It's possible to index the MSA like any other matrix to get an aligned sequence. This
returns an `Array` of `Residue`s without annotations, but keeping the names/identifiers.
```@example msa_indexing
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/test/data/PF09645_full.stockholm",
Stockholm,
generatemapping = true,
useidcoordinates = true,
)
```
```@example msa_indexing
msa[2, :] # second sequence of the MSA, it keeps column names
```
```@example msa_indexing
msa[2:2, :] # Using the range 2:2 to select the second sequence, keeping also the sequence name
```
If you want to obtain the aligned sequence with its name and annotations (and therefore
sequence and column mappings), you should use the function `getsequence`. This function
returns an `AlignedSequence` with the sequence name from a `MultipleSequenceAlignment` or
an `AnnotatedAlignedSequence`, that also contains annotations, from an
`AnnotatedMultipleSequenceAlignment`.
```@example msa_indexing
secondsequence = getsequence(msa, 2)
```
```@example msa_indexing
annotations(secondsequence)
```
Use `stringsequence` if you want to get the sequence as a string.
```@example msa_indexing
stringsequence(msa, 2)
```
Because matrices are stored columnwise in Julia, you will find the
`getresiduesequences` function useful when you need to operate heavily over sequences.
```@example msa_indexing
getresiduesequences(msa)
```
## [Describing your MSA](@id Describing-your-MSA)
The MSA module has a number of functions to gain insight into your MSA. Using `MIToS.MSA`,
one can easily ask for...
- The **number of columns and sequences** with the `ncolumns` and `nsequences` functions.
- The fraction of columns with residues (**coverage**) for each sequence making use of the `coverage` method.
- The **fraction or percentage of gaps/residues** using the functions `gapfraction`, `residuefraction` and `columngapfraction`.
- The **percentage of identity** (PID) between each sequence of the MSA or its mean value with `percentidentity` and `meanpercentidentity`.
The percentage identity between two aligned sequences is a common measure of sequence
similarity and is used by the `hobohmI` method to estimate and reduce MSA redundancy.
**MIToS functions to calculate percent identity don't align the sequences; they need
already aligned sequences.** Full gap columns don't count towards the alignment length.
```@example msa_describe
using MIToS.MSA
msa = permutedims(hcat(
res"--GGG-", # res"..." uses the @res_str macro to create a (column) Vector{Residue}
res"---GGG",
), (2, 1))
# identities 000110 sum 2
# aligned residues 001111 sum 4
```
```@example msa_describe
percentidentity(msa[1, :], msa[2, :]) # 2 / 4
```
To quickly determine if the percentage of identity is greater than a given value, use
that threshold as the third argument. `percentidentity(seqa, seqb, pid)` is a lot faster
than `percentidentity(seqa, seqb) >= pid`.
```@example msa_describe
percentidentity(msa[1, :], msa[2, :], 62) # 50% >= 62%
```
#### [Example: Plotting gap percentage per column and coverage per sequence](@id Example:-Plotting-gap-percentage-per-column-and-coverage-per-sequence)
The `gapfraction` and `coverage` functions return a vector of numbers between `0.0` and
`1.0` (fraction of...). Sometimes it's useful to plot this data to quickly understand the
MSA structure. In this example, we are going to use the [Plots](http://plots.readthedocs.org/en/latest/)
package for plotting, with the [GR](https://github.com/jheinen/GR.jl)
backend, but you are free to use any of the Julia plotting libraries.
```@setup msa_plots
@info "MSA: Plots"
using Plots
gr() # Hide possible warnings
```
```@example msa_plots
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF18883.stockholm.gz",
Stockholm,
)
using Plots
gr(size = (600, 300))
plot(
# x is a range from 1 to the number of columns
1:ncolumns(msa),
# y is a Vector{Float64} with the percentage of gaps of each column
vec(columngapfraction(msa)) .* 100.0,
linetype = :line,
ylabel = "gaps [%]",
xlabel = "columns",
legend = false,
)
png("msa_gaps.png") # hide
nothing # hide
```

```@example msa_plots
plot(
# x is a range from 1 to the number of sequences
1:nsequences(msa),
# y is a Vector{Float64} with the coverage of each sequence
vec(coverage(msa)) .* 100,
linetype = :line,
ylabel = "coverage [%]",
xlabel = "sequences",
legend = false,
)
png("msa_coverage.png") # hide
nothing # hide
```

```@example msa_plots
plot(msa)
png("msa_msa.png") # hide
nothing # hide
```

#### [Example: Filter sequences per coverage and columns per gap fraction](@id Example:-Filter-sequences-per-coverage-and-columns-per-gap-fraction)
Taking advantage of the `filter...!` functions and the `coverage` and `columngapfraction`
functions, it's possible to delete short sequences or columns with a lot of gaps.
```@example msa_plots
println("\tsequences\tcolumns")
println("Before:\t", nsequences(msa), "\t\t", ncolumns(msa))
# delete sequences with less than 90% coverage of the MSA length:
filtersequences!(msa, coverage(msa) .>= 0.9)
# delete columns with more than 10% of gaps:
filtercolumns!(msa, columngapfraction(msa) .<= 0.1)
println("After:\t", nsequences(msa), "\t\t", ncolumns(msa))
```
```@example msa_plots
histogram(
vec(columngapfraction(msa)),
# Using vec() to get a Vector{Float64} with the fraction of gaps of each column
xlabel = "gap fraction in [0,1]",
bins = 20,
legend = false,
)
png("msa_hist_gaps.png") # hide
nothing # hide
```

```@example msa_plots
histogram(
vec(coverage(msa) .* 100.0), # Column with the coverage of each sequence
xlabel = "coverage [%]",
legend = false,
)
png("msa_hist_coverage.png") # hide
nothing # hide
```

#### [Example: Plotting the percentage of identity between sequences](@id Example:-Plotting-the-percentage-of-identity-between-sequences)
The distribution of the percentage of identity between every pair of sequences in an MSA
gives an idea of the MSA diversity. In this example, we are using `percentidentity`
over an MSA to get those identity values.
```@example msa_pid
using MIToS.MSA
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF18883.stockholm.gz",
Stockholm,
)
pid = percentidentity(msa)
nothing # hide
```
MIToS stores the matrix of percentage of identity between the aligned sequences as a
PairwiseListMatrix from the [PairwiseListMatrices](http://diegozea.github.io/PairwiseListMatrices.jl/)
package. This matrix type saves RAM, allowing the storage of big matrices. In this
example, we use the `to_table` function of *PairwiseListMatrices* to convert the matrix
into a table with indices.
```@example msa_pid
using PairwiseListMatrices
pidtable = to_table(pid, diagonal = false)
```
The function `quantile` gives a quick idea of the percentage identity distribution of the MSA.
```@example msa_pid
using Statistics
quantile(convert(Vector{Float64}, pidtable[:, 3]), [0.00, 0.25, 0.50, 0.75, 1.00])
```
The function `meanpercentidentity` gives the mean value of the percent identity
distribution for MSAs with fewer than 300 sequences, or a quick estimate (the mean PID in a
random sample of sequence pairs) otherwise, unless you set `exact` to `true`.
```@example msa_pid
meanpercentidentity(msa)
```
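If you need the exact mean value for a larger MSA, you can set the `exact` keyword argument mentioned above to `true`, as in this sketch (it can be slow for big alignments):
```julia
meanpercentidentity(msa, exact = true) # mean over all sequence pairs instead of a random sample
```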
One can easily plot that matrix and its distribution using the `heatmap` and `histogram`
functions of the [Plots](https://github.com/tbreloff/Plots.jl)
package.
```@setup msa_pid
@info "MSA: PID"
using Plots
gr() # Hide possible warnings
```
```@example msa_pid
using Plots
gr()
heatmap(convert(Matrix, pid), yflip = true, ratio = :equal)
png("msa_heatmap_pid.png") # hide
nothing # hide
```

```@example msa_pid
histogram(pidtable[:, 3], xlabel = "Percentage of identity", legend = false)
png("msa_hist_pid.png") # hide
nothing # hide
```

## [Sequence clustering](@id Sequence-clustering)
The `MSA` module allows clustering the sequences in an MSA. The `hobohmI` function takes as
input an MSA followed by an identity threshold value, and returns a `Clusters` object
with the result of a Hobohm I sequence clustering [hobohm1992selection](@cite).
The Hobohm I algorithm adds a sequence to an existing cluster if
the percentage of identity is equal to or greater than the threshold.
`Clusters` is a sub-type of `ClusteringResult` from the [Clustering.jl](http://clusteringjl.readthedocs.org/en/latest/index.html)
package. One advantage of using a sub-type of `ClusteringResult` is that you are able to use
any method defined in `Clustering.jl`, like `varinfo` (Variation of Information), for example.
Also, you can use any clustering algorithm included in *Clustering.jl* and convert its
result to a `Clusters` object to use it with MIToS.
`MSA` defines the functions `nclusters` to get the resulting number of clusters, `counts`
to get the number of sequences in each cluster, and `assignments` to get the cluster number
of each sequence. The most important method is `getweight`, which returns the weight of
each sequence. This method is used in the `Information` module of MIToS to reduce redundancy.
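A short sketch of how these functions can be combined, assuming `msa` is an already loaded MSA:
```julia
using MIToS.MSA
using Clustering # to use the nclusters, counts and assignments functions
clusters = hobohmI(msa, 62) # Hobohm I clustering at 62% identity
nclusters(clusters)         # number of clusters
counts(clusters)            # number of sequences in each cluster
assignments(clusters)       # cluster index of each sequence
getweight(clusters)         # weight of each sequence, used to reduce redundancy
```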
#### [Example: Reducing redundancy of a MSA](@id Example:-Reducing-redundancy-of-a-MSA)
MSAs can suffer from an unnatural sequence redundancy and a high number of protein
fragments. In this example, we are using a sequence clustering to make a non-redundant set
of representative sequences. We are going to use the function `hobohmI` to perform the
clustering with the Hobohm I algorithm at 62% identity.
```@setup msa_clusters
@info "MSA: Clusters"
using Plots
using StatsPlots
using DataFrames
gr() # Hide possible warnings
```
```@example msa_clusters
using MIToS.MSA
using Clustering # to use the nclusters and assignments functions
msa = read_file(
"https://raw.githubusercontent.com/diegozea/MIToS.jl/master/docs/data/PF18883.stockholm.gz",
Stockholm,
)
println("This MSA has ", nsequences(msa), " sequences...")
```
```@example msa_clusters
clusters = hobohmI(msa, 62)
```
```@example msa_clusters
println(
"...but has only ",
nclusters(clusters),
" sequence clusters after a clustering at 62% identity.",
)
```
```@example msa_clusters
using Plots
gr()
plot(msa)
png("msa_clusters_i.png") # hide
nothing # hide
```

We are going to use the [DataFrames](http://dataframesjl.readthedocs.org/en/latest/)
package to easily select the sequence with the highest coverage of each cluster.
```@example msa_clusters
using DataFrames
df = DataFrame(
seqnum = 1:nsequences(msa),
seqname = sequencenames(msa),
cluster = assignments(clusters), # the cluster number/index of each sequence
coverage = vec(coverage(msa)),
)
first(df, 5)
```
It is possible to use this `DataFrame` and `Plots` to plot the sequence coverage of the MSA
and also a histogram of the number of sequences in each cluster:
```@example msa_clusters
using StatsPlots # Plotting DataFrames
h = @df df histogram(:cluster, ylabel = "nseq")
p = @df df plot(:cluster, :coverage, linetype = :scatter)
plot(p, h, nc = 1, xlim = (0, nclusters(clusters) + 1), legend = false)
png("msa_clusters_ii.png") # hide
nothing # hide
```

We use the *Split-Apply-Combine* strategy, through the `groupby` and `combine` functions of
the `DataFrames` package, to select the sequence with the highest coverage for each cluster.
```@example msa_clusters
grouped_df = groupby(df, :cluster)
maxcoverage = combine(grouped_df) do cl
row_index = findmax(cl.coverage)[2]
cl[row_index, [:seqnum, :seqname, :coverage]]
end
first(maxcoverage, 5)
```
```@example msa_clusters
p = @df maxcoverage plot(:cluster, :coverage, linetype = :scatter)
h = @df maxcoverage histogram(:cluster, ylabel = "nseq")
plot(p, h, nc = 1, xlim = (0, nclusters(clusters) + 1), legend = false)
png("msa_clusters_iii.png") # hide
nothing # hide
```

We can easily generate a mask using an array comprehension to select only the representative
sequences of the MSA (deleting the rest of the sequences with `filtersequences!`).
```@example msa_clusters
cluster_references = Bool[seqnum in maxcoverage.seqnum for seqnum = 1:nsequences(msa)]
```
```@example msa_clusters
filtersequences!(msa, cluster_references)
```
```@example msa_clusters
plot(msa)
png("msa_clusters_iv.png") # hide
nothing # hide
```

## [Concatenating MSAs](@id Concatenating-MSAs)
Concatenating multiple sequence alignments can be helpful in various bioinformatics
applications. It allows researchers to combine the alignments of different sequences or
regions into a single MSA for further analysis. Examples of this maneuver are
concatenating two protein sequences from the same organism to estimate coevolution
among those proteins or to model the protein-protein interaction using tools such as
AlphaFold.
### Horizontal and Vertical Concatenation
We can concatenate two MSAs as matrices using Julia's `hcat` and `vcat` functions.
However, MIToS defines special methods for these functions on MSA objects to deal with
sequence and column names and annotations. To use `hcat`, the MSAs only need to have
the same number of sequences. The `hcat` function will concatenate the first sequence of
the first MSA with the first sequence of the second MSA, and so on. For example, let's
define two small MSAs `msa_a` and `msa_b`, and concatenate them horizontally:
```@repl msa_hcat
using MIToS.MSA
msa_a = AnnotatedMultipleSequenceAlignment(Residue[
'A' 'R' 'N'
'D' 'C' 'Q'
]);
rename_sequences!(msa_a, ["SEQ1_A", "SEQ2_A"])
msa_b = AnnotatedMultipleSequenceAlignment(Residue[
'N' 'Q'
'E' 'G'
]);
rename_sequences!(msa_b, ["SEQ1_B", "SEQ2_B"])
concatenated_msa = hcat(msa_a, msa_b)
```
As you might have noticed, the `hcat` function preserves the **sequence names** by
concatenating them using `_&_` as a separator. So, the first sequence of the concatenated
MSA is `SEQ1_A_&_SEQ1_B`. Also, the **column names** have changed in the concatenated MSA.
For example, the first column of `msa_a` is now the first column of `concatenated_msa`,
but its name changed from `1` to `1_1`. The `hcat` function renames the columns so that
the first number, the one before the underscore, indicates the index of the sub-MSA.
The first sub-MSA in the concatenated MSA is `1`, the second sub-MSA is `2`, and so on.
This allows you to track the origin of each column in the concatenated MSA.
You can access a vector of those indices using the `gethcatmapping` function:
```@repl msa_hcat
gethcatmapping(concatenated_msa)
```
If we perform multiple concatenations—i.e., if we call `hcat` on an MSA output of another
call to `hcat`—the `hcat` function will remember the sub-MSA boundaries to continue the
numeration accordingly. For example, let's create and add a third MSA:
```@repl msa_hcat
msa_c = AnnotatedMultipleSequenceAlignment(Residue[
'A' 'H'
'A' 'H'
]);
rename_sequences!(msa_c, ["SEQ1_C", "SEQ2_C"])
hcat(concatenated_msa, msa_c)
```
As you can see, the `hcat` function detects the previous concatenation and continues the
indexing from the last MSA, so column `1` of `msa_c` is now `3_1` in the concatenated
MSA. The `hcat` function can take more than two MSAs as arguments. For example, you can get
the same result as above by calling `hcat(msa_a, msa_b, msa_c)`.
To concatenate MSAs vertically, you can use the `vcat` function. The only requirement is
that the MSAs have the same number of columns. For example, let's define two small MSAs.
The first column of `msa_a` will be concatenated with the first column of `msa_b`,
and so on:
```@repl msa_vcat
using MIToS.MSA
msa_a = AnnotatedMultipleSequenceAlignment(Residue[
'A' 'R'
'D' 'C'
'E' 'G'
])
msa_b = AnnotatedMultipleSequenceAlignment(Residue[
'N' 'Q'
'D' 'R'
])
concatenated_msa = vcat(msa_a, msa_b)
```
In this case, `vcat` adds the MSA index prefix to the sequence names. So, the
sequence `1` of `msa_a` is now `1_1` in the concatenated MSA. The `vcat` function, similar
to `hcat`, can take more than two MSAs as arguments in case you need to concatenate
multiple alignments vertically.
### Joining MSAs
Sometimes, you may need to join or merge two MSAs that have a different number of sequences or
columns. For such cases, MIToS provides the [`join_msas`](@ref MIToS.MSA.join_msas)
function. This function allows you to join two MSAs based on specified matching positions
or names. It supports different types of joins: inner, outer, left, and right.
You can indicate the positions or names to match using an iterable of pairs or separate
lists of positions or names. For example, using an `OrderedDict` from the
`OrderedCollections` package, you can identify which positions on the first MSA (the keys)
should match with which positions on the second MSA (the values).
Let's see that in one fictional example:
```@repl msa_join
using MIToS.MSA, OrderedCollections
msa_a = AnnotatedMultipleSequenceAlignment(Residue[
'A' 'R' 'D'
'G' 'K' 'E'
'G' 'R' 'D'
]);
rename_sequences!(msa_a, ["aa_HUMAN", "bb_MOUSE", "cc_YEAST"])
msa_b = AnnotatedMultipleSequenceAlignment(Residue[
'N' 'A'
'E' 'G'
'E' 'A'
]);
rename_sequences!(msa_b, ["AA_HUMAN", "BB_MOUSE", "CC_SHEEP"])
pairing = OrderedDict("aa_HUMAN" => "AA_HUMAN", "bb_MOUSE" => "BB_MOUSE")
join_msas(msa_a, msa_b, pairing)
```
As we can see, the `join_msas` function has matched the sequences of both MSAs based on the
specified pairing; in this example, we created a dictionary to pair the sequences from
the same species. The `join_msas` function has two important keyword arguments: `kind` and `axis`.
By default, the function performs an outer join (`kind = :outer`) and matches the sequences
(`axis = 1`). You can change these arguments to perform other kinds of joins or to
match the columns. Since we performed an outer join, the resulting MSA contains all the
sequences from both input MSAs, and `join_msas` has added gaps where the sequences do not
match.
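Continuing with the same `msa_a`, `msa_b` and `pairing` objects, a minimal sketch of an inner join over the sequences would be:
```julia
# an inner join keeps only the sequences paired in both MSAs
join_msas(msa_a, msa_b, pairing, kind = :inner, axis = 1)
```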
```@setup log
@info "MSA API docs"
```
# MSA
```@docs
MIToS.MSA
```
## Contents
```@contents
Pages = ["MSA_API.md"]
Depth = 2
```
## Types
```@autodocs
Modules = [MIToS.MSA]
Private = false
Order = [:type]
```
## Constants
```@autodocs
Modules = [MIToS.MSA]
Private = false
Order = [:constant]
```
## Macros
```@autodocs
Modules = [MIToS.MSA]
Private = false
Order = [:macro]
```
## Methods and functions
```@autodocs
Modules = [MIToS.MSA]
Private = false
Order = [:function]
```
```@setup log
@info "PDB docs"
```
# [PDB](@id Module-PDB)
The module `PDB` defines types and methods to work with protein structures inside Julia. It
is useful to link structural and sequence information, and it is needed to measure the
predictive performance of mutual information scores at protein contact prediction.
```julia
using MIToS.PDB # to load the PDB module
```
## Features
- [**Read and parse**](@ref Read-and-parse-PDB-files) mmCIF, PDB, and PDBML files.
- Download structures from the PDB and AlphaFold databases.
- Calculate distance and contacts between atoms or residues.
- Determine interaction between residues.
## Contents
```@contents
Pages = ["PDB.md"]
Depth = 4
```
## Retrieve information from PDB database
This module exports the `downloadpdb` function, to retrieve a PDB file from
[PDB database](http://www.rcsb.org/pdb/home/home.do).
By default, this function downloads a gzipped mmCIF file (`format=MMCIFFile`), which can
be easily read by MIToS. You can set `format` to `PDBFile` if you want to
download a PDB file instead.
```@example pdb_io
using MIToS.PDB
pdbfile = downloadpdb("1IVO", format = PDBFile)
```
The `PDB` module also exports `getpdbdescription` to access the header information of a
PDB entry.
```@example pdb_io
getpdbdescription("1IVO")
```
## Retrieve information from AlphaFold database
This module provides functions to download and query protein structures from AlphaFold DB.
The `download_alphafold_structure` function downloads the structure file, in mmCIF format
by default, for a given UniProt Accession ID. You can set `format` to `PDBFile` to download
a PDB file instead.
```@example alphafold_io
using MIToS.PDB
# Get the structure for the human insulin
file = download_alphafold_structure("P01308")
```
If you need more information about that entry, you can use the `query_alphafolddb` function.
The `query_alphafolddb` function returns a `JSON3.Object` that works like a dictionary.
```@example alphafold_io
json_result = query_alphafolddb("P01308")
```
You can access the information in the `JSON3.Object` using the keys. For example, to get
the URL to the PAE matrix image:
```@example alphafold_io
pae_image_url = json_result["paeImageUrl"]
```
## [Read and parse PDB files](@id Read-and-parse-PDB-files)
This is easy using the `read_file` and `parse_file` functions, indicating the filename and the
`FileFormat`: `MMCIFFile` for mmCIF files, `PDBML` for PDB XML files, or `PDBFile` for usual PDB files.
These functions return a `Vector` of `PDBResidue` objects with all the residues in the PDB.
To return only a specific subset of residues/atoms you can use any of the following
keyword arguments:
| keyword arguments | default | returns only ... |
|:----------------- |:------- |:------------------------------------------------------------------ |
| `chain` | `All` | residues from a PDB chain, i.e. `"A"` |
| `model` | `All` | residues from a determined model, i.e. `"1"` |
| `group` | `All` | residues from a group: `"ATOM"`, `"HETATM"` or `All` for both |
| `atomname` | `All` | atoms with a specific name, i.e. `"CA"` |
| `onlyheavy` | `false` | heavy atoms (not hydrogens) if it's `true` |
| `occupancyfilter` | `false` | only the atoms with the best occupancy are returned if it's `true` |
!!! note
    **For PDBML files** it is possible to set the keyword argument `label` to `false`
    (default: `true`) to get the **auth_** attributes instead of the **label_**
attributes for `chain`, `atom` and residue `name` fields. The **auth_** attributes are
alternatives provided by an author in order to match the identification/values used
in the publication that describes the structure.
```@example pdb_io
# Read α carbon of each residue from the 1ivo pdb file, in the model 1, chain A and in the ATOM group.
CA_1ivo =
read_file(pdbfile, PDBFile, model = "1", chain = "A", group = "ATOM", atomname = "CA")
CA_1ivo[1] # First residue. It has only the α carbon.
```
## Looking for particular residues
MIToS parses PDB files into a vector of residues, instead of using a hierarchical structure
like other packages. This approach makes the search and selection of residues or atoms a
little different.
To make it easy, this module exports the `select_residues` and `select_atoms` functions.
Given the fact that residue numbers from different chains, models, etc. can collide, we
can indicate the `model`, `chain`, `group`, `residue` number and `atom` name using the
keyword arguments of those functions. If you want to select all the residues in one of the
categories, you are able to use the type `All` (this is the default value of such arguments).
You can also use regular expressions or functions to make the selections.
```@example pdb_select
using MIToS.PDB
pdbfile = downloadpdb("1IVO", format = PDBFile)
residues_1ivo = read_file(pdbfile, PDBFile)
# Select residue number 9 from model 1 and chain B (it looks in both ATOM and HETATM groups)
select_residues(residues_1ivo, model = "1", chain = "B", residue = "9")
```
### Getting a `Dict` of `PDBResidue`s
If you prefer a `Dict` of `PDBResidue`s, indexed by their residue numbers, you can use the
`residuesdict` function.
```@example pdb_select
# Dict of residues from the model 1, chain A and from the ATOM group
chain_a = residuesdict(residues_1ivo, model = "1", chain = "A", group = "ATOM")
chain_a["9"]
```
### Select particular residues
Use the `select_residues` function to collect specific residues. It's possible to use a single
**residue number** (i.e. `"2"`) or even a **function** which should return true for the
selected residue numbers. Also **regular expressions** can be used to select residues.
Use `All` to select all the residues.
```@example pdb_select
residue_list = map(string, 2:5)
# If the list is large, you can use a `Set` to gain performance
# residue_set = Set(map(string, 2:5))
```
```@example pdb_select
first_res = select_residues(
residues_1ivo,
model = "1",
chain = "A",
group = "ATOM",
residue = resnum -> resnum in residue_list,
)
for res in first_res
println(res.id.name, " ", res.id.number)
end
```
A more complex example using an anonymous function:
```@example pdb_select
# Select all the residues of the model 1, chain A of the ATOM group with residue number less than 5
first_res = select_residues(
residues_1ivo,
model = "1",
chain = "A",
group = "ATOM",
residue = x -> parse(Int, match(r"^(\d+)", x)[1]) <= 5,
)
# The anonymous function takes the residue number (string) and use a regular expression
# to extract the number (without insertion code).
# It converts the number to `Int` to test if it is `<= 5`.
for res in first_res
println(res.id.name, " ", res.id.number)
end
```
### Select particular atoms
The `select_atoms` function allows selecting a particular set of atoms.
```@example pdb_select
# Select all the atoms with name starting with "C" using a regular expression
# from all the residues of the model 1, chain A of the ATOM group
carbons = select_atoms(
residues_1ivo,
model = "1",
chain = "A",
group = "ATOM",
residue = All,
atom = r"C.+",
)
carbons[1]
```
## Protein contact map
The PDB module offers a number of functions to measure `distance`s between atoms or
residues, to detect possible interactions or `contact`s. In particular, the `contact`
function calls the `distance` function using a threshold or limit in an optimized way.
The measure can be done between alpha carbons (`"CA"`), beta carbons (`"CB"`) (alpha carbon
for glycine), any heavy atom (`"Heavy"`) or any (`"All"`) atom of the residues.
In the following **example**, we are going to plot a contact map for the *1ivo* chain A.
Two residues will be considered in contact if their β carbons (α carbon for glycine) have a
distance of 8Å or less.
```@example pdb_cmap
using MIToS.PDB
pdbfile = downloadpdb("1IVO", format = PDBFile)
residues_1ivo = read_file(pdbfile, PDBFile)
pdb = select_residues(residues_1ivo, model = "1", chain = "A", group = "ATOM")
dmap = distance(pdb, criteria = "All") # Minimum distance between residues using all their atoms
```
Use the `contact` function to get a contact map:
```@example pdb_cmap
cmap = contact(pdb, 8.0, criteria = "CB") # Contact map
```
```@setup pdb_cmap
@info "PDB: Cmap"
using Plots
gr() # Hide possible warnings
```
```@example pdb_cmap
using Plots
gr()
heatmap(dmap, grid = false, yflip = true, ratio = :equal)
png("pdb_dmap.png") # hide
nothing # hide
```

```@example pdb_cmap
heatmap(cmap, grid = false, yflip = true, ratio = :equal)
png("pdb_cmap.png") # hide
nothing # hide
```

## Structural superposition
```@setup pdb_rmsd
@info "PDB: RMSD"
using Plots
gr() # Hide possible warnings
```
```@example pdb_rmsd
using MIToS.PDB
pdbfile = downloadpdb("2HHB")
res_2hhb = read_file(pdbfile, MMCIFFile)
chain_A = select_residues(res_2hhb, model = "1", chain = "A", group = "ATOM", residue = All)
chain_C = select_residues(res_2hhb, model = "1", chain = "C", group = "ATOM", residue = All)
using Plots
gr()
scatter3d(chain_A, label = "A", alpha = 0.5)
scatter3d!(chain_C, label = "C", alpha = 0.5)
png("pdb_unaligned.png") # hide
nothing # hide
```

```@example pdb_rmsd
superimposed_A, superimposed_C, RMSD = superimpose(chain_A, chain_C)
RMSD
```
```@example pdb_rmsd
scatter3d(superimposed_A, label = "A", alpha = 0.5)
scatter3d!(superimposed_C, label = "C", alpha = 0.5)
png("pdb_aligned.png") # hide
nothing # hide
```

```@setup log
@info "PDB API docs"
```
# PDB
```@docs
MIToS.PDB
```
## Contents
```@contents
Pages = ["PDB_API.md"]
Depth = 2
```
## Types
```@autodocs
Modules = [MIToS.PDB]
Private = false
Order = [:type]
```
## Constants
```@autodocs
Modules = [MIToS.PDB]
Private = false
Order = [:constant]
```
## Macros
```@autodocs
Modules = [MIToS.PDB]
Private = false
Order = [:macro]
```
## Methods and functions
```@autodocs
Modules = [MIToS.PDB]
Private = false
Order = [:function]
```
```@setup log
@info "Pfam docs"
```
# [Pfam](@id Module-Pfam)
MIToS defines methods and types useful for any MSA. The `Pfam` module uses other MIToS
modules in the context of Pfam MSAs, where it's possible to determine how structure and
sequence information should be mapped. This module defines functions that go from a Pfam
MSA to the protein contact prediction performance of pairwise scores estimated from that MSA.
```@example pfam_example
using MIToS.Pfam # to load the Pfam module
```
## Features
- [**Download and read**](@ref Getting-a-Pfam-MSA) Pfam MSAs.
- Obtain [**PDB information**](@ref Getting-PDB-information-from-an-MSA) from alignment annotations.
- [**Map**](@ref Getting-PDB-information-from-an-MSA) between sequence/alignment residues/columns and PDB structures.
- Measure of [**AUC**](@ref PDB-contacts-and-AUC) (ROC curve) for [**protein contact**](@ref PDB-contacts-and-AUC) prediction of MI scores.
## Contents
```@contents
Pages = ["Pfam.md"]
Depth = 4
```
## [Getting a Pfam MSA](@id Getting-a-Pfam-MSA)
The function `downloadpfam` takes a Pfam accession and downloads a Pfam MSA in Stockholm
format. In that way, you can do
```julia
pfamfile = downloadpfam("PF18883")
```
to get the MSA. But, we are going to use an already downloaded file in this case:
```@example pfam_example
using MIToS
pfamfile = joinpath(dirname(pathof(MIToS)), "..", "docs", "data", "PF18883.stockholm.gz");
```
Use the `read_file` function and the `Stockholm` `FileFormat` to get an
`AnnotatedMultipleSequenceAlignment` object with the MSA and its Pfam annotations.
You must set `generatemapping` and `useidcoordinates` to `true` the first time you read
the downloaded MSA. This is necessary for some of the methods in the `Pfam` module.
```@example pfam_example
msa = read_file(pfamfile, Stockholm, generatemapping = true, useidcoordinates = true)
```
## [Getting PDB information from an MSA](@id Getting-PDB-information-from-an-MSA)
The function `getseq2pdb` parses the MSA annotations to return a `Dict` from the sequence
identifier in the MSA to PDB and chain codes.
```@example pfam_example
getseq2pdb(msa)
```
Once you know the association between PDB chains and sequences, you can use that
information together with the `msacolumn2pdbresidue` function to get the PDB residue
number that corresponds to each MSA column for a given sequence and PDB chain.
That function downloads information from SIFTS to generate the mapping.
```@example pfam_example
col2res = msacolumn2pdbresidue(msa, "ICSA_SHIFL/611-720", "3ML3", "A")
```
The returned dictionary can be used to get the PDB residue associated with each column
(using the `msaresidues` function)...
```@example pfam_example
using MIToS.PDB
pdbfile = downloadpdb("3ML3")
pdb = read_file(pdbfile, MMCIFFile)
resdict = residuesdict(pdb, model = "1", chain = "A", group = "ATOM")
msaresidues(msa, resdict, col2res)
```
...or to delete the columns without PDB residues (using the `hasresidues` function):
```@example pfam_example
using MIToS.MSA
filtercolumns!(msa, hasresidues(msa, col2res))
```
### [PDB contacts and AUC](@id PDB-contacts-and-AUC)
The `Dict` between MSA columns and PDB residue numbers can also be used to generate a
protein contact map associated with the MSA.
```@example pfam_example
cmap = msacontacts(msa, resdict, col2res)
```
That protein contact map can be used to calculate the Area Under the ROC Curve for a given
score with the `AUC` function.
```@example pfam_example
using MIToS.Information
ZMIp, MIp = buslje09(msa)
using ROCAnalysis # You need to load ROCAnalysis to use the AUC function
AUC(ZMIp, cmap)
```
```@setup log
@info "Pfam API docs"
```
# Pfam
```@docs
MIToS.Pfam
```
## Contents
```@contents
Pages = ["Pfam_API.md"]
Depth = 2
```
## Types
```@autodocs
Modules = [MIToS.Pfam]
Private = false
Order = [:type]
```
## Constants
```@autodocs
Modules = [MIToS.Pfam]
Private = false
Order = [:constant]
```
## Macros
```@autodocs
Modules = [MIToS.Pfam]
Private = false
Order = [:macro]
```
## Methods and functions
```@autodocs
Modules = [MIToS.Pfam]
Private = false
Order = [:function]
```
```@setup log
@info "References"
```
# References
```@bibliography
```
```@setup log
@info "SIFTS docs"
```
# [SIFTS](@id Module-SIFTS)
The `SIFTS` module of MIToS allows obtaining the residue-level mapping between databases
stored in the SIFTS XML files. It makes it easy to assign PDB residues to UniProt/Pfam
positions.
Given that pairwise alignments can lead to misleading associations between
residues in both sequences, SIFTS offers a more reliable association between sequence and
structure residue numbers.
```julia
using MIToS.SIFTS # to load the SIFTS module
```
## Features
- Download and parse SIFTS XML files
- Store residue-level mapping in Julia
- Easy generation of `Dict`s between residues numbers
## Contents
```@contents
Pages = ["SIFTS.md"]
Depth = 4
```
## [Simplest residue-level mapping](@id Simplest-residue-level-mapping)
This module exports the function `siftsmapping` to generate a `Dict` between residue
numbers. This function takes 5 positional arguments.
1. The name of the SIFTS XML file to parse,
2. the source database,
3. the source protein/structure identifier,
4. the destination database and,
5. the destination protein/structure identifier.
Optionally, it's possible to indicate a particular PDB `chain` and whether `missings` will be used.
Databases should be indicated using an available sub-type of `DataBase`. Key and value
types will depend on the residue number type in that database.
| Type `db...` | Database | Residue number type |
|:------------ |:-------------------------------------------------------- |:------------------- |
| `dbPDBe` | **PDBe** (Protein Data Bank in Europe) | `Int` |
| `dbInterPro` | **InterPro** | `String` |
| `dbUniProt` | **UniProt** | `Int` |
| `dbPfam` | **Pfam** (Protein families database) | `Int` |
| `dbNCBI` | **NCBI** (National Center for Biotechnology Information) | `Int` |
| `dbPDB` | **PDB** (Protein Data Bank) | `String` |
| `dbCATH` | **CATH** | `String` |
| `dbSCOP` | **SCOP** (Structural Classification of Proteins) | `String` |
| `dbEnsembl` | **Ensembl** | `String` |
To download the SIFTS XML file for a given PDB entry, use the `downloadsifts` function.
```@setup sifts_simple
using MIToS.SIFTS
import MIToS # to use pathof(MIToS)
siftsfile = joinpath(dirname(pathof(MIToS)), "..", "docs", "data", "1ivo.xml.gz");
```
```@example sifts_simple
using MIToS.SIFTS
```
```julia
siftsfile = downloadsifts("1IVO")
```
The following example shows the residue number mapping between *Pfam* and *PDB*.
*Pfam* uses *UniProt* coordinates and *PDB* uses its own residue numbers with insertion
codes. Note that **the `siftsmapping` function is case sensitive**, and that
**SIFTS stores PDB identifiers using lowercase characters**.
```@example sifts_simple
siftsmap = siftsmapping(
siftsfile,
dbPfam,
"PF00757",
dbPDB,
"1ivo", # SIFTS stores PDB identifiers in lowercase
chain = "A", # In this example we are only using the chain A of the PDB
missings = false,
) # Residues without coordinates aren't used in the mapping
```
## [Storing residue-level mapping](@id Storing-residue-level-mapping)
If you need more than the residue number mapping between two databases, you can access
all the residue-level cross references using the `read_file` function with the `SIFTSXML`
`FileFormat`. The `parse_file` function (and therefore the `read_file` function) for the `SIFTSXML` format
also takes the keyword arguments `chain` and `missings`. The `read_file`/`parse_file` function
returns a `Vector` of `SIFTSResidue` objects that store the cross references between
residues in each database.
```@setup sifts_simple
siftsresidues = read_file(siftsfile, SIFTSXML, chain="A", missings=false) # Array{SIFTSResidue,1}
residue_data = siftsresidues[301];
```
You are free to access the `SIFTSResidue` fields in order to get the desired information.
`SIFTSResidue` objects contain `db...` objects (sub-types of `DataBase`), with the cross
referenced information. You should note that, except for the `PDBe` and `InterPro` fields,
the field values can be `missing`. The `ismissing` function is helpful to know if there
is a `db...` object. For example, getting the UniProt residue name
(one letter code of the amino acid) would be:
```@example sifts_simple
ismissing(residue_data.UniProt) ? "" : residue_data.UniProt.name
```
That line of code returns an empty string if the UniProt field is `missing`. Otherwise, it
returns a string with the name of the residue in UniProt. Because that way of accessing
values in a `SIFTSResidue` is too verbose, MIToS defines a more convenient signature for `get`.
Using MIToS' `get`, the previous line of code becomes:
```@example sifts_simple
# SIFTSResidue database field default
get(residue_data, dbUniProt, :name, "")
```
There is no need to use the full signature. Other signatures are possible
depending on the value you want to access. In particular, a `missing` object
is returned if a default value is not given at the end of the signature and the
value to access is missing:
```@setup sifts_repl
import MIToS # to use pathof(MIToS)
siftsfile = joinpath(dirname(pathof(MIToS)), "..", "docs", "data", "1ivo.xml.gz")
using MIToS.SIFTS
residue_data = read_file(siftsfile, SIFTSXML)[301]; # hide
```
```@repl sifts_repl
get(residue_data, dbUniProt) # get takes the database type (`db...`)
get(residue_data, dbUniProt, :name) # and can also take a field name (Symbol)
```
But you don't need the `get` function to access the three letter code of the
residue in `PDBe` because the `PDBe` field can not be `missing`.
```@example sifts_simple
residue_data.PDBe.name
```
`SIFTSResidue` objects also store information about whether the residue is `missing`
(i.e. not resolved) in the PDB structure, as well as information about the
secondary structure (`sscode` and `ssname`):
```@repl sifts_repl
residue_data.missing
residue_data.sscode
residue_data.ssname
```
### [Accessing residue-level cross references](@id Accessing-residue-level-cross-references)
You can ask for particular values in a single `SIFTSResidue` using the `get` function.
```@repl sifts_repl
using MIToS.SIFTS
residue_data = read_file(siftsfile, SIFTSXML)[301]
# Is the UniProt residue name in the list of basic amino acids ["H", "K", "R"]?
get(residue_data, dbUniProt, :name, "") in ["H", "K", "R"]
```
Use higher order functions and lambda expressions (anonymous functions) or
list comprehensions to easily ask for information on the `Vector{SIFTSResidue}`. You can
use `get` with the previous signature, or direct field access together with `ismissing`.
```@example sifts_simple
# Captures PDB residue numbers if the Pfam id is "PF00757"
resnums = [
res.PDB.number for res in siftsresidues if
!ismissing(res.PDB) && get(res, dbPfam, :id, "") == "PF00757"
]
```
**Useful higher order functions are:**
**`findall`**
```@example sifts_simple
# Which of the residues have UniProt residue names in the list ["H", "K", "R"]? (basic residues)
indexes = findall(res -> get(res, dbUniProt, :name, "") in ["H", "K", "R"], siftsresidues)
```
**`map`**
```@example sifts_simple
map(i -> siftsresidues[i].UniProt, indexes) # UniProt data of the basic residues
```
**`filter`**
```@example sifts_simple
# SIFTSResidues with UniProt names in ["H", "K", "R"]
basicresidues =
filter(res -> get(res, dbUniProt, :name, "") in ["H", "K", "R"], siftsresidues)
basicresidues[1].UniProt # UniProt data of the first basic residue
```
#### [Example: Which residues are missing in the PDB structure](@id Example:-Which-residues-are-missing-in-the-PDB-structure)
Given that `SIFTSResidue` objects store a `missing` residue flag, it’s easy to get a
vector with a `true` value for each residue that is missing in the structure.
```@setup sifts_repl_ii
import MIToS # to use pathof(MIToS)
siftsfile = joinpath(dirname(pathof(MIToS)), "..", "docs", "data", "1ivo.xml.gz");
```
```@repl sifts_repl_ii
using MIToS.SIFTS
sifts_1ivo = read_file(siftsfile, SIFTSXML, chain = "A"); # SIFTSResidues of the 1IVO chain A
[res.missing for res in sifts_1ivo]
```
However, if you need to filter using other conditions, you'll find the `get` function
useful. In this example, we are going to ask for the *UniProt id*
(to avoid problems with fragments, tags or chimeric/fusion proteins). We are also using
`get` to select a specific PDB chain.
```@setup sifts_1jqz
using MIToS.SIFTS
import MIToS # to use pathof(MIToS)
siftsfile = joinpath(dirname(pathof(MIToS)), "..", "docs", "data", "1jqz.xml.gz");
```
```julia
siftsfile = downloadsifts("1JQZ")
```
```@repl sifts_1jqz
using MIToS.SIFTS
sifts_1jqz = read_file(siftsfile, SIFTSXML); # It has an amino terminal his tag
missings = [
(
(get(res, dbUniProt, :id, "") == "P05230") &
(get(res, dbPDB, :chain, "") == "A") &
res.missing
) for res in sifts_1jqz
];
println(
"There are only ",
sum(missings),
" missing residues in the chain A, associated to UniProt P05230",
)
println(
"But there are ",
sum([res.missing for res in sifts_1jqz]),
" missing residues in the PDB file.",
)
```
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | docs | 516 | ```@setup log
@info "SIFTS API docs"
```
# SIFTS
```@docs
MIToS.SIFTS
```
## Contents
```@contents
Pages = ["SIFTS_API.md"]
Depth = 2
```
## Types
```@autodocs
Modules = [MIToS.SIFTS]
Private = false
Order = [:type]
```
## Constants
```@autodocs
Modules = [MIToS.SIFTS]
Private = false
Order = [:constant]
```
## Macros
```@autodocs
Modules = [MIToS.SIFTS]
Private = false
Order = [:macro]
```
## Methods and functions
```@autodocs
Modules = [MIToS.SIFTS]
Private = false
Order = [:function]
```
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | docs | 3714 | ```@setup log
@info "Scripts docs"
```
# MIToS' Scripts
The [MIToS_Scripts.jl](https://github.com/MIToSOrg/MIToS_Scripts.jl) package offers a set
of easy-to-use scripts for command-line execution without requiring Julia coding.
It includes several scripts designed for various bioinformatics tasks, such as
estimating residue conservation and inter-residue coevolution, calculating distances between
residues in a protein structure, and more.
```@contents
Pages = ["Scripts.md"]
Depth = 4
```
## Installation
To install **MIToS_Scripts.jl**, you only need Julia 1.9 or later installed on your
system. Execute `julia` in the terminal to open the Julia REPL, and then run the
following command:
```julia
using Pkg
Pkg.add(url = "https://github.com/MIToSOrg/MIToS_Scripts.jl")
```
Then, you can get the location of the installed scripts by running the following command:
```julia
using MIToS_Scripts
scripts_folder = joinpath(pkgdir(MIToS_Scripts), "scripts")
```
You can run them from that location. Alternatively, you can add the location to your
`PATH` environment variable, or copy the scripts to a folder already in your `PATH` to
run them from anywhere.
## Usage
You can execute each provided script from your command line. For example, to run the `Buslje09.jl`
script from the folder where the scripts are located, use:
```bash
julia Buslje09.jl input_msa_file
```
Refer to the documentation of each script for specific usage instructions; you can access
it by running the script with the `--help` or `-h` flag:
```bash
julia Buslje09.jl -h
```
## Scripts
```@setup scripts
using Pkg
project_folder = "MIToS_Scripts_Project"
isdir(project_folder) || mkdir(project_folder)
Pkg.activate(project_folder)
Pkg.add(url="https://github.com/MIToSOrg/MIToS_Scripts.jl")
using MIToS_Scripts
scripts_folder = joinpath(pkgdir(MIToS_Scripts), "scripts")
```
### Buslje09.jl
```@example scripts
script_path = joinpath(scripts_folder, "Buslje09.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### BLMI.jl
```@example scripts
script_path = joinpath(scripts_folder, "BLMI.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### Conservation.jl
```@example scripts
script_path = joinpath(scripts_folder, "Conservation.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### DownloadPDB.jl
```@example scripts
script_path = joinpath(scripts_folder, "DownloadPDB.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### Distances.jl
```@example scripts
script_path = joinpath(scripts_folder, "Distances.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### MSADescription.jl
```@example scripts
script_path = joinpath(scripts_folder, "MSADescription.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### PercentIdentity.jl
```@example scripts
script_path = joinpath(scripts_folder, "PercentIdentity.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### AlignedColumns.jl
```@example scripts
script_path = joinpath(scripts_folder, "AlignedColumns.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
### SplitStockholm.jl
```@example scripts
script_path = joinpath(scripts_folder, "SplitStockholm.jl") # hide
println(read(`$(Base.julia_cmd()) --project=$project_folder $script_path -h`, String)) #hide
```
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | docs | 533 | ```@setup log
@info "Utils API docs"
```
# [Utils](@id API-Utils)
```@docs
MIToS.Utils
```
## Contents
```@contents
Pages = ["Utils_API.md"]
Depth = 2
```
## Types
```@autodocs
Modules = [MIToS.Utils]
Private = false
Order = [:type]
```
## Constants
```@autodocs
Modules = [MIToS.Utils]
Private = false
Order = [:constant]
```
## Macros
```@autodocs
Modules = [MIToS.Utils]
Private = false
Order = [:macro]
```
## Methods and functions
```@autodocs
Modules = [MIToS.Utils]
Private = false
Order = [:function]
```
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 3.0.6 | 8995effa332b70686f53d0dca35a29950f418df7 | docs | 3339 | ```@raw html
<img class="display-light-only" src="./assets/mitos-logo.png" alt="MIToS"/>
<img class="display-dark-only" src="./assets/mitos-logo-dark.png" alt="MIToS"/>
```
*A Julia Package to Analyze Protein Sequences, Structures, and Evolutionary Information*
## Modules
MIToS tools are separated into different modules for different tasks.
- [MSA](@ref Module-MSA): This module defines multiple functions and types for dealing with
Multiple Sequence Alignments (MSAs) and their annotations. It also includes facilities
for sequence clustering and shuffling, among others.
- [PDB](@ref Module-PDB): This module defines types and methods to work with protein
structures from different sources, such as the Protein Data Bank (PDB) or AlphaFold DB.
It includes functions to superpose structures, measure the distance between residues, and much more.
- [Information](@ref Module-Information): This module defines residue contingency tables and
methods on them to estimate information measures. This allows measuring evolutionary
information at MSA positions. It includes functions to estimate corrected mutual
information (ZMIp, ZBLMIp) between MSA columns, as well as conservation estimations using
Shannon entropy and the Kullback-Leibler divergence.
- [SIFTS](@ref Module-SIFTS): This module allows access to SIFTS residue-level mapping of
UniProt, Pfam, and other databases with PDB entries.
- [Pfam](@ref Module-Pfam): This module uses the previous modules to work with Pfam MSAs.
It also has useful parameter optimization functions to be used with Pfam alignments.
- [Utils](@ref API-Utils): MIToS also has a Utils module with common utility functions and
types used in different modules of this package.
## Citation
If you use MIToS [zea2017mitos](@cite), please cite:
*Diego J. Zea, Diego Anfossi, Morten Nielsen, Cristina Marino-Buslje; MIToS.jl: mutual information tools for protein sequence analysis in the Julia language, Bioinformatics, Volume 33, Issue 4, 15 February 2017, Pages 564–565, [https://doi.org/10.1093/bioinformatics/btw646](https://doi.org/10.1093/bioinformatics/btw646)*
## Older MIToS versions
You can change the MIToS version of the documentation at the bottom left of this site—the
older version available is MIToS 2.0. If you are using MIToS v1 in a version of Julia
pre-1.0, please read [this older documentation](https://diegozea.github.io/mitosghpage-legacy/) instead.
## Acknowledgments
MIToS was initially developed at the *Structural Bioinformatics Unit* of the
[*Fundación Instituto Leloir*](https://www.leloir.org.ar/) (*FIL*) in Argentina.
Its development now continues at the [*Molecular Assemblies and Genome Integrity*](https://www.i2bc.paris-saclay.fr/molecular-assemblies-and-genome-integrity/)
group of the [*Institute for Integrative Biology of the Cell*](https://www.i2bc.paris-saclay.fr/)
(*I2BC*) in France.
We want to thank all [**contributors**](https://github.com/diegozea/MIToS.jl/graphs/contributors)
who have helped improve MIToS. We also thank the Julia community and all the MIToS users
for their feedback and support.
```@raw html
<img class="display-light-only" src="./assets/FIL_I2BC.png" alt="FIL and I2BC logos"/>
<img class="display-dark-only" src="./assets/FIL_I2BC_dark.png" alt="FIL and I2BC logos"/>
```
| MIToS | https://github.com/diegozea/MIToS.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 262 | using Documenter, StanDiagnose
makedocs(
modules = [StanDiagnose],
format = Documenter.HTML(),
checkdocs = :exports,
sitename = "StanDiagnose.jl",
pages = Any["index.md"]
)
deploydocs(
repo = "github.com/goedman/StanDiagnose.jl.git",
)
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 572 | ######## CmdStan diagnose example ###########
using StanDiagnose
ProjDir = dirname(@__FILE__)
bernoulli_model = "
data {
int<lower=0> N;
  array[N] int<lower=0,upper=1> y;
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ bernoulli(theta);
}
"
bernoulli_data = Dict("N" => 10, "y" => [0, 1, 0, 1, 0, 0, 0, 0, 0, 1])
tmpdir = joinpath(@__DIR__, "tmp")
dm = DiagnoseModel("bernoulli", bernoulli_model, tmpdir);
rc = stan_diagnose(dm; data=bernoulli_data);
if success(rc)
diags = read_diagnose(dm)
println()
display(diags)
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 796 | """
$(SIGNATURES)
Helper infrastructure to compile models and run `cmdstan`'s diagnose method.
"""
module StanDiagnose
using Reexport
using DocStringExtensions: FIELDS, SIGNATURES, TYPEDEF
@reexport using StanBase
import StanBase: update_model_file, par, handle_keywords!
import StanBase: executable_path, ensure_executable, stan_compile
import StanBase: update_json_files
import StanBase: data_file_path, init_file_path, sample_file_path
import StanBase: generated_quantities_file_path, log_file_path
import StanBase: diagnostic_file_path, setup_diagnostics
include("stanmodel/DiagnoseModel.jl")
include("stanrun/stan_run.jl")
include("stanrun/cmdline.jl")
include("stansamples/read_diagnose.jl")
stan_diagnose = stan_run
export
DiagnoseModel,
stan_diagnose,
read_diagnose
end # module
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 4336 | import Base: show
mutable struct DiagnoseModel <: CmdStanModels
name::AbstractString; # Name of the Stan program
model::AbstractString; # Stan language model program
# Sample fields
num_chains::Int64; # Number of chains
num_threads::Int64; # Number of threads
seed::Int; # Seed section of cmd to run cmdstan
refresh::Int; # Display progress in output files
init_bound::Int; # Bound for initial param values
# Check model gradient against finite difference
test::Symbol; # :gradient
epsilon::Float64; # Finite difference step size
error::Float64; # Error threshold
output_base::AbstractString; # Used for file paths to be created
tmpdir::AbstractString; # Holds all created files
exec_path::AbstractString; # Path to the cmdstan excutable
data_file::Vector{AbstractString}; # Array of data files input to cmdstan
init_file::Vector{AbstractString}; # Array of init files input to cmdstan
cmds::Vector{Cmd}; # Array of cmds to be spawned/pipelined
sample_file::Vector{String}; # Sample file array (.csv)
log_file::Vector{String}; # Log file array
diagnostic_file::Vector{String}; # Diagnostic file array
cmdstan_home::AbstractString; # Directory where cmdstan can be found
end
"""
# DiagnoseModel
Create a DiagnoseModel and compile the Stan language model.
### Required arguments
```julia
* `name::AbstractString` : Name for the model
* `model::AbstractString` : Stan model source
```
### Optional positional argument
```julia
`tmpdir::AbstractString` : Directory where output files are stored
```
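### Example

A minimal usage sketch (assuming `bernoulli_model` holds a Stan program string and `data` a `Dict` of observations, as in the package's Bernoulli example):
```julia
dm = DiagnoseModel("bernoulli", bernoulli_model)
rc = stan_diagnose(dm; data)
success(rc) && read_diagnose(dm)
```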
"""
function DiagnoseModel(
name::AbstractString,
model::AbstractString,
tmpdir = mktempdir())
!isdir(tmpdir) && mkdir(tmpdir)
update_model_file(joinpath(tmpdir, "$(name).stan"), strip(model))
output_base = joinpath(tmpdir, name)
exec_path = executable_path(output_base)
cmdstan_home = CMDSTAN_HOME
error_output = IOBuffer()
is_ok = cd(cmdstan_home) do
success(pipeline(`$(make_command()) -f $(cmdstan_home)/makefile -C $(cmdstan_home) $(exec_path)`;
stderr = error_output))
end
if !is_ok
throw(StanModelError(model, String(take!(error_output))))
end
DiagnoseModel(name, model,
4, # num_chains
4, # num_threads
-1, # seed
100, # refresh
2, # init_bound
:gradient, # Test argument
1e-6, # Epsilon
1e-6, # Error
output_base, # Path to output files
tmpdir, # Tmpdir settings
exec_path, # exec_path
AbstractString[], # Data files
AbstractString[], # Init files
Cmd[], # Command lines
String[], # Sample .csv files
String[], # Log files
String[], # Diagnostic files
cmdstan_home)
end
function Base.show(io::IO, ::MIME"text/plain", m::DiagnoseModel)
println(io, "\nDiagnose section:")
println(io, " name = ", m.name)
println(io, " num_chains = ", m.num_chains)
println(io, " num_threads = ", m.num_threads)
println(io, " seed = ", m.seed)
println(io, " refresh = ", m.refresh)
println(io, " init_bound = ", m.init_bound)
println(io, "\nGradient section:")
println(io, " test = ", m.test)
println(io, " epsilon = ", m.epsilon)
println(io, " error = ", m.error)
println(io, "\nOther:")
println(io, " output_base = ", m.output_base)
println(io, " tmpdir = ", m.tmpdir)
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 1324 | """
# cmdline
Recursively parse the model to construct command line.
### Method
```julia
cmdline(m, id)
```
### Required arguments
```julia
* `m::DiagnoseModel`              # DiagnoseModel object
* `id::Int` # Chain id
```
"""
function cmdline(m::DiagnoseModel, id)
#=
`./bernoulli diagnose test=gradient epsilon=1.0e-6 error=1.0e-6
random seed=-1 id=1 data file=bernoulli_1.data.R
output file=bernoulli_diagnose_1.csv refresh=100`
=#
cmd = ``
# Handle the model name field for unix and windows
cmd = `$(m.exec_path)`
# Diagnose specific portion of the model
cmd = `$cmd diagnose`
# Gradient specific portion of the model
cmd = `$cmd test=$(string(m.test))`
cmd = `$cmd epsilon=$(m.epsilon)`
cmd = `$cmd error=$(m.error)`
# Common to all models, not recursive
cmd = `$cmd random seed=$(m.seed)`
cmd = `$cmd id=$(id)`
# Data file required?
if length(m.data_file) > 0 && isfile(m.data_file[id])
cmd = `$cmd data file=$(m.data_file[id])`
end
# Output files
cmd = `$cmd output`
if length(m.sample_file) > 0
cmd = `$cmd file=$(m.sample_file[id])`
end
if length(m.diagnostic_file) > 0
cmd = `$cmd diagnostic_file=$(m.diagnostic_file[id])`
end
cmd = `$cmd refresh=$(m.refresh)`
cmd
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 2972 | """
    stan_run()
Run `cmdstan`'s diagnose method for a StanJulia DiagnoseModel (<: CmdStanModels). Exported as `stan_diagnose`.
## Required argument
```julia
* `m <: CmdStanModels`             # DiagnoseModel.
* `use_json=true` # Use JSON3 for data and init files
* `check_num_chains=true` # Check for correct Julia chains and C++ chains
```
### Most frequently used keyword arguments
```julia
* `data` # Observations Dict or NamedTuple.
* `init` # Init Dict or NT (default: -2 to +2).
```
### Returns
```julia
* `rc` # Return code, 0 is success.
```
See extended help for other keyword arguments ( `??stan_diagnose` ).
# Extended help
### Additional configuration keyword arguments
```julia
* `num_chains=4` # Update number of chains.
* `num_samples=1000` # Number of samples.
* `num_warmups=1000` # Number of warmup samples.
* `save_warmup=false` # Save warmup samples.
* `thin=1` # Set thinning value.
* `refresh=100` # Output refresh rate.
* `seed=-1` # Set seed value.
* `test=:gradient` # Test :gradient.
* `epsilon=1e-8` # Finite difference step size.
* `error=1e-8` # Error threshold.
```
"""
function stan_run(m::T,
use_json=true,
check_num_chains=true; kwargs...) where {T <: CmdStanModels}
handle_keywords!(m, kwargs)
# Diagnostics files requested?
diagnostics = false
if :diagnostics in keys(kwargs)
diagnostics = kwargs[:diagnostics]
setup_diagnostics(m, m.num_chains)
end
# Remove existing sample files
for id in 1:m.num_chains
sfile = sample_file_path(m.output_base, id)
isfile(sfile) && rm(sfile)
end
if use_json
:init in keys(kwargs) && update_json_files(m, kwargs[:init],
m.num_chains, "init")
:data in keys(kwargs) && update_json_files(m, kwargs[:data],
m.num_chains, "data")
else
:init in keys(kwargs) && update_R_files(m, kwargs[:init],
m.num_chains, "init")
:data in keys(kwargs) && update_R_files(m, kwargs[:data],
m.num_chains, "data")
end
m.cmds = [stan_cmds(m, id; kwargs...) for id in 1:m.num_chains]
#println(typeof(m.cmds))
#println()
#println(m.cmds)
run(pipeline(par(m.cmds), stdout=m.log_file[1]))
end
"""
Generate a cmdstan command line (a run `cmd`).
$(SIGNATURES)
Internal, not exported.
"""
function stan_cmds(m::T, id::Integer; kwargs...) where {T <: CmdStanModels}
append!(m.sample_file, [sample_file_path(m.output_base, id)])
append!(m.log_file, [log_file_path(m.output_base, id)])
if length(m.diagnostic_file) > 0
append!(m.diagnostic_file, [diagnostic_file_path(m.output_base, id)])
end
cmdline(m, id)
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 1878 | """
# read_diagnose
Read diagnose output file created by cmdstan.
### Method
```julia
read_diagnose(m::DiagnoseModel)
```
### Required arguments
```julia
* `m::DiagnoseModel` : DiagnoseModel object
```
"""
function read_diagnose(m::DiagnoseModel)
## Collect the results of a chain in an array ##
res_type = "chain"
tdict = Dict()
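    # tdict collects :stan_version plus vectors (:lp, :var_id, :value, :model, :finite_dif, :error) accumulated over the chain result files.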
local sstr
for i in 1:m.num_chains
if isfile("$(m.output_base)_$(res_type)_$(i).csv")
## A result type file for chain i is present ##
instream = open("$(m.output_base)_$(res_type)_$(i).csv")
if i == 1
# Extract cmdstan version
str = read(instream, String)
sstr = split(str)
tdict[:stan_version] = "$(parse(Int, sstr[4])).$(parse(Int, sstr[8])).$(parse(Int, sstr[12]))"
end
# Position sstr at the element with `probability=...`
indx = findall(x -> length(x)>11 && x[1:11] == "probability", sstr)[1]
sstr_lp = sstr[indx]
sstr_lp = parse(Float64, split(sstr_lp, '=')[2])
if :lp in keys(tdict)
append!(tdict[:lp], sstr_lp)
append!(tdict[:var_id], parse(Int, sstr[indx+11]))
append!(tdict[:value], parse(Float64, sstr[indx+12]))
append!(tdict[:model], parse(Float64, sstr[indx+13]))
append!(tdict[:finite_dif], parse(Float64, sstr[indx+14]))
append!(tdict[:error], parse(Float64, sstr[indx+15]))
else
# First time around, create value array
tdict[:lp] = [sstr_lp]
tdict[:var_id] = [parse(Int, sstr[indx+11])]
tdict[:value] = [parse(Float64, sstr[indx+12])]
tdict[:model] = [parse(Float64, sstr[indx+13])]
tdict[:finite_dif] = [parse(Float64, sstr[indx+14])]
tdict[:error] = [parse(Float64, sstr[indx+15])]
end
end
end
tdict
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 1015 | using StanDiagnose
using Test
if haskey(ENV, "JULIA_CMDSTAN_HOME") || haskey(ENV, "CMDSTAN")
ProjDir = dirname(@__FILE__)
bernoulli_model = "
data {
int<lower=0> N;
array[N] int<lower=0,upper=1> y;
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
y ~ bernoulli(theta);
}
"
data = Dict("N" => 10, "y" => [0, 1, 0, 1, 0, 0, 0, 0, 0, 1])
@testset "Bernoulli diagnose" begin
stanmodel = DiagnoseModel("bernoulli", bernoulli_model);
rc = stan_diagnose(stanmodel; data);
if success(rc)
diags = read_diagnose(stanmodel)
tmp = diags[:error][1]
@test round.(tmp, digits=6) ≈ 0.0
end
stanmodel = DiagnoseModel("bernoulli", bernoulli_model);
rc2 = stan_diagnose(stanmodel; data);
if success(rc2)
diags = read_diagnose(stanmodel)
tmp = diags[:error][1]
@test round.(tmp, digits=6) ≈ 0.0
end
end
else
println("\nCMDSTAN or JULIA_CMDSTAN_HOME not set. Skipping tests")
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 363 | ####
#### Coverage summary, printed as "(percentage) covered".
####
#### Useful for CI environments that just want a summary (eg a Gitlab setup).
####
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
covered_lines, total_lines = get_summary(process_folder())
percentage = covered_lines / total_lines * 100
println("($(percentage)%) covered")
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | code | 266 | # only push coverage from one bot
get(ENV, "TRAVIS_OS_NAME", nothing) == "linux" || exit(0)
get(ENV, "TRAVIS_JULIA_VERSION", nothing) == "1.1" || exit(0)
using Coverage
cd(joinpath(@__DIR__, "..", "..")) do
Codecov.submit(Codecov.process_folder())
end
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | docs | 1622 | # StanDiagnose.jl
| **Project Status** | **Build Status** |
|:---------------------------:|:-----------------:|
|![][project-status-img] | ![][CI-build] |
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://stanjulia.github.io/StanDiagnose.jl/latest
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://stanjulia.github.io/StanDiagnose.jl/stable
[CI-build]: https://github.com/stanjulia/StanDiagnose.jl/workflows/CI/badge.svg?branch=master
[issues-url]: https://github.com/stanjulia/StanDiagnose.jl/issues
[project-status-img]: https://img.shields.io/badge/lifecycle-stable-green.svg
## Important note
### Refactoring CmdStan.jl v6. Maybe this is an ok approach.
## Installation
This package is registered. Install with
```julia
pkg> add StanDiagnose.jl
```
You need a working [Stan's cmdstan](https://mc-stan.org/users/interfaces/cmdstan.html) installation, the path of which you should specify in either `CMDSTAN` or `JULIA_CMDSTAN_HOME`, e.g. in your `~/.julia/config/startup.jl` include a line like
```julia
# CmdStan setup
ENV["JULIA_CMDSTAN_HOME"] = expanduser("~/src/cmdstan-2.35.0/") # replace with your path
```
This package is derived from Tamas Papp's [StanRun.jl]() package.
## Usage
It is recommended that you start your Julia process with multiple worker processes to take advantage of parallel sampling, eg
```sh
julia -p auto
```
Otherwise, `stan_diagnose` will use a single process.
Use this package like this:
```julia
using StanDiagnose
```
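A minimal end-to-end sketch, adapted from the package's Bernoulli example (the model and data below are illustrative):
```julia
using StanDiagnose

bernoulli_model = "
data {
  int<lower=0> N;
  array[N] int<lower=0,upper=1> y;
}
parameters {
  real<lower=0,upper=1> theta;
}
model {
  theta ~ beta(1,1);
  y ~ bernoulli(theta);
}
"
data = Dict("N" => 10, "y" => [0, 1, 0, 1, 0, 0, 0, 0, 0, 1])

dm = DiagnoseModel("bernoulli", bernoulli_model)   # compile the model
rc = stan_diagnose(dm; data)                       # run cmdstan's diagnose method
success(rc) && display(read_diagnose(dm))          # read the gradient test results
```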
See the docstrings (in particular `?StanDiagnose`) for more.
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 4.5.2 | dae7dcabd05545bc1dfcc7e85e82f2df3f1c8cc0 | docs | 43 | # StanDiagnose
*Documentation goes here.*
| StanDiagnose | https://github.com/StanJulia/StanDiagnose.jl.git |
|
[
"MIT"
] | 1.0.2 | 6a23aacdb1e4b59282fe076b227b74d7f14400a1 | code | 15015 | # License is MIT: https://github.com/JuliaString/LaTeX_Entities/LICENSE.md
#
# Portions of this are based on code from julia/base/latex_symbols.jl
#
# Mapping from LaTeX math symbol to the corresponding Unicode codepoint.
# This is used for tab substitution in the REPL.
println("Running LaTeX build in ", pwd())
using LightXML
using StrTables
VER = UInt32(1)
#const dpath = "http://www.w3.org/Math/characters/"
const dpath = "http://www.w3.org/2003/entities/2007xml/"
const fname = "unicode.xml"
#const lpath = "http://mirror.math.ku.edu/tex-archive/macros/latex/contrib/unicode-math/"
const lpath = "https://raw.githubusercontent.com/wspr/unicode-math/master/"
const lname = "unicode-math-table.tex"
const disp = [false]
# Get manual additions to the tables
include("../src/manual_latex.jl")
const datapath = "../data"
const empty_str = ""
const element_types = ("mathlatex", "AMS", "IEEE", "latex")
function get_math_symbols(dpath, fname)
lname = joinpath(datapath, fname)
if isfile(fname)
println("Loaded: ", lname)
vers = lname
else
vers = string(dpath, fname)
download(vers, lname)
println("Saved to: ", lname)
end
xdoc = parse_file(lname)
latex_sym = [Pair{String, String}[] for i = 1:length(element_types)]
info = Tuple{Int, Int, String, String, String}[]
count = 0
# Handle differences in versions of unicode.xml document
rt = root(xdoc)
top = find_element(rt, "charlist")
top == nothing || (rt = top)
for c in child_nodes(rt)
if name(c) == "character" && is_elementnode(c)
ce = XMLElement(c)
for (ind, el) in enumerate(element_types)
latex = find_element(ce, el)
if latex == nothing
disp[] && println("##\t", attribute(ce, "id"), "\t", ce)
continue
end
L = strip(content(latex))
id = attribute(ce, "id")
U = string(map(s -> Char(parse_hex(UInt32, s)), split(id[2:end], "-"))...)
mtch = _contains(L, r"^\\[A-Za-z][A-Za-z0-9]*(\{[A-Za-z0-9]\})?$")
disp[] &&
println("#", count += 1, "\t", mtch%Int, " id: ", id, "\tU: ", U, "\t", L)
if mtch
L = L[2:end] # remove initial \
if length(U) == 1 && isascii(U[1])
# Don't store LaTeX names for ASCII characters
typ = 0
else
typ = 1
push!(latex_sym[ind], String(L) => U)
end
push!(info, (ind, typ, L, U, empty_str))
end
end
end
end
latex_sym, vers, info
end
function add_math_symbols(dpath, fname)
lname = joinpath(datapath, fname)
if isfile(fname)
println("Loaded: ", lname)
vers = lname
else
vers = string(dpath, fname)
download(vers, lname)
println("Saved to: ", lname)
end
latex_sym = Pair{String, String}[]
info = Tuple{Int, Int, String, String, String}[]
open(lname) do f
for L in eachline(f)
(isempty(L) || L[1] == '%') && continue
x = map(s -> rstrip(s, [' ','\t','\n']),
split(_replace(L, r"[{}\"]+" => "\t"), "\t"))
ch = Char(parse_hex(UInt32, x[2]))
nam = String(x[3][2:end])
startswith(nam, "math") && (nam = nam[5:end])
if isascii(ch)
typ = 0 # ASCII
elseif Base.is_id_char(ch)
typ = 1 # identifier
elseif Base.isoperator(Symbol(ch))
typ = 2 # operator
else
typ = 3
end
typ != 0 && push!(latex_sym, nam => string(ch))
push!(info, (2, typ, nam, string(ch), x[5]))
end
end
latex_sym, vers, info
end
#=
standard | v7.0 | new | type
----------|---------|-------|-------------------------------
mscr | scr | c_ | script/cursive
msans | sans | s_ | sans-serif
Bbb | bb | d_ | blackboard / doublestruck
mfrak | frak | f_ | fraktur
mtt | tt | t_ | mono
mit | it | i_ | italic
mitsans | isans | is_ | italic sans-serif
mitBbb | bbi | id_ | italic blackboard / doublestruct
mbf | bf | b_ | bold
mbfscr | bscr | bc_ | bold script/cursive
mbfsans | bsans | bs_ | bold sans-serif
mbffrak | bfrak | bf_ | bold fraktur
mbfit | bi | bi_ | bold italic
mbfitsans | bisans | bis_ | bold italic sans-serif
<greek> | G | greek
it<greek> | i_G | italic greek
bf<greek> | b_G | bold greek
bi<greek> | bi_G | bold italic greek
bsans<greek> | bs_G | bold sans-serif greek
bisans<greek> | bis_G | bold italic sans-serif greek
var<greek> | V | greek variant
mitvar<greek> | i_V | italic greek variant
mbfvar<greek> | b_V | bold greek variant
mbfitvar<greek> | bi_V | bold italic greek variant
mbfsansvar<greek> | bs_V | bold sans-serif greek variant
mbfitsansvar<greek> | bis_V | bold italic sans-serif greek variant
i -> imath ı
=#
function str_chr(val)
isempty(val) && return ""
io = IOBuffer()
for ch in val
print(io, hex(ch%UInt32,4), ':')
end
String(take!(io))[1:end-1]
end
function str_names(nameset)
io = IOBuffer()
allnames = sort(collect(nameset))
for n in allnames
print(io, n, " ")
end
String(take!(io))
end
function add_name(dic::Dict, val, nam)
if haskey(dic, val)
push!(dic[val], nam)
disp[] && println("\e[s$val\e[u\e[4C$(rpad(str_chr(val),20))", str_names(dic[val]))
else
dic[val] = Set((nam,))
disp[] && println("\e[s$val\e[u\e[4C$(rpad(str_chr(val),20))$nam")
end
end
function check_name(out::Dict, dic::Dict, val, nam, old)
oldval = get(dic, nam, "")
# Check if short name is already in table with same value
oldval == "" && return (add_name(out, val, nam); true)
oldval != val && disp[] && println("Conflict: $old => $val, $nam => $oldval")
false
end
function replace_suffix(out, dic, val, nam, suffix, pref, list)
for (suf, rep) in list
suffix == suf && return check_name(out, dic, val, pref * rep, nam)
end
false
end
#=
function replace_greek(out, dic, val, nam, off, pref, list)
for (suf, rep) in list
if nam[off:end] == suf
return check_name(out, dic, val, pref * rep, nam) |
check_name(out, dic, val, pref[1:end-1] * suf, nam)
end
end
false
end
=#
function replace_all(out, dic, val, nam, suffix, pref)
# replace_greek(out, dic, val, nam, off, pref * "G_", greek_letters) ||
# replace_greek(out, dic, val, nam, off, pref * "V_", var_greek) ||
replace_suffix(out, dic, val, nam, suffix, pref * "_", digits)
end
function shorten_names(names::Dict)
valtonam = Dict{String,Set{String}}()
for (nam, val) in names
# handle combining accents, change from 'accent{X}' to 'X-accent'
if !startswith(nam, "math") && sizeof(nam) > 3 &&
nam[end]%UInt8 == '}'%UInt8 && nam[end-2]%UInt8 == '{'%UInt8
ch = nam[end-1]%UInt8
if ch - 'A'%UInt8 < 0x1a || ch - 'a'%UInt8 < 0x1a || ch - '0'%UInt8 < 0xa
# tst = string(nam[end-1], '-', nam[1:end-3])
# check_name(valtonam, names, val, tst, nam)
add_name(valtonam, val, nam)
continue
end
end
# Special handling of "up"/"mup" prefixes
if startswith(nam, "up")
# Add it later when processing "mup" prefix if they have the same value
get(names, "m" * nam, "") == val && continue
elseif startswith(nam, "mup")
# If short form in table with same value, continue, otherwise, add short form
#upval = get(names, nam[2:end], "") # see if "up..." is in the table
#val == upval && continue # short name is already in table with same value
oldval = get(names, nam[4:end], "") # see if "..." is in the table
val == oldval && continue # short name is already in table with same value
check_name(valtonam, names, val, oldval == "" ? nam[4:end] : nam[2:end], nam)
continue
else
flg = false
nam in remove_name && continue
for (oldnam, newnam) in replace_name
if nam == oldnam
flg = check_name(valtonam, names, val, newnam, nam)
break
end
end
flg && continue
if nam[1] in remove_lead_char
for pref in remove_prefix
startswith(nam, pref) || continue
flg = true
tst = nam[sizeof(pref)+1:end]
oldval = get(names, tst, "")
oldval == val || (oldval == "" && add_name(valtonam, val, tst))
break
end
elseif nam[1] in replace_lead_char
for (pref, rep, repv7) in replace_prefix
startswith(nam, pref) || continue
suff = nam[sizeof(pref)+1:end]
flg = replace_all(valtonam, names, val, nam, suff, rep)
#=
if rep == "i"
flg = replace_all(valtonam, names, val, suff, "i")
elseif rep == "t" || rep == "d" || rep == "s" || rep == "c"
flg = replace_all(valtonam, names, val, suff, rep)
elseif rep == "" || rep[1] != 'b'
elseif rep == "b"
flg = replace_all(valtonam, names, val, suff, "b")
elseif rep == "sb"
flg = replace_all(valtonam, names, val, suff, "bs")
elseif rep == "ib"
flg = replace_all(valtonam, names, val, suff, "bi")
elseif rep == "cib"
flg = replace_all(valtonam, names, val, suff, "bic")
end
=#
flg || (flg = check_name(valtonam, names, val, rep * "_" * suff, nam))
#check_name(valtonam, names, val, repv7 * nam[sizeof(pref)+1:end], nam)
#flg = true
break
end
end
# Add short forms, if not already handled
flg && continue
end
# replace_suffix(valtonam, names, val, nam, 1, "G_", greek_letters) ||
# replace_suffix(valtonam, names, val, nam, 1, "V_", var_greek)
add_name(valtonam, val, nam)
end
# Split into two vectors
syms = Vector{String}()
vals = Vector{String}()
for (val, namset) in valtonam
for nam in namset
startswith(nam, "math") && length(namset) > 1 && continue
push!(syms, nam)
push!(vals, val)
end
end
syms, vals
end
function make_tables()
sym1, ver1, inf1 = get_math_symbols(dpath, fname)
sym2, ver2, inf2 = add_math_symbols(lpath, lname)
latex_sym = [mansym..., sym1[1], sym2, sym1[2:end]...]
et = (mantyp..., element_types[1], "tex", element_types[2:end]...)
latex_set = Dict{String,String}()
diff_set = Dict{String,Set{String}}()
# Select the first name found, ignore duplicates
for (ind, sym_set) in enumerate(latex_sym)
countdup = 0
countdiff = 0
for (nam, val) in sym_set
old = get(latex_set, nam, "")
if old == ""
push!(latex_set, nam => val)
elseif val == old
countdup += 1
else
countdiff += 1
if haskey(diff_set, nam)
push!(diff_set[nam], val)
else
push!(diff_set, nam => Set([old, val]))
end
end
end
println(countdup, " duplicates, ", countdiff, " overwritten out of ", length(sym_set),
" found in ", et[ind])
end
# Dump out set
disp[] && println("LaTeX set:\n", latex_set)
disp[] && println("Differences:\n", diff_set)
# Now, replace or remove prefixes and suffixes
symnam, symval = shorten_names(latex_set)
disp[] && println(length(symval), " distinct entities found\n", symnam)
# We want to build a table of all the names, sort them, then create a StrTable out of them
srtnam = sortperm(symnam)
srtval = symval[srtnam] # Values, sorted the same as srtnam
# BMP characters
l16 = Tuple{UInt16, UInt16}[]
# non-BMP characters (in range 0x10000 - 0x1ffff)
l32 = Tuple{UInt16, UInt16}[]
# two characters packed into UInt32, first character in high 16-bits
l2c = Tuple{UInt32, UInt16}[]
for i in eachindex(srtnam)
chrs = srtval[i]
len = length(chrs)
len > 2 && error("Too long sequence of characters $chrs")
ch1 = chrs[1]%UInt32
if len == 2
ch2 = chrs[end]%UInt32
(ch1 > 0x0ffff || ch2 > 0x0ffff) &&
error("Character $ch1 or $ch2 > 0xffff")
push!(l2c, (ch1<<16 | ch2, i))
elseif ch1 > 0x1ffff
error("Character $ch1 too large")
elseif ch1 > 0x0ffff
push!(l32, (ch1-0x10000, i))
else
push!(l16, (ch1%UInt16, i))
end
end
# We now have 3 vectors, for single BMP characters, for non-BMP characters, and for 2 BMP chars
# each has the value and a index into the name table
# We need to create a vector the same size as the name table, that gives the index
# of into one of the three tables, in order to go from names to 1 or 2 output characters
# We also need, for each of the 3 tables, a sorted vector that goes from the indices
# in each table to the index into the name table (so that we can find multiple names for
# each character)
indvec = create_vector(UInt16, length(srtnam))
vec16, ind16, base32 = sortsplit!(indvec, l16, 0)
vec32, ind32, base2c = sortsplit!(indvec, l32, base32)
vec2c, ind2c, basefn = sortsplit!(indvec, l2c, base2c)
((VER, string(now()), string(ver1, ",", ver2),
base32%UInt32, base2c%UInt32, StrTable(symnam[srtnam]), indvec,
vec16, ind16, vec32, ind32, vec2c, ind2c),
(ver1, ver2), (inf1, inf2))
end
savfile = joinpath(datapath, "latex.dat")
if isfile(savfile)
println("Tables already exist")
else
tup = nothing
println("Creating tables")
try
global tup
tup = make_tables()
catch ex
println(sprint(showerror, ex, catch_backtrace()))
end
println("Saving tables to ", savfile)
StrTables.save(savfile, tup[1])
println("Done")
end
| LaTeX_Entities | https://github.com/JuliaString/LaTeX_Entities.jl.git |
|
[
"MIT"
] | 1.0.2 | 6a23aacdb1e4b59282fe076b227b74d7f14400a1 | code | 320 | # Generate completions.json
using LaTeX_Entities
const LE = LaTeX_Entities
open("completions.json", "w") do io
println(io, "{")
def = LE.default
for nam in def.nam
        println(io, "    \"", nam, "\": \"", LE.lookupname(def, nam), "\",")
end
skip(io, -2)
println(io)
println(io, "}")
end
| LaTeX_Entities | https://github.com/JuliaString/LaTeX_Entities.jl.git |
|
[
"MIT"
] | 1.0.2 | 6a23aacdb1e4b59282fe076b227b74d7f14400a1 | code | 265 | # Generate julialatex.el file
using LaTeX_Entities
const LE = LaTeX_Entities
open("julialatex.el", "w") do io
def = LE.default
for nam in def.nam
println(io, "(puthash \"", word, "\" \"", LE.lookupname(def, nam), "\" julia-latexsubs)")
end
end
| LaTeX_Entities | https://github.com/JuliaString/LaTeX_Entities.jl.git |
|
[
"MIT"
] | 1.0.2 | 6a23aacdb1e4b59282fe076b227b74d7f14400a1 | code | 793 | # License is MIT: https://github.com/JuliaString/LaTeX_Entities/LICENSE.md
__precompile__()
"""
# Public API (nothing is exported)
* lookupname(str)
* matchchar(char)
* matches(str)
* longestmatches(str)
* completions(str)
"""
module LaTeX_Entities
using StrTables
VER = UInt32(1)
struct LaTeX_Table{T} <: AbstractEntityTable
ver::UInt32
tim::String
inf::String
base32::UInt32
base2c::UInt32
nam::StrTable{T}
ind::Vector{UInt16}
val16::Vector{UInt16}
ind16::Vector{UInt16}
val32::Vector{UInt16}
ind32::Vector{UInt16}
val2c::Vector{UInt32}
ind2c::Vector{UInt16}
end
function __init__()
global default =
LaTeX_Table(StrTables.load(joinpath(@__DIR__, "../data", "latex.dat"))...)
nothing
end
end # module LaTeX_Entities
| LaTeX_Entities | https://github.com/JuliaString/LaTeX_Entities.jl.git |
|
[
"MIT"
] | 1.0.2 | 6a23aacdb1e4b59282fe076b227b74d7f14400a1 | code | 8334 | # Parly derived from latex_symbols.jl, which is a part of Julia
# License is MIT: http://julialang.org/license
const greek_letters =
("Alpha" => "A",
"Beta" => "B",
"Gamma" => "G",
"Delta" => "D",
"Epsilon" => "E",
"Zeta" => "Z",
"Eta" => "H",
"Theta" => "J",
"Iota" => "I",
"Kappa" => "K",
"Lambda" => "L",
"Mu" => "M",
"Nu" => "N",
"Xi" => "X",
"Omicron" => "U",
"Pi" => "P",
"Rho" => "R",
"Sigma" => "S",
"Tau" => "T",
"Upsilon" => "Y",
"Phi" => "F",
"Chi" => "C",
"Psi" => "W",
"Omega" => "O",
"alpha" => "a",
"beta" => "b",
"gamma" => "g",
"delta" => "d",
"epsilon" => "e",
"zeta" => "z",
"eta" => "h",
"theta" => "j",
"iota" => "i",
"kappa" => "k",
"lambda" => "l",
"mu" => "m",
"nu" => "n",
"xi" => "x",
"omicron" => "u",
"pi" => "p",
"rho" => "r",
"sigma" => "s",
"tau" => "t",
"upsilon" => "y",
"phi" => "f",
"chi" => "c",
"psi" => "w",
"omega" => "o",
)
const var_greek =
("varTheta" => "J",
"nabla" => "n",
"partial" => "d", # partial differential
"varepsilon" => "e",
"varsigma" => "s",
"vartheta" => "j",
"varkappa" => "k",
"varphi" => "f",
"varrho" => "r",
"varpi" => "p"
)
const digits = (
"zero" => "0",
"one" => "1",
"two" => "2",
"three" => "3",
"four" => "4",
"five" => "5",
"six" => "6",
"seven" => "7",
"eight" => "8",
"nine" => "9"
)
const remove_lead_char = "AEt"
const remove_prefix = ("APL", "Elz", "Elx", "El", "textascii", "text")
const replace_lead_char = "Bm"
const replace_prefix =
(("Bbb", "d", "bb"), # double-struck or blackboard
("mbfsans", "sb", "bsans"), # bold sans-serif
("mbfscr", "cb", "bscr"), # bold cursive script
("mbffrak", "fb", "bfrak"), # bold fraktur
("mbfitsans", "sib", "bisans"), # bold italic sans-serif
("mbfit", "ib", "bi"), # bold italic
("mbf", "b", "bf"), # bold
("mfrak", "f", "frak"), # fraktur
("mitsans", "si", "isans"), # italic sans-serif
("mitBbb", "di", "bbi"), # italic double-struck (or blackboard)
("mit", "i", "it"), # italic
("msans", "s", "sans"), # sans-serif
("mscr", "c", "scr"), # cursive script
("mtt", "t", "tt") # teletype (monospaced)
)
const remove_name = ("Elxsqcup", "Elxuplus", "ElOr", "textTheta", "Elzbar")
const replace_name = (
"textasciiacute" => "textacute",
"textasciibreve" => "textbreve",
"textasciimacron" => "highminus",
"textphi" => "ltphi",
"Eulerconst" => "eulermascheroni",
"Hermaphrodite" => "hermaphrodite",
"Planckconst" => "planck",
)
const manual = [
"cbrt" => "\u221B", # synonym of \cuberoot
"mars" => "♂", # synonym of \male
"pprime" => "″", # synonym of \dprime
"ppprime" => "‴", # synonym of \trprime
"pppprime" => "⁗", # synonym of \qprime
"backpprime" => "‶", # synonym of \backdprime
"backppprime" => "‷", # synonym of \backtrprime
"emptyset" => "∅", # synonym of \varnothing
"llbracket" => "⟦", # synonym of \lBrack
"rrbracket" => "⟧", # synonym of \rBrack
"xor" => "⊻", # synonym of \veebar
"iff" => "⟺",
"implies" => "⟹",
"impliedby" => "⟸",
"to" => "→",
"euler" => "ℯ",
# Misc. Math and Physics
"del" => "∇", # synonym of \nabla (combining character)
"sout" => "\u0336", # synonym of \Elzbar (from ulem package)
"strike" => "\u0336", # synonym of \Elzbar
"zbar" => "\u0336", # synonym of \Elzbar
# Avoid getting "incorrect" synonym
"imath" => "ı",
"jmath" => "ȷ",
"i_imath" => "\U1d6a4", # mathematical italic small dotless i
"i_jmath" => "\U1d6a5", # mathematical italic small dotless j
"hbar" => "\u0127", # ħ synonym of \Elzxh
"AA" => "\u00c5", # Å
"Upsilon" => "\u03a5", # Υ
"setminus" => "\u2216", # ∖ synonym of \smallsetminus
"ddot{i}" => "\u00cf", # is ddot{\imath} in unicode.xml
"bigsetminus" => "\u29f5", # add to allow access to standard setminus
"circlearrowleft" => "\u21ba", # ↺ synonym of acwopencirclearrow
"circlearrowright" => "\u21bb", # ↻ synonym of cwopencirclearrow
"ohm" => "Ω",
"leq" => "≤", # synonym of le
"geq" => "≥", # synonym of ge
"bbsemi" => "⨟",
"ith" => "ℎ", # mathematical italic small h (planck constant)
"tricolon" => "⁝", # tricolon
"join" => "⨝", # synonym of Join
]
# Vulgar fractions
const fractions = [
"1/4" => "¼", # vulgar fraction one quarter
"1/2" => "½", # vulgar fraction one half
"3/4" => "¾", # vulgar fraction three quarters
"1/7" => "⅐",# vulgar fraction one seventh
"1/9" => "⅑", # vulgar fraction one ninth
"1/10" => "⅒", # vulgar fraction one tenth
"1/3" => "⅓", # vulgar fraction one third
"2/3" => "⅔", # vulgar fraction two thirds
"1/5" => "⅕", # vulgar fraction one fifth
"2/5" => "⅖", # vulgar fraction two fifths
"3/5" => "⅗", # vulgar fraction three fifths
"4/5" => "⅘", # vulgar fraction four fifths
"1/6" => "⅙", # vulgar fraction one sixth
"5/6" => "⅚", # vulgar fraction five sixths
"1/8" => "⅛", # vulgar fraction one eigth
"3/8" => "⅜", # vulgar fraction three eigths
"5/8" => "⅝", # vulgar fraction five eigths
"7/8" => "⅞", # vulgar fraction seventh eigths
"1/" => "⅟", # fraction numerator one
"0/3" => "↉", # vulgar fraction zero thirds
"1/4" => "¼", # vulgar fraction one quarter
]
const superscripts = [
"^0" => "⁰",
"^1" => "¹",
"^2" => "²",
"^3" => "³",
"^4" => "⁴",
"^5" => "⁵",
"^6" => "⁶",
"^7" => "⁷",
"^8" => "⁸",
"^9" => "⁹",
"^+" => "⁺",
"^-" => "⁻",
"^=" => "⁼",
"^(" => "⁽",
"^)" => "⁾",
"^a" => "ᵃ",
"^b" => "ᵇ",
"^c" => "ᶜ",
"^d" => "ᵈ",
"^e" => "ᵉ",
"^f" => "ᶠ",
"^g" => "ᵍ",
"^h" => "ʰ",
"^i" => "ⁱ",
"^j" => "ʲ",
"^k" => "ᵏ",
"^l" => "ˡ",
"^m" => "ᵐ",
"^n" => "ⁿ",
"^o" => "ᵒ",
"^p" => "ᵖ",
"^r" => "ʳ",
"^s" => "ˢ",
"^t" => "ᵗ",
"^u" => "ᵘ",
"^v" => "ᵛ",
"^w" => "ʷ",
"^x" => "ˣ",
"^y" => "ʸ",
"^z" => "ᶻ",
"^A" => "ᴬ",
"^B" => "ᴮ",
"^D" => "ᴰ",
"^E" => "ᴱ",
"^G" => "ᴳ",
"^H" => "ᴴ",
"^I" => "ᴵ",
"^J" => "ᴶ",
"^K" => "ᴷ",
"^L" => "ᴸ",
"^M" => "ᴹ",
"^N" => "ᴺ",
"^O" => "ᴼ",
"^P" => "ᴾ",
"^R" => "ᴿ",
"^T" => "ᵀ",
"^U" => "ᵁ",
"^V" => "ⱽ",
"^W" => "ᵂ",
"^alpha" => "ᵅ",
"^beta" => "ᵝ",
"^gamma" => "ᵞ",
"^delta" => "ᵟ",
"^epsilon" => "ᵋ",
"^theta" => "ᶿ",
"^iota" => "ᶥ",
"^phi" => "ᵠ",
"^chi" => "ᵡ",
"^Phi" => "ᶲ",
"^uparrow" => "ꜛ",
"^downarrow" => "ꜜ",
"^!" => "ꜝ",
]
const subscripts = [
"_0" => "₀",
"_1" => "₁",
"_2" => "₂",
"_3" => "₃",
"_4" => "₄",
"_5" => "₅",
"_6" => "₆",
"_7" => "₇",
"_8" => "₈",
"_9" => "₉",
"_+" => "₊",
"_-" => "₋",
"_=" => "₌",
"_(" => "₍",
"_)" => "₎",
"_a" => "ₐ",
"_e" => "ₑ",
"_h" => "ₕ",
"_i" => "ᵢ",
"_j" => "ⱼ",
"_k" => "ₖ",
"_l" => "ₗ",
"_m" => "ₘ",
"_n" => "ₙ",
"_o" => "ₒ",
"_p" => "ₚ",
"_r" => "ᵣ",
"_s" => "ₛ",
"_t" => "ₜ",
"_u" => "ᵤ",
"_v" => "ᵥ",
"_x" => "ₓ",
"_schwa" => "ₔ",
"_beta" => "ᵦ",
"_gamma" => "ᵧ",
"_rho" => "ᵨ",
"_phi" => "ᵩ",
"_chi" => "ᵪ"
]
const mansym = [manual, fractions, superscripts, subscripts]
const mantyp = ["manual", "fractions", "superscripts", "subscripts"]
| LaTeX_Entities | https://github.com/JuliaString/LaTeX_Entities.jl.git |
|
[
"MIT"
] | 1.0.2 | 6a23aacdb1e4b59282fe076b227b74d7f14400a1 | code | 2154 | using StrTables, LaTeX_Entities
using Test
const def = LaTeX_Entities.default
# Test the functions lookupname, matches, longestmatches, completions
# Check that characters from all 3 tables (BMP, non-BMP, 2 character) are tested
@testset "LaTeX_Entities" begin
@testset "lookupname" begin
@test lookupname(def, SubString("My name is Spock", 12)) == ""
@test lookupname(def, "foobar") == ""
@test lookupname(def, "dagger") == "†" # \u2020
#@test lookupname(def, "mscrl") == "𝓁" # \U1f4c1
@test lookupname(def, "c_l") == "𝓁" # \U1f4c1
@test lookupname(def, "nleqslant") == "⩽̸" # \u2a7d\u338
end
@testset "matches" begin
@test isempty(matches(def, ""))
@test isempty(matches(def, "\U1f596"))
@test isempty(matches(def, SubString("My name is \U1f596", 12)))
for (chrs, exp) in (("√", ["sqrt", "surd"]),
#("𝓁", ["mscrl"]),
("𝓁", ["c_l"]),
("⩽̸", ["nleqslant"]))
res = matches(def, chrs)
@test length(res) >= length(exp)
@test intersect(res, exp) == exp
end
end
@testset "longestmatches" begin
@test isempty(longestmatches(def, "\U1f596 abcd"))
@test isempty(longestmatches(def, SubString("My name is \U1f596", 12)))
for (chrs, exp) in (("√abcd", ["sqrt", "surd"]),
#("𝓁abcd", ["mscrl"]),
("𝓁abcd", ["c_l"]),
("⩽̸abcd", ["nleqslant"]))
res = longestmatches(def, chrs)
@test length(res) >= length(exp)
@test intersect(res, exp) == exp
end
end
@testset "completions" begin
@test isempty(completions(def, "ScottPaulJones"))
@test isempty(completions(def, SubString("My name is Scott", 12)))
for (chrs, exp) in (("A", ["AA", "AE", "Alpha"]),
#("mtt", ["mtta", "mttthree", "mttzero"]),
("varp", ["varperspcorrespond", "varphi", "varpi"]),
("nleq", ["nleq", "nleqslant"]))
res = completions(def, chrs)
@test length(res) >= length(exp)
@test intersect(res, exp) == exp
end
end
end
| LaTeX_Entities | https://github.com/JuliaString/LaTeX_Entities.jl.git |
|
[
"MIT"
] | 1.0.2 | 6a23aacdb1e4b59282fe076b227b74d7f14400a1 | docs | 1952 | # LaTeX_Entities
## Support for using LaTeX entity names for characters
[pkg-url]: https://github.com/JuliaString/LaTeX_Entities.jl.git
[julia-url]: https://github.com/JuliaLang/Julia
[julia-release]:https://img.shields.io/github/release/JuliaLang/julia.svg
[release]: https://img.shields.io/github/release/JuliaString/LaTeX_Entities.jl.svg
[release-date]: https://img.shields.io/github/release-date/JuliaString/LaTeX_Entities.jl.svg
[license-img]: http://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat
[license-url]: LICENSE.md
[gitter-img]: https://badges.gitter.im/Join%20Chat.svg
[gitter-url]: https://gitter.im/JuliaString/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge
[travis-url]: https://travis-ci.org/JuliaString/LaTeX_Entities.jl
[travis-s-img]: https://travis-ci.org/JuliaString/LaTeX_Entities.jl.svg
[travis-m-img]: https://travis-ci.org/JuliaString/LaTeX_Entities.jl.svg?branch=master
[codecov-url]: https://codecov.io/gh/JuliaString/LaTeX_Entities.jl
[codecov-img]: https://codecov.io/gh/JuliaString/LaTeX_Entities.jl/branch/master/graph/badge.svg
[contrib]: https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat
[![][release]][pkg-url] [![][release-date]][pkg-url] [![][license-img]][license-url] [![contributions welcome][contrib]](https://github.com/JuliaString/LaTeX_Entities.jl/issues)
| **Julia Version** | **Unit Tests** | **Coverage** |
|:------------------:|:------------------:|:---------------------:|
| [![][julia-release]][julia-url] | [![][travis-s-img]][travis-url] | [![][codecov-img]][codecov-url]
| Julia Latest | [![][travis-m-img]][travis-url] | [![][codecov-img]][codecov-url]
This builds tables for looking up LaTeX names and returning the Unicode character(s),
looking up a character or pair of characters and finding LaTeX names that return it/them,
and finding all of the LaTeX name completions for a particular string, if any.
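For example, a brief usage sketch mirroring the package's test suite (the lookup functions come from StrTables and operate on the default table):

```julia
using StrTables, LaTeX_Entities

def = LaTeX_Entities.default

lookupname(def, "dagger")   # returns "†"
matches(def, "√")           # names mapping to the character, e.g. ["sqrt", "surd"]
completions(def, "varp")    # completions, e.g. ["varperspcorrespond", "varphi", "varpi"]
```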
| LaTeX_Entities | https://github.com/JuliaString/LaTeX_Entities.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | c14d0b2e7f19374017a2b5b6dfe48c5723c791ae | code | 1245 | module AADocs
using Documenter, AcousticAnalogies
function doit()
IN_CI = get(ENV, "CI", nothing)=="true"
makedocs(sitename="AcousticAnalogies.jl", modules=[AcousticAnalogies], doctest=false,
root=@__DIR__,
format=Documenter.HTML(prettyurls=IN_CI),
pages=["Introduction"=>"index.md",
"Guided Example"=>"guided_example.md",
"CCBlade.jl Example"=>"ccblade_example.md",
"WriteVTK.jl Support"=>"writevtk_support.md",
"OpenFAST Example"=>"openfast_example.md",
"API Reference"=>"api.md",
"Software Quality Assurance"=>"sqa.md",
"BPM Airfoil Self-Noise Tests"=>"bpm_tests1.md",
"BPM Airfoil Self-Noise Tests, Cont."=>"bpm_tests2.md",
"BPM Airfoil Self-Noise Tests, Cont."=>"bpm_tests3.md",
"Ideally Twisted Rotor Tests"=>"itr_tests1.md",
"Ideally Twisted Rotor Tests, Cont."=>"itr_tests2.md",
])
if IN_CI
deploydocs(repo="github.com/OpenMDAO/AcousticAnalogies.jl.git", devbranch="main")
end
end
if !isinteractive()
doit()
end
end # module
| AcousticAnalogies | https://github.com/OpenMDAO/AcousticAnalogies.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | c14d0b2e7f19374017a2b5b6dfe48c5723c791ae | code | 8252 | using AcousticAnalogies
using BenchmarkTools
using DelimitedFiles
using FLOWMath: linear, akima
using Interpolations
using KinematicCoordinateTransformations
using LinearAlgebra: ×
using Printf: @sprintf
using StaticArrays
include(joinpath(@__DIR__, "../test/gen_test_data/gen_ccblade_data/constants.jl"))
const paramsfile = joinpath(@__DIR__, "params-current.json")
const resultsfile = joinpath(@__DIR__, "results-current.json")
function get_dradii(radii, Rhub, Rtip)
# How do I get the radial spacing? Well, for the inner elements, I'll just
# assume that the interfaces are midway between the centers.
r_interface = 0.5.*(radii[1:end-1] .+ radii[2:end])
# Then just say that the blade begins at Rhub, and ends at Rtip.
r_interface = vcat([Rhub], r_interface, [Rtip])
# And now the distance between interfaces is the spacing.
dradii = r_interface[2:end] .- r_interface[1:end-1]
return dradii
end
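# Quick sanity check of the spacing logic above (hypothetical numbers, not from any
# CCBlade case): radii = [0.3, 0.5, 0.7] with Rhub = 0.2 and Rtip = 0.8 gives
# interfaces [0.2, 0.4, 0.6, 0.8], so get_dradii returns [0.2, 0.2, 0.2].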
function run_current(; load_params=true, save_params=false)
rpm = 2200.0 # rev/min
omega = rpm*(2*pi/60.0)
# Get the normal and circumferential loading from the CCBlade output.
i = 11
fname = joinpath(@__DIR__, "../test/gen_test_data/gen_ccblade_data/ccblade_omega$(@sprintf "%02d" i).csv")
data = DelimitedFiles.readdlm(fname, ',')
fn = data[:, 1]
fc = data[:, 2]
# Blade passing period.
bpp = 2*pi/omega/num_blades
num_src_times = 256
num_obs_times = 2*num_src_times
num_radial = length(radii)
num_sources = num_blades*num_radial
obs_time_range = 4.0*bpp
dradii = get_dradii(radii, Rhub, Rtip)
cs_area = area_over_chord_squared .* chord.^2
# Observer angle, rad. Zero is sideline (in the rotor plane of rotation).
theta = 0.0
x0 = [cos(theta)*100*12*0.0254, 0.0, sin(theta)*100*12*0.0254] # 100 ft in meters
# For the moving observer case, the observer is at x0 at time t0, moving with
# constant velocity v0_hub.
t0 = 0.0
v0_hub = v.*[0.0, 0.0, 1.0]
obs_stationary = StationaryAcousticObserver(SVector{3}(x0))
obs_moving = ConstVelocityAcousticObserver(t0, SVector{3}(x0), SVector{3}(v0_hub))
apth = Array{AcousticPressure{Float64, Float64, Float64}, 3}(undef, num_src_times, num_radial, num_blades)
apth_total = AcousticPressure(zeros(Float64, num_obs_times), zeros(Float64, num_obs_times), zeros(Float64, num_obs_times))
linear_interpolations_jl(t_cp, p_cp, t) = interpolate((t_cp,), p_cp, Gridded(Linear())).(t)
suite = BenchmarkGroup()
s_s = suite["stationary"] = BenchmarkGroup()
s_s["linear"] = @benchmarkable run_cf1a($num_blades, $v, $omega, $radii, $dradii, $cs_area, $fn, $fc, $obs_time_range, $obs_stationary, $apth, $apth_total, $linear)
s_s["akima"] = @benchmarkable run_cf1a($num_blades, $v, $omega, $radii, $dradii, $cs_area, $fn, $fc, $obs_time_range, $obs_stationary, $apth, $apth_total, $akima)
s_s["linear_interpolations_jl"] = @benchmarkable run_cf1a($num_blades, $v, $omega, $radii, $dradii, $cs_area, $fn, $fc, $obs_time_range, $obs_stationary, $apth, $apth_total, $linear_interpolations_jl)
s_m = suite["moving"] = BenchmarkGroup()
s_m["linear"] = @benchmarkable run_cf1a($num_blades, $v, $omega, $radii, $dradii, $cs_area, $fn, $fc, $obs_time_range, $obs_moving, $apth, $apth_total, $linear)
s_m["akima"] = @benchmarkable run_cf1a($num_blades, $v, $omega, $radii, $dradii, $cs_area, $fn, $fc, $obs_time_range, $obs_moving, $apth, $apth_total, $akima)
s_m["linear_interpolations_jl"] = @benchmarkable run_cf1a($num_blades, $v, $omega, $radii, $dradii, $cs_area, $fn, $fc, $obs_time_range, $obs_moving, $apth, $apth_total, $linear_interpolations_jl)
if load_params && isfile(paramsfile)
# Load the benchmark parameters.
# https://github.com/JuliaCI/BenchmarkTools.jl/blob/master/doc/manual.md#caching-parameters
loadparams!(suite, BenchmarkTools.load(paramsfile)[1])
# Also need to warm up the benchmarks to get rid of the JIT overhead
# (when not using tune!):
# https://discourse.julialang.org/t/benchmarktools-theory-and-practice/5728
warmup(suite, verbose=false)
else
tune!(suite, verbose=false)
end
results = run(suite, verbose=false)
if save_params
BenchmarkTools.save(paramsfile, params(suite))
end
return suite, results
end
function run_cf1a(num_blades, v, omega, radii, dradii, cs_area, fn, fc, obs_time_range, obs, apth, apth_total, f_interp)
# This is the same as kinematic_trans_pipe, except the un-transformed SourceElement2
# objects are never assigned to an intermediate variable.
t0 = 0.0
rot_axis = @SVector [0.0, 0.0, 1.0]
blade_axis = @SVector [0.0, 1.0, 0.0]
y0_hub = @SVector [0.0, 0.0, 0.0] # m
v0_hub = v.*rot_axis
num_radial = length(radii)
num_src_times = size(apth, 1)
# Blade Passing Period.
bpp = 2*pi/omega/num_blades
src_time_range = 5.0*bpp
rot_trans = SteadyRotXTransformation(t0, omega, 0.0)
global_trans = ConstantLinearMap(hcat(rot_axis, blade_axis, rot_axis×blade_axis))
const_vel_trans = ConstantVelocityTransformation(t0, y0_hub, v0_hub)
# This is just an array of the angular offsets of each blade.
θs = 2*pi/num_blades.*(0:(num_blades-1))
dt = src_time_range/(num_src_times - 1)
src_times = t0 .+ (0:num_src_times-1).*dt
# Reshape for broadcasting.
θs = reshape(θs, 1, 1, num_blades)
radii = reshape(radii, 1, num_radial, 1)
dradii = reshape(dradii, 1, num_radial, 1)
cs_area = reshape(cs_area, 1, num_radial, 1)
fn = reshape(fn, 1, num_radial, 1)
fc = reshape(fc, 1, num_radial, 1)
src_times = reshape(src_times, num_src_times, 1, 1) # This isn't really necessary.
# Get all the transformations!
trans = compose.(src_times, Ref(const_vel_trans), compose.(src_times, Ref(global_trans), Ref(rot_trans)))
# Transform the source elements.
ses = CompactSourceElement.(rho, c0, radii, θs, dradii, cs_area, fn, fc, src_times) .|> trans
# Do the acoustics.
apth .= f1a.(ses, Ref(obs))
# Get the common observer time.
common_obs_time!(apth_total.t, apth, obs_time_range, 1)
# Combine all the sources into one acoustic pressure time history.
combine!(apth_total, apth, 1; f_interp=f_interp)
return apth_total.t, apth_total.p_m, apth_total.p_d
end
function compare_old(; load_params=true, save_params=false, save_results=false)
suite, results_new = run_current(load_params=load_params, save_params=save_params)
if isfile(resultsfile)
results_old = BenchmarkTools.load(resultsfile)[1]
println("Stationary observer, FLOWMath linear interpolation:")
rold = results_old["stationary"]["linear"]
rnew = results_new["stationary"]["linear"]
display(judge(median(rnew), median(rold)))
println("Stationary observer, FLOWMath Akima spline interpolation:")
rold = results_old["stationary"]["akima"]
rnew = results_new["stationary"]["akima"]
display(judge(median(rnew), median(rold)))
println("Stationary observer, Interpolations.jl linear interpolation:")
rold = results_old["stationary"]["linear_interpolations_jl"]
rnew = results_new["stationary"]["linear_interpolations_jl"]
display(judge(median(rnew), median(rold)))
println("Moving observer, FLOWMath linear interpolation:")
rold = results_old["moving"]["linear"]
rnew = results_new["moving"]["linear"]
display(judge(median(rnew), median(rold)))
println("Moving observer, FLOWMath Akima spline interpolation:")
rold = results_old["moving"]["akima"]
rnew = results_new["moving"]["akima"]
display(judge(median(rnew), median(rold)))
println("Moving observer, Interpolations.jl linear interpolation:")
rold = results_old["moving"]["linear_interpolations_jl"]
rnew = results_new["moving"]["linear_interpolations_jl"]
display(judge(median(rnew), median(rold)))
end
if save_results
BenchmarkTools.save(resultsfile, results_new)
end
return suite, results_new
end
if !isinteractive()
compare_old(; load_params=true, save_params=false, save_results=false)
end
| AcousticAnalogies | https://github.com/OpenMDAO/AcousticAnalogies.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | c14d0b2e7f19374017a2b5b6dfe48c5723c791ae | code | 2422 | module AcousticAnalogies
using AcousticMetrics: AcousticMetrics
using CCBlade: CCBlade
using CSV: CSV
using DataFrames: DataFrames
using FillArrays: Fill
using FLOWMath: FLOWMath, akima, linear, ksmax, norm_cs_safe, dot_cs_safe, atan_cs_safe, abs_cs_safe
using FlexiMaps: mapview
using Format: format, FormatExpr
using JuliennedArrays: JuliennedArrays
using KinematicCoordinateTransformations: KinematicTransformation, SteadyRotXTransformation, ConstantVelocityTransformation, compose
using LinearAlgebra: cross, norm, mul!
using Meshes: Meshes
using StaticArrays: @SVector, SVector
using Statistics: mean
using WriteVTK: WriteVTK
include("utils.jl")
export get_dradii
include("abstract_source_elements.jl")
export AbstractCompactSourceElement
include("observers.jl")
export AbstractAcousticObserver, StationaryAcousticObserver, ConstVelocityAcousticObserver
include("advance_time.jl")
export adv_time
include("boundary_layers.jl")
export AbstractBoundaryLayer, TrippedN0012BoundaryLayer, UntrippedN0012BoundaryLayer
include("f1a.jl")
export CompactF1ASourceElement
export F1AOutput, F1APressureTimeHistory
export noise
export common_obs_time
export combine!, combine
include("abstract_broadband.jl")
include("tbl_te.jl")
export TBLTESourceElement
include("lbl_vs.jl")
export LBLVSSourceElement
include("tip_vortex.jl")
export AbstractTipAlphaCorrection, NoTipAlphaCorrection, BPMTipAlphaCorrection, BMTipAlphaCorrection, SmoothBMTipAlphaCorrection
export AbstractBladeTip, RoundedTip, FlatTip
export TipVortexSourceElement
include("teb_vs.jl")
export TEBVSSourceElement
include("combined_broadband.jl")
export CombinedNoTipBroadbandSourceElement, CombinedWithTipBroadbandSourceElement
export pbs_suction, pbs_pressure, pbs_alpha, pbs_teb, pbs_tip
include("ccblade_helpers.jl")
export f1a_source_elements_ccblade, tblte_source_elements_ccblade, lblvs_source_elements_ccblade, tebvs_source_elements_ccblade, tip_vortex_source_elements_ccblade, combined_broadband_source_elements_ccblade
include("bpm_test_utils.jl")
include("openfast_helpers.jl")
export AbstractTimeDerivMethod, NoTimeDerivMethod, SecondOrderFiniteDiff, calculate_loading_dot!
export AbstractRadialInterpMethod, FLOWLinearInterp, FLOWAkimaInterp, interpolate_to_cell_centers!
export OpenFASTData, read_openfast_file, f1a_source_elements_openfast
include("writevtk.jl")
export to_paraview_collection
include("deprecated.jl")
end # module
| AcousticAnalogies | https://github.com/OpenMDAO/AcousticAnalogies.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | c14d0b2e7f19374017a2b5b6dfe48c5723c791ae | code | 17482 | abstract type AbstractDirectivity end
struct BPMDirectivity <: AbstractDirectivity end
struct BrooksBurleyDirectivity <: AbstractDirectivity end
abstract type AbstractBroadbandSourceElement{TDirect,TUInduction,TMachCorrection,TDoppler} <: AbstractCompactSourceElement end
"""
doppler_factor(se::AbstractBroadbandSourceElement, obs::AbstractAcousticObserver, t_obs)
Calculate the Doppler shift factor for noise emitted by source element `se` and received by observer `obs` at time `t_obs`, i.e. the ratio between the observed frequency `f` and the emitted frequency `f_0`.
The correct value for `t_obs` can be found using [`adv_time`](@ref).
"""
function doppler_factor(se::AbstractBroadbandSourceElement{TDirect,TUInduction,TMachCorrection,true}, obs::AbstractAcousticObserver, t_obs) where {TDirect,TUInduction,TMachCorrection}
# Location of the observer at the observer time.
x_obs = obs(t_obs)
# Also need the speed of sound.
c = speed_of_sound(se)
# Get a unit vector pointing from the source position at the source time to the observer position at the observer time.
rv = x_obs .- position(se)
r = norm_cs_safe(rv)
rhat = rv/r
# So, now, if I dot the source velocity with `rhat`, that would give me the component of velocity of the source in the direction of the observer, positive if moving toward it, negative if moving away.
v_src = dot_cs_safe(velocity(se), rhat)
# And, if I dot the observer velocity with `rhat`, that will give me the component of the observer's velocity along the source-to-observer direction, positive if moving *away* from the source, negative if moving toward it.
v_obs = dot_cs_safe(velocity(t_obs, obs), rhat)
# Now we can get the factor.
factor = (1 - v_obs/c) / (1 - v_src/c)
return factor
end
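# When the Doppler correction is disabled via the `TDoppler = false` type parameter,
# no frequency shift is applied and the factor is simply 1.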
function doppler_factor(se::AbstractBroadbandSourceElement{TDirect,TUInduction,TMachCorrection,false}, obs::AbstractAcousticObserver, t_obs) where {TDirect,TUInduction,TMachCorrection}
return 1
end
"""
doppler_factor(se::AbstractBroadbandSourceElement, obs::AbstractAcousticObserver)
Calculate the Doppler shift factor for noise emitted by source element `se` and received by observer `obs`, i.e. the ratio between the observed frequency `f` and the emitted frequency `f_0`.
The correct value for `t_obs` will be found using [`adv_time`](@ref) internally.
"""
function doppler_factor(se::AbstractBroadbandSourceElement, obs::AbstractAcousticObserver)
# Do the advanced time calculation.
t_obs = adv_time(se, obs)
return doppler_factor(se, obs, t_obs)
end
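# Rough numerical sanity check of the Doppler factor above (made-up values, not from any
# test case): with a speed of sound of 340 m/s, a source approaching a stationary observer
# at 34 m/s gives v_src = +34 m/s and v_obs = 0, so factor = (1 - 0)/(1 - 0.1) ≈ 1.11,
# i.e. the observed frequency is about 11% higher than the emitted frequency, as expected.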
function directivity(se::AbstractBroadbandSourceElement{BrooksBurleyDirectivity}, x_obs, top_is_suction)
# Position vector from source to observer.
rv = x_obs .- se.y0dot
# Distance from source to observer.
r_er = norm_cs_safe(rv)
# Unit vector normal to both the span and chord directions.
# Does the order matter?
# Doesn't look like it, since we're only using it to find z_er, which we square.
# But let's do it right, anyway!
# if se.chord_cross_span_to_get_top_uvec
# # But, if the angle of attack is negative, then the "top" of the airfoil (which is normally the suction side) is actually the pressure side.
# if top_is_suction
# z_uvec_tmp = cross(se.chord_uvec, se.span_uvec)
# else
# z_uvec_tmp = cross(se.span_uvec, se.chord_uvec)
# end
# else
# if top_is_suction
# z_uvec_tmp = cross(se.span_uvec, se.chord_uvec)
# else
# z_uvec_tmp = cross(se.chord_uvec, se.span_uvec)
# end
# end
z_uvec_tmp = cross(se.chord_uvec, se.span_uvec)*ifelse(se.chord_cross_span_to_get_top_uvec, 1, -1)*ifelse(top_is_suction, 1, -1)
z_uvec = z_uvec_tmp / norm_cs_safe(z_uvec_tmp)
# Component of rv along the chord line (see Figure 11 in Brooks and Burley AIAA 2001-2210).
x_er = dot_cs_safe(rv, se.chord_uvec)
# Component of rv along the span line (see Figure 11 in Brooks and Burley AIAA 2001-2210).
y_er = dot_cs_safe(rv, se.span_uvec)
# Component of rv in the direction normal to both span and chord (see Figure 11 in Brooks and Burley AIAA 2001-2210).
z_er = dot_cs_safe(rv, z_uvec)
# Need to find sin(Θ_er)^2, where Θ_er = acos(x_er/r_er), equation (21) from Brooks and Burley AIAA 2001-2210.
# But sin(acos(x_er/r_er)) = sqrt(r_er^2 - x_er^2)/r_er, and so sin(Θ_er)^2 = (r_er^2 - x_er^2)/r_er^2
sin2Θer = (r_er^2 - x_er^2)/r_er^2
# Need to find sin(Φ_er)^2, where Φ_er = acos(y_er/sqrt(y_er^2 + z_er^2)), equation (21) from Brooks and Burley AIAA 2001-2210.
# But sin(acos(y_er/sqrt(y_er^2 + z_er^2))) = z_er/sqrt(y_er^2 + z_er^2), and so sin(Φ_er)^2 = z_er^2/(y_er_^2 + z_er^2).
sin2Φer = (z_er^2)/(y_er^2 + z_er^2)
# Need to find 2*sin(0.5*Θ_er)^2, where Θ_er = acos(x_er/r_er), equation (21) from Brooks and Burley AIAA 2001-2210.
# But there is a half-angle identity that says sin(θ/2)^2 = 0.5*(1 - cos(θ)).
# So I actually want 2*sin(0.5*Θ_er)^2 = 2*0.5*(1 - cos(Θ_er)) = (1 - cos(Θ_er)).
# But I can substitute in Θ_er = acos(x_er/r_er) and get 2*sin(0.5*Θ_er)^2 = 1 - x_er/r_er.
twosin2halfΘer = 1 - x_er/r_er
# Now just need the denominator: (1 - M_tot*cos(ξR))^4.
# M_tot is the "total" velocity from... hmm... what perspective?
# Let's see... it looks like it's supposed to be from the fluid, aka the global frame.
# The definition is Brooks and Burley AIAA 2001-2210, equation (14):
#
# V_tot = V - V_wt - V_ind
#
# where
#
# * V is the velocity due to the rotation of the blade element
# * V_wt is the wind tunnel velocity, which is positive when it goes against the motion of the blade element.
# * V_ind is "the induced velocity due to the near and far wake of the rotor," and appears to be positive in roughly the thrust direction.
#
# So if I calculate V - V_wt, that's the "actual" velocity of the blade element, i.e., the velocity of the blade element relative to the fluid far away from the blade element, since it doesn't include the induced velocity.
# That's what I usually think of as the "actual" velocity, since it's what a stationary observer would observe on a calm day.
# But when we add in the induced velocity, I think what we're finding is the velocity of the blade element relative to the nearfield velocity.
# Cool.
# But does that mean I add or subtract `se.y1dot_fluid` from `se.y1dot`?
# Well, let's think about that.
# First, let's say I start with stuff in the blade-fixed frame.
# And let's say I'm imagining that, from the global frame, I'm assuming the
# blade element is moving in the positive x direction, initially aligned
# with the y axis
# So I think all I need to do is just use se.y1dot_fluid + se.y1dot.
# Now, cos(ξ_r) is defined by equation (18) in Brooks and Burley AIAA 2001-2210, which is the angle between the radiation vector (rv here) and the total velocity (se.y1dot here).
# But I can simplify that by just finding the unit radiation vector, then dotting that with the velocity vector, and dividing by c0.
# Unit radiation vector.
r_uvec = rv./r_er
# Equation 14 from Brooks and Burley AIAA 2001-2210.
Vtotal = se.y1dot - se.y1dot_fluid
# Component of the Mach number vector in the direction of the radiation vector.
Mtotcosξr = dot_cs_safe(Vtotal, r_uvec)/se.c0
# Convective amplification factor for the two directivity functions.
conv_amp = 1/(1 - Mtotcosξr)^4
# Now I can finally find the directivity function!
# Equation (19) from Brooks and Burley AIAA 2001-2210.
# Dl = (sin2Θer*sin2Φer)/(1 - Mtotcosξr)^4
Dl = (sin2Θer*sin2Φer)*conv_amp
# Now I can finally find the directivity function!
# Equation (20) from Brooks and Burley AIAA 2001-2210.
# Dh = (twosin2halfΘer*sin2Φer)/(1 - Mtotcosξr)^4
Dh = (twosin2halfΘer*sin2Φer)*conv_amp
return r_er, Dl, Dh
end
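# Quick sanity check of the directivity expressions above (hypothetical geometry, not from
# any test case): for a stationary element (Vtotal = 0, so conv_amp = 1) with the observer
# directly "above" it (rv parallel to z_uvec), x_er = y_er = 0, which gives
# sin2Θer = sin2Φer = twosin2halfΘer = 1 and hence Dl = Dh = 1.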
function directivity(se::AbstractBroadbandSourceElement{BPMDirectivity}, x_obs, top_is_suction)
# Position vector from source to observer.
rv = x_obs .- se.y0dot
# Distance from source to observer.
r_er = norm_cs_safe(rv)
# So, the BPM report uses the local flow velocity, not the chord line, to define the x direction.
# So, I want to get a unit vector in that direction.
# Should it include induction?
# It won't matter for comparing to the data in the BPM report, since the flow including and excluding induction would be in the same direction.
# In the BPM report Appendix B, the x direction is defined as the opposite of the motion of the source element/flat plate, so I guess I won't use induction.
# But I want the velocity to be normal to the span direction, so let's remove that.
# So, want the x direction to be opposite the velocity of the source element.
x_vec_tmp1 = -se.y1dot
# Then we want to remove any part of the velocity in the direction of the span,
# i.e. subtract off the projection of x_vec_tmp1 onto the span unit vector.
x_vec_tmp2 = x_vec_tmp1 - dot_cs_safe(x_vec_tmp1, se.span_uvec)*se.span_uvec
# Now make it a unit vector:
x_uvec = x_vec_tmp2 / norm_cs_safe(x_vec_tmp2)
# Unit vector normal to both the span and chord directions.
# Does the order matter?
# Doesn't look like it, since we're only using it to find z_er, which we square.
# But it's supposed to be pointing from the pressure to the suction side, which we can figure out, so let's do it the right way.
# if se.chord_cross_span_to_get_top_uvec
# if top_is_suction
# z_uvec_tmp = cross(x_uvec, se.span_uvec)
# else
# z_uvec_tmp = cross(se.span_uvec, x_uvec)
# end
# else
# if top_is_suction
# z_uvec_tmp = cross(se.span_uvec, x_uvec)
# else
# z_uvec_tmp = cross(x_uvec, se.span_uvec)
# end
# end
z_uvec_tmp = cross(x_uvec, se.span_uvec)*ifelse(se.chord_cross_span_to_get_top_uvec, 1, -1)*ifelse(top_is_suction, 1, -1)
z_uvec = z_uvec_tmp / norm_cs_safe(z_uvec_tmp)
# Component of rv along the chord line (see Figure B3 in the BPM report).
x_er = dot_cs_safe(rv, x_uvec)
# Component of rv along the span line (see Figure B3 the BPM report).
y_er = dot_cs_safe(rv, se.span_uvec)
# Component of rv in the direction normal to both span and chord (see Figure 11 in Brooks and Burley AIAA 2001-2210).
z_er = dot_cs_safe(rv, z_uvec)
# Need to find sin(Θ_er)^2, where Θ_er = acos(x_er/r_er), equation (21) from Brooks and Burley AIAA 2001-2210.
# But sin(acos(x_er/r_er)) = sqrt(r_er^2 - x_er^2)/r_er, and so sin(Θ_er)^2 = (r_er^2 - x_er^2)/r_er^2
sin2Θer = (r_er^2 - x_er^2)/r_er^2
# Need to find sin(Φ_er)^2, where Φ_er = acos(y_er/sqrt(y_er^2 + z_er^2)), equation (21) from Brooks and Burley AIAA 2001-2210.
# But sin(acos(y_er/sqrt(y_er^2 + z_er^2))) = z_er/sqrt(y_er^2 + z_er^2), and so sin(Φ_er)^2 = z_er^2/(y_er_^2 + z_er^2).
sin2Φer = (z_er^2)/(y_er^2 + z_er^2)
# Need to find 2*sin(0.5*Θ_er)^2, where Θ_er = acos(x_er/r_er), equation (21) from Brooks and Burley AIAA 2001-2210.
# But there is a half-angle identity that says sin(θ/2)^2 = 0.5*(1 - cos(θ)).
# So I actually want 2*sin(0.5*Θ_er)^2 = 2*0.5*(1 - cos(Θ_er)) = (1 - cos(Θ_er)).
# But I can substitute in Θ_er = acos(x_er/r_er) and get 2*sin(0.5*Θ_er)^2 = 1 - x_er/r_er.
twosin2halfΘer = 1 - x_er/r_er
# Now just need the denominator: (1 - M_tot*cos(ξR))^4.
# M_tot is the "total" velocity from... hmm... what perspective?
# Let's see... it looks like it's supposed to be from the fluid, aka the global frame.
# The definition is Brooks and Burley AIAA 2001-2210, equation (14):
#
# V_tot = V - V_wt - V_ind
#
# where
#
# * V is the velocity due to the rotation of the blade element
# * V_wt is the wind tunnel velocity, which is positive when it goes against the motion of the blade element.
# * V_ind is "the induced velocity due to the near and far wake of the rotor," and appears to be positive in roughly the thrust direction.
#
# So if I calculate V - V_wt, that's the "actual" velocity of the blade element, i.e., the velocity of the blade element relative to the fluid far away from the blade element, since it doesn't include the induced velocity.
# That's what I usually think of as the "actual" velocity, since it's what a stationary observer would observe on a calm day.
# But when we add in the induced velocity, I think what we're finding is the velocity of the blade element relative to the nearfield velocity.
# Cool.
# But does that mean I add or subtract `se.y1dot_fluid` from `se.y1dot`?
# Well, let's think about that.
# First, let's say I start with stuff in the blade-fixed frame.
# And let's say I'm imagining that, from the global frame, I'm assuming the
# blade element is moving in the positive x direction, initially aligned
# with the y axis
# So I think all I need to do is just use se.y1dot_fluid + se.y1dot.
# Now, cos(ξ_r) is defined by equation (18) in Brooks and Burley AIAA 2001-2210, which is the angle between the radiation vector (rv here) and the total velocity (se.y1dot here).
# But I can simplify that by just finding the unit radiation vector, then dotting that with the velocity vector, and dividing by c0.
# Unit radiation vector.
r_uvec = rv./r_er
# For the BPM directivity function, the velocity/Mach number doesn't include induction.
Vtotal = se.y1dot
# Component of the Mach number vector in the direction of the radiation vector.
Mtotcosξr = dot_cs_safe(Vtotal, r_uvec)/se.c0
# Convective amplification factor for the low-frequency directivity function.
conv_amp_l = 1/(1 - Mtotcosξr)^4
# The BPM high-frequency convective amplification factor is a bit different.
# It has a factor (M - M_c)*cos(Θ_er), which, in the more general coordinate system of Brooks & Burley would be -(M - M_c)*cos(ξ_r).
# So, the `M` is the speed of the blade element without induction, and `M_c` is the velocity of the blade element including induction.
# So `M_c = se.y1dot - se.y1dot_fluid` and then `M - M_c = se.y1dot - (se.y1dot - se.y1dot_fluid) = se.y1dot_fluid`.
# And so what we'd want to do is this:
M_minus_M_ccosξr = dot_cs_safe(se.y1dot_fluid, r_uvec)/se.c0
conv_amp_h = 1/((1 - Mtotcosξr)*(1 - M_minus_M_ccosξr)^2)
# Now I can finally find the directivity function!
# Equation (B2) from the BPM report.
Dl = (sin2Θer*sin2Φer)*conv_amp_l
# Now I can finally find the directivity function!
# Equation (B1) from the BPM report.
Dh = (twosin2halfΘer*sin2Φer)*conv_amp_h
return r_er, Dl, Dh
end
function angle_of_attack(se::AbstractBroadbandSourceElement)
# Find the total velocity of the fluid from the perspective of the blade element, which is just the total velocity of the blade element from the perspective of the fluid with the sign switched.
# Vtotal = -(se.y1dot - se.y1dot_fluid)
Vtotal = se.y1dot_fluid - se.y1dot
# To get the angle of attack, I need to find the components of the velocity in the chordwise direction, and the direction normal to both the chord and span.
# So, first need to get a vector normal to both the chord and span, pointing from pressure side to suction side.
normal_uvec_tmp = ifelse(se.chord_cross_span_to_get_top_uvec,
cross(se.chord_uvec, se.span_uvec),
cross(se.span_uvec, se.chord_uvec))
normal_uvec = normal_uvec_tmp ./ norm_cs_safe(normal_uvec_tmp)
# Now get the component of velocity in the chord_uvec and normal_uvec directions.
V_chordwise = dot_cs_safe(Vtotal, se.chord_uvec)
V_normal = dot_cs_safe(Vtotal, normal_uvec)
# Now we can find the angle of attack.
alphastar = atan_cs_safe(V_normal, V_chordwise)
# alphastar = atan(V_normal, V_chordwise)
return alphastar
end
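# Quick sanity check of the arithmetic above (made-up numbers, not from any test case):
# if V_chordwise = 10.0 and V_normal = 1.0, then alphastar = atan(1.0, 10.0) ≈ 0.0997 rad,
# i.e. about 5.7 degrees.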
function speed_normal_to_span(se::AbstractBroadbandSourceElement{TDirect,true}) where {TDirect}
# Find the total velocity of the fluid including induction, from the perspective of the blade element, which is just the total velocity of the blade element from the perspective of the fluid with the sign switched.
Vtotal = se.y1dot_fluid - se.y1dot
# Find the component of the velocity in the direction of the span.
Vspan = dot_cs_safe(Vtotal, se.span_uvec)*se.span_uvec
# Subtract that from the total velocity to get the velocity normal to the span, then get the norm for the speed normal to span.
return norm_cs_safe(Vtotal - Vspan)
end
function speed_normal_to_span(se::AbstractBroadbandSourceElement{TDirect,false}) where {TDirect}
# Find the total velocity of the fluid, not including induction, from the perspective of the blade element, which is just the total velocity of the blade element from the perspective of the fluid with the sign switched.
Vtotal = -se.y1dot
# Find the component of the velocity in the direction of the span.
Vspan = dot_cs_safe(Vtotal, se.span_uvec)*se.span_uvec
# Subtract that from the total velocity to get the velocity normal to the span, then get the norm for the speed normal to span.
return norm_cs_safe(Vtotal - Vspan)
end
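# Quick sanity check of the projection logic in both methods above (made-up numbers):
# with Vtotal = [3.0, 0.0, 4.0] and span_uvec = [0.0, 0.0, 1.0], Vspan = [0.0, 0.0, 4.0],
# so the speed normal to the span is norm([3.0, 0.0, 0.0]) = 3.0.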
function noise(se::AbstractBroadbandSourceElement, obs::AbstractAcousticObserver, freqs::AcousticMetrics.AbstractProportionalBands{3, :center})
t_obs = adv_time(se, obs)
return noise(se, obs, t_obs, freqs)
end
| AcousticAnalogies | https://github.com/OpenMDAO/AcousticAnalogies.jl.git |
|
[
"Apache-2.0"
] | 0.8.1 | c14d0b2e7f19374017a2b5b6dfe48c5723c791ae | code | 891 | abstract type AbstractCompactSourceElement end
"""
velocity(se::AbstractCompactSourceElement)
Return the current velocity of `se`.
"""
@inline velocity(se::AbstractCompactSourceElement) = se.y1dot
"""
source_time(se::AbstractCompactSourceElement)
Return the source time of `se`.
"""
@inline source_time(se::AbstractCompactSourceElement) = se.τ
"""
orientation(se::AbstractCompactSourceElement)
Return a length-3 unit vector indicating the spanwise orientation of `se`.
"""
@inline orientation(se::AbstractCompactSourceElement) = se.span_uvec
"""
position(se::AbstractCompactSourceElement)
Return a length-3 vector indicating the position of `se`.
"""
@inline position(se::AbstractCompactSourceElement) = se.y0dot
"""
speed_of_sound(se::AbstractCompactSourceElement)
Return the ambient speed of sound associated with `se`.
"""
@inline speed_of_sound(se) = se.c0
| AcousticAnalogies | https://github.com/OpenMDAO/AcousticAnalogies.jl.git |