Train a k nearest neighbors classifier on a training set.
xs is a list of observations and ys is a list of the class assignments.
Thus, xs and ys should contain the same number of elements. k is
the number of neighbors that should be examined when doing the
classification. | def train(xs, ys, k, typecode=None):
"""Train a k nearest neighbors classifier on a training set.
xs is a list of observations and ys is a list of the class assignments.
Thus, xs and ys should contain the same number of elements. k is
the number of neighbors that should be examined when doing the
classification.
"""
knn = kNN()
knn.classes = set(ys)
knn.xs = np.asarray(xs, typecode)
knn.ys = ys
knn.k = k
return knn |
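These train/calculate/classify triples match the APIs of separate Biopython modules (this one resembles Bio.kNN), so the repeated function names do not collide in practice. A minimal training sketch with invented data, assuming the train() above is in scope:

```python
xs = [[0.0, 0.0], [0.1, 0.2], [1.0, 1.0], [0.9, 1.1]]  # observations
ys = [0, 0, 1, 1]                                      # class labels
model = train(xs, ys, k=3)
print(model.classes)  # {0, 1}
```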
Calculate the probability for each class.
Arguments:
- x is the observed data.
- weight_fn is an optional function that takes x and a training
example, and returns a weight.
- distance_fn is an optional function that takes two points and
returns the distance between them. If distance_fn is None (the
default), the Euclidean distance is used.
Returns a dictionary mapping each class to the weight assigned to it. | def calculate(knn, x, weight_fn=None, distance_fn=None):
"""Calculate the probability for each class.
Arguments:
- x is the observed data.
- weight_fn is an optional function that takes x and a training
example, and returns a weight.
- distance_fn is an optional function that takes two points and
returns the distance between them. If distance_fn is None (the
default), the Euclidean distance is used.
Returns a dictionary mapping each class to the weight assigned to it.
"""
if weight_fn is None:
weight_fn = equal_weight
x = np.asarray(x)
order = [] # list of (distance, index)
if distance_fn:
for i in range(len(knn.xs)):
dist = distance_fn(x, knn.xs[i])
order.append((dist, i))
else:
# Default: Use a fast implementation of the Euclidean distance
temp = np.zeros(len(x))
# Predefining temp allows reuse of this array, making this
# function about twice as fast.
for i in range(len(knn.xs)):
temp[:] = x - knn.xs[i]
dist = np.sqrt(np.dot(temp, temp))
order.append((dist, i))
order.sort()
# first 'k' are the ones I want.
weights = {} # class -> number of votes
for k in knn.classes:
weights[k] = 0.0
for dist, i in order[: knn.k]:
klass = knn.ys[i]
weights[klass] = weights[klass] + weight_fn(x, knn.xs[i])
return weights |
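The weight_fn hook makes distance-weighted voting easy. A sketch with a hypothetical inverse-distance weight (equal_weight, the module default, gives every neighbor a weight of 1):

```python
import numpy as np

def inverse_distance(x, example):
    # x and example are numpy arrays by the time calculate() calls us.
    d = np.sqrt(np.sum((x - example) ** 2))
    return 1.0 / (d + 1e-9)  # guard against zero distance

model = train([[0.0, 0.0], [1.0, 1.0]], [0, 1], k=2)
weights = calculate(model, [0.2, 0.1], weight_fn=inverse_distance)
# weights maps each class to the summed weight of its nearest neighbors
```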
Classify an observation into a class.
If not specified, weight_fn will give all neighbors equal weight.
distance_fn is an optional function that takes two points and returns
the distance between them. If distance_fn is None (the default),
the Euclidean distance is used. | def classify(knn, x, weight_fn=None, distance_fn=None):
"""Classify an observation into a class.
If not specified, weight_fn will give all neighbors equal weight.
distance_fn is an optional function that takes two points and returns
the distance between them. If distance_fn is None (the default),
the Euclidean distance is used.
"""
if weight_fn is None:
weight_fn = equal_weight
weights = calculate(knn, x, weight_fn=weight_fn, distance_fn=distance_fn)
most_class = None
most_weight = None
for klass, weight in weights.items():
if most_class is None or weight > most_weight:
most_class = klass
most_weight = weight
return most_class |
Train a logistic regression classifier on a training set.
Argument xs is a list of observations and ys is a list of the class
assignments, which should be 0 or 1. xs and ys should contain the
same number of elements. update_fn is an optional callback function
that takes as parameters the iteration number and log likelihood. | def train(xs, ys, update_fn=None, typecode=None):
"""Train a logistic regression classifier on a training set.
Argument xs is a list of observations and ys is a list of the class
assignments, which should be 0 or 1. xs and ys should contain the
same number of elements. update_fn is an optional callback function
that takes as parameters the iteration number and log likelihood.
"""
if len(xs) != len(ys):
raise ValueError("xs and ys should be the same length.")
classes = set(ys)
if classes != {0, 1}:
raise ValueError("Classes should be 0's and 1's")
if typecode is None:
typecode = "d"
# Dimensionality of the data is the dimensionality of the
# observations plus a constant dimension.
N, ndims = len(xs), len(xs[0]) + 1
if N == 0 or ndims == 1:
raise ValueError("No observations or observation of 0 dimension.")
# Make an X array, with a constant first dimension.
X = np.ones((N, ndims), typecode)
X[:, 1:] = xs
Xt = np.transpose(X)
y = np.asarray(ys, typecode)
# Initialize the beta parameter to 0.
beta = np.zeros(ndims, typecode)
MAX_ITERATIONS = 500
CONVERGE_THRESHOLD = 0.01
stepsize = 1.0
# Now iterate using Newton-Raphson until the log-likelihoods
# converge.
i = 0
old_beta = old_llik = None
while i < MAX_ITERATIONS:
# Calculate the probabilities. p = e^(beta X) / (1+e^(beta X))
ebetaX = np.exp(np.dot(beta, Xt))
p = ebetaX / (1 + ebetaX)
# Find the log likelihood score and see if I've converged.
logp = y * np.log(p) + (1 - y) * np.log(1 - p)
llik = sum(logp)
if update_fn is not None:
update_fn(i, llik)  # pass the loop counter, not the iter builtin
if old_llik is not None:
# Check to see if the likelihood decreased. If it did, then
# restore the old beta parameters and halve the step size.
if llik < old_llik:
stepsize /= 2.0
beta = old_beta
# If I've converged, then stop.
if np.fabs(llik - old_llik) <= CONVERGE_THRESHOLD:
break
old_llik, old_beta = llik, beta
i += 1
W = np.identity(N) * p
Xtyp = np.dot(Xt, y - p) # Calculate the first derivative.
XtWX = np.dot(np.dot(Xt, W), X) # Calculate the second derivative.
delta = np.linalg.solve(XtWX, Xtyp)
if np.fabs(stepsize - 1.0) > 0.001:
delta *= stepsize
beta += delta # Update beta.
else:
raise RuntimeError("Didn't converge.")
lr = LogisticRegression()
lr.beta = list(beta)
return lr |
Calculate the probability for each class.
Arguments:
- lr is a LogisticRegression object.
- x is the observed data.
Returns a list of the probability that it fits each class. | def calculate(lr, x):
"""Calculate the probability for each class.
Arguments:
- lr is a LogisticRegression object.
- x is the observed data.
Returns a list of the probability that it fits each class.
"""
# Insert a constant term for x.
x = np.asarray([1.0] + x)
# Calculate the probability. p = e^(beta X) / (1+e^(beta X))
ebetaX = np.exp(np.dot(lr.beta, x))
p = ebetaX / (1 + ebetaX)
return [1 - p, p] |
Classify an observation into a class. | def classify(lr, x):
"""Classify an observation into a class."""
probs = calculate(lr, x)
if probs[0] > probs[1]:
return 0
return 1 |
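An end-to-end sketch of the logistic regression trio on invented, non-separable 1-D data (perfectly separable data can make the Newton-Raphson steps blow up):

```python
xs = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
ys = [0, 0, 1, 0, 1, 1]
lr = train(xs, ys)
print(lr.beta)               # [intercept, slope]
print(calculate(lr, [3.5]))  # [P(class 0), P(class 1)]
print(classify(lr, [5.0]))   # most likely class
```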
Return a dictionary mapping each value to the offset of its first occurrence in the sequence. | def itemindex(values):
"""Return a dictionary mapping each value to the offset of its first occurrence in the sequence."""
d = {}
entries = enumerate(values[::-1])
n = len(values) - 1
for index, key in entries:
d[key] = n - index
return d |
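A quick illustration of the duplicate handling: because the reversed sequence is enumerated, the first occurrence of a value wins.

```python
d = itemindex(["a", "b", "c", "b"])
assert d == {"a": 0, "b": 1, "c": 2}  # "b" maps to 1, not 3
```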
Read the next line and check that it begins with the correct start (PRIVATE). | def _readline_and_check_start(handle, start):
"""Read the next line and check that it begins with the correct start (PRIVATE)."""
line = handle.readline()
if not line.startswith(start):
raise ValueError(f"I expected {start!r} but got {line!r}")
return line |
Parse a file handle into a MarkovModel object. | def load(handle):
"""Parse a file handle into a MarkovModel object."""
# Load the states.
line = _readline_and_check_start(handle, "STATES:")
states = line.split()[1:]
# Load the alphabet.
line = _readline_and_check_start(handle, "ALPHABET:")
alphabet = line.split()[1:]
mm = MarkovModel(states, alphabet)
N, M = len(states), len(alphabet)
# Load the initial probabilities.
mm.p_initial = np.zeros(N)
line = _readline_and_check_start(handle, "INITIAL:")
for i in range(len(states)):
line = _readline_and_check_start(handle, f" {states[i]}:")
mm.p_initial[i] = float(line.split()[-1])
# Load the transition.
mm.p_transition = np.zeros((N, N))
line = _readline_and_check_start(handle, "TRANSITION:")
for i in range(len(states)):
line = _readline_and_check_start(handle, f" {states[i]}:")
mm.p_transition[i, :] = [float(v) for v in line.split()[1:]]
# Load the emission.
mm.p_emission = np.zeros((N, M))
line = _readline_and_check_start(handle, "EMISSION:")
for i in range(len(states)):
line = _readline_and_check_start(handle, f" {states[i]}:")
mm.p_emission[i, :] = [float(v) for v in line.split()[1:]]
return mm |
Save MarkovModel object into handle. | def save(mm, handle):
"""Save MarkovModel object into handle."""
# This will fail if there are spaces in the states or alphabet.
w = handle.write
w(f"STATES: {' '.join(mm.states)}\n")
w(f"ALPHABET: {' '.join(mm.alphabet)}\n")
w("INITIAL:\n")
for i in range(len(mm.p_initial)):
w(f" {mm.states[i]}: {mm.p_initial[i]:g}\n")
w("TRANSITION:\n")
for i in range(len(mm.p_transition)):
w(f" {mm.states[i]}: {' '.join(str(x) for x in mm.p_transition[i])}\n")
w("EMISSION:\n")
for i in range(len(mm.p_emission)):
w(f" {mm.states[i]}: {' '.join(str(x) for x in mm.p_emission[i])}\n") |
Train a MarkovModel using the Baum-Welch algorithm.
Train a MarkovModel using the Baum-Welch algorithm. states is a list
of strings that describe the names of each state. alphabet is a
list of objects that indicate the allowed outputs. training_data
is a list of observations. Each observation is a list of objects
from the alphabet.
pseudo_initial, pseudo_transition, and pseudo_emission are
optional parameters that you can use to assign pseudo-counts to
different matrices. They should be matrices of the appropriate
size that contain numbers to add to each parameter matrix, before
normalization.
update_fn is an optional callback that takes parameters
(iteration, log_likelihood). It is called once per iteration. | def train_bw(
states,
alphabet,
training_data,
pseudo_initial=None,
pseudo_transition=None,
pseudo_emission=None,
update_fn=None,
):
"""Train a MarkovModel using the Baum-Welch algorithm.
Train a MarkovModel using the Baum-Welch algorithm. states is a list
of strings that describe the names of each state. alphabet is a
list of objects that indicate the allowed outputs. training_data
is a list of observations. Each observation is a list of objects
from the alphabet.
pseudo_initial, pseudo_transition, and pseudo_emission are
optional parameters that you can use to assign pseudo-counts to
different matrices. They should be matrices of the appropriate
size that contain numbers to add to each parameter matrix, before
normalization.
update_fn is an optional callback that takes parameters
(iteration, log_likelihood). It is called once per iteration.
"""
N, M = len(states), len(alphabet)
if not training_data:
raise ValueError("No training data given.")
if pseudo_initial is not None:
pseudo_initial = np.asarray(pseudo_initial)
if pseudo_initial.shape != (N,):
raise ValueError("pseudo_initial not shape len(states)")
if pseudo_transition is not None:
pseudo_transition = np.asarray(pseudo_transition)
if pseudo_transition.shape != (N, N):
raise ValueError("pseudo_transition not shape len(states) X len(states)")
if pseudo_emission is not None:
pseudo_emission = np.asarray(pseudo_emission)
if pseudo_emission.shape != (N, M):
raise ValueError("pseudo_emission not shape len(states) X len(alphabet)")
# Training data is given as a list of members of the alphabet.
# Replace those with indexes into the alphabet list for easier
# computation.
training_outputs = []
indexes = itemindex(alphabet)
for outputs in training_data:
training_outputs.append([indexes[x] for x in outputs])
# Do some sanity checking on the outputs.
lengths = [len(x) for x in training_outputs]
if min(lengths) == 0:
raise ValueError("I got training data with outputs of length 0")
# Do the training with baum welch.
x = _baum_welch(
N,
M,
training_outputs,
pseudo_initial=pseudo_initial,
pseudo_transition=pseudo_transition,
pseudo_emission=pseudo_emission,
update_fn=update_fn,
)
p_initial, p_transition, p_emission = x
return MarkovModel(states, alphabet, p_initial, p_transition, p_emission) |
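A toy Baum-Welch run with hypothetical states and alphabet; the callback prints the log likelihood once per iteration:

```python
states = ["rainy", "sunny"]
alphabet = ["walk", "shop", "clean"]
training_data = [
    ["walk", "shop", "clean", "walk"],
    ["clean", "clean", "shop", "walk", "walk"],
]
mm = train_bw(states, alphabet, training_data,
              update_fn=lambda i, llik: print(i, llik))
```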
Implement the Baum-Welch algorithm to evaluate unknown parameters in the MarkovModel object (PRIVATE). | def _baum_welch(
N,
M,
training_outputs,
p_initial=None,
p_transition=None,
p_emission=None,
pseudo_initial=None,
pseudo_transition=None,
pseudo_emission=None,
update_fn=None,
):
"""Implement the Baum-Welch algorithm to evaluate unknown parameters in the MarkovModel object (PRIVATE)."""
if p_initial is None:
p_initial = _random_norm(N)
else:
p_initial = _copy_and_check(p_initial, (N,))
if p_transition is None:
p_transition = _random_norm((N, N))
else:
p_transition = _copy_and_check(p_transition, (N, N))
if p_emission is None:
p_emission = _random_norm((N, M))
else:
p_emission = _copy_and_check(p_emission, (N, M))
# Do all the calculations in log space to avoid underflows.
lp_initial = np.log(p_initial)
lp_transition = np.log(p_transition)
lp_emission = np.log(p_emission)
if pseudo_initial is not None:
lpseudo_initial = np.log(pseudo_initial)
else:
lpseudo_initial = None
if pseudo_transition is not None:
lpseudo_transition = np.log(pseudo_transition)
else:
lpseudo_transition = None
if pseudo_emission is not None:
lpseudo_emission = np.log(pseudo_emission)
else:
lpseudo_emission = None
# Iterate through each sequence of output, updating the parameters
# to the HMM. Stop when the log likelihoods of the sequences
# stop varying.
prev_llik = None
for i in range(MAX_ITERATIONS):
llik = LOG0
for outputs in training_outputs:
llik += _baum_welch_one(
N,
M,
outputs,
lp_initial,
lp_transition,
lp_emission,
lpseudo_initial,
lpseudo_transition,
lpseudo_emission,
)
if update_fn is not None:
update_fn(i, llik)
if prev_llik is not None and np.fabs(prev_llik - llik) < 0.1:
break
prev_llik = llik
else:
raise RuntimeError("HMM did not converge in %d iterations" % MAX_ITERATIONS)
# Return everything back in normal space.
return [np.exp(_) for _ in (lp_initial, lp_transition, lp_emission)] |
Execute one step of the Baum-Welch algorithm (PRIVATE). | def _baum_welch_one(
Do one iteration of Baum-Welch based on a sequence of output.
Changes the value for lp_initial, lp_transition and lp_emission in place. | def _baum_welch_one(
N,
M,
outputs,
lp_initial,
lp_transition,
lp_emission,
lpseudo_initial,
lpseudo_transition,
lpseudo_emission,
):
"""Execute one step for Baum-Welch algorithm (PRIVATE).
Do one iteration of Baum-Welch based on a sequence of output.
Changes the value for lp_initial, lp_transition and lp_emission in place.
"""
T = len(outputs)
fmat = _forward(N, T, lp_initial, lp_transition, lp_emission, outputs)
bmat = _backward(N, T, lp_transition, lp_emission, outputs)
# Calculate the probability of traversing each arc for any given
# transition.
lp_arc = np.zeros((N, N, T))
for t in range(T):
k = outputs[t]
lp_traverse = np.zeros((N, N)) # P going over one arc.
for i in range(N):
for j in range(N):
# P(getting to this arc)
# P(making this transition)
# P(emitting this character)
# P(going to the end)
lp = (
fmat[i][t]
+ lp_transition[i][j]
+ lp_emission[i][k]
+ bmat[j][t + 1]
)
lp_traverse[i][j] = lp
# Normalize the probability for this time step.
lp_arc[:, :, t] = lp_traverse - _logsum(lp_traverse)
# Sum of all the transitions out of state i at time t.
lp_arcout_t = np.zeros((N, T))
for t in range(T):
for i in range(N):
lp_arcout_t[i][t] = _logsum(lp_arc[i, :, t])
# Sum of all the transitions out of state i.
lp_arcout = np.zeros(N)
for i in range(N):
lp_arcout[i] = _logsum(lp_arcout_t[i, :])
# UPDATE P_INITIAL.
lp_initial = lp_arcout_t[:, 0]
if lpseudo_initial is not None:
lp_initial = _logvecadd(lp_initial, lpseudo_initial)
lp_initial = lp_initial - _logsum(lp_initial)
# UPDATE P_TRANSITION. p_transition[i][j] is the sum of all the
# transitions from i to j, normalized by the sum of the
# transitions out of i.
for i in range(N):
for j in range(N):
lp_transition[i][j] = _logsum(lp_arc[i, j, :]) - lp_arcout[i]
if lpseudo_transition is not None:
lp_transition[i] = _logvecadd(lp_transition[i], lpseudo_transition)
lp_transition[i] = lp_transition[i] - _logsum(lp_transition[i])
# UPDATE P_EMISSION. lp_emission[i][k] is the sum of all the
# transitions out of i when k is observed, divided by the sum of
# the transitions out of i.
for i in range(N):
ksum = np.zeros(M) + LOG0 # ksum[k] is the sum of all i with k.
for t in range(T):
k = outputs[t]
for j in range(N):
ksum[k] = logaddexp(ksum[k], lp_arc[i, j, t])
ksum = ksum - _logsum(ksum) # Normalize
if lpseudo_emission is not None:
ksum = _logvecadd(ksum, lpseudo_emission[i])
ksum = ksum - _logsum(ksum) # Renormalize
lp_emission[i, :] = ksum
# Calculate the log likelihood of the output based on the forward
# matrix. Since the parameters of the HMM have changed, the log
# likelihoods are going to be a step behind, and we might be doing
# one extra iteration of training. The alternative is to rerun
# the _forward algorithm and calculate from the clean one, but
# that may be more expensive than overshooting the training by one
# step.
return _logsum(fmat[:, T]) |
Implement forward algorithm (PRIVATE).
Calculate an Nx(T+1) matrix, where the last column is the total
probability of the output. | def _forward(N, T, lp_initial, lp_transition, lp_emission, outputs):
"""Implement forward algorithm (PRIVATE).
Calculate an Nx(T+1) matrix, where the last column is the total
probability of the output.
"""
matrix = np.zeros((N, T + 1))
# Initialize the first column to be the initial values.
matrix[:, 0] = lp_initial
for t in range(1, T + 1):
k = outputs[t - 1]
for j in range(N):
# The probability of the state is the sum of the
# transitions from all the states from time t-1.
lprob = LOG0
for i in range(N):
lp = matrix[i][t - 1] + lp_transition[i][j] + lp_emission[i][k]
lprob = logaddexp(lprob, lp)
matrix[j][t] = lprob
return matrix |
Implement backward algorithm (PRIVATE). | def _backward(N, T, lp_transition, lp_emission, outputs):
"""Implement backward algorithm (PRIVATE)."""
matrix = np.zeros((N, T + 1))
for t in range(T - 1, -1, -1):
k = outputs[t]
for i in range(N):
# The probability of the state is the sum of the
# transitions from all the states from time t+1.
lprob = LOG0
for j in range(N):
lp = matrix[j][t + 1] + lp_transition[i][j] + lp_emission[i][k]
lprob = logaddexp(lprob, lp)
matrix[i][t] = lprob
return matrix |
Train a visible MarkovModel using maximum likelihood estimates for each of the parameters.
Train a visible MarkovModel using maximum likelihood estimates
for each of the parameters. states is a list of strings that
describe the names of each state. alphabet is a list of objects
that indicate the allowed outputs. training_data is a list of
(outputs, observed states) where outputs is a list of emissions
from the alphabet, and observed states is a list of states from
states.
pseudo_initial, pseudo_transition, and pseudo_emission are
optional parameters that you can use to assign pseudo-counts to
different matrices. They should be matrices of the appropriate
size that contain numbers to add to each parameter matrix. | def train_visible(
states,
alphabet,
training_data,
pseudo_initial=None,
pseudo_transition=None,
pseudo_emission=None,
):
"""Train a visible MarkovModel using maximum likelihoood estimates for each of the parameters.
Train a visible MarkovModel using maximum likelihoood estimates
for each of the parameters. states is a list of strings that
describe the names of each state. alphabet is a list of objects
that indicate the allowed outputs. training_data is a list of
(outputs, observed states) where outputs is a list of the emission
from the alphabet, and observed states is a list of states from
states.
pseudo_initial, pseudo_transition, and pseudo_emission are
optional parameters that you can use to assign pseudo-counts to
different matrices. They should be matrices of the appropriate
size that contain numbers to add to each parameter matrix.
"""
N, M = len(states), len(alphabet)
if pseudo_initial is not None:
pseudo_initial = np.asarray(pseudo_initial)
if pseudo_initial.shape != (N,):
raise ValueError("pseudo_initial not shape len(states)")
if pseudo_transition is not None:
pseudo_transition = np.asarray(pseudo_transition)
if pseudo_transition.shape != (N, N):
raise ValueError("pseudo_transition not shape len(states) X len(states)")
if pseudo_emission is not None:
pseudo_emission = np.asarray(pseudo_emission)
if pseudo_emission.shape != (N, M):
raise ValueError("pseudo_emission not shape len(states) X len(alphabet)")
# Training data is given as a list of members of the alphabet.
# Replace those with indexes into the alphabet list for easier
# computation.
training_states, training_outputs = [], []
states_indexes = itemindex(states)
outputs_indexes = itemindex(alphabet)
for toutputs, tstates in training_data:
if len(tstates) != len(toutputs):
raise ValueError("states and outputs not aligned")
training_states.append([states_indexes[x] for x in tstates])
training_outputs.append([outputs_indexes[x] for x in toutputs])
x = _mle(
N,
M,
training_outputs,
training_states,
pseudo_initial,
pseudo_transition,
pseudo_emission,
)
p_initial, p_transition, p_emission = x
return MarkovModel(states, alphabet, p_initial, p_transition, p_emission) |
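A supervised-training sketch with invented data; note that each training item is (outputs, observed states), in that order:

```python
training_data = [
    (["walk", "shop"], ["sunny", "rainy"]),
    (["clean", "walk", "walk"], ["rainy", "sunny", "sunny"]),
]
mm = train_visible(["rainy", "sunny"], ["walk", "shop", "clean"],
                   training_data)
```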
Implement Maximum likelihood estimation algorithm (PRIVATE). | def _mle(
N,
M,
training_outputs,
training_states,
pseudo_initial,
pseudo_transition,
pseudo_emission,
):
"""Implement Maximum likelihood estimation algorithm (PRIVATE)."""
# p_initial is the probability that a sequence of states starts
# off with a particular one.
p_initial = np.zeros(N)
if pseudo_initial is not None:
p_initial = p_initial + pseudo_initial
for states in training_states:
p_initial[states[0]] += 1
p_initial = _normalize(p_initial)
# p_transition is the probability that a state leads to the next
# one. C(i,j)/C(i) where i and j are states.
p_transition = np.zeros((N, N))
if pseudo_transition is not None:
p_transition = p_transition + pseudo_transition
for states in training_states:
for n in range(len(states) - 1):
i, j = states[n], states[n + 1]
p_transition[i, j] += 1
for i in range(len(p_transition)):
p_transition[i, :] = p_transition[i, :] / sum(p_transition[i, :])
# p_emission is the probability of an output given a state.
# C(s,o)/C(s) where o is an output and s is a state.
# Start from add-one counts so no emission has zero probability.
p_emission = np.ones((N, M))
if pseudo_emission is not None:
p_emission = p_emission + pseudo_emission
for outputs, states in zip(training_outputs, training_states):
for o, s in zip(outputs, states):
p_emission[s, o] += 1
for i in range(len(p_emission)):
p_emission[i, :] = p_emission[i, :] / sum(p_emission[i, :])
return p_initial, p_transition, p_emission |
Return the indices of the maximum values along the vector (PRIVATE). | def _argmaxes(vector, allowance=None):
"""Return the indices of the maximum values along the vector (PRIVATE)."""
# Only the single best index is returned; the allowance parameter is unused.
return [np.argmax(vector)] |
Find the most likely sequences of states for the given Markov model output.
Returns a list of (states, score) tuples. | def find_states(markov_model, output):
"""Find the most likely sequences of states for the given Markov model output.
Returns a list of (states, score) tuples.
"""
mm = markov_model
N = len(mm.states)
# _viterbi does calculations in log space. Add a tiny bit to the
# matrices so that the logs will not break.
lp_initial = np.log(mm.p_initial + VERY_SMALL_NUMBER)
lp_transition = np.log(mm.p_transition + VERY_SMALL_NUMBER)
lp_emission = np.log(mm.p_emission + VERY_SMALL_NUMBER)
# Change output into a list of indexes into the alphabet.
indexes = itemindex(mm.alphabet)
output = [indexes[x] for x in output]
# Run the viterbi algorithm.
results = _viterbi(N, lp_initial, lp_transition, lp_emission, output)
for i in range(len(results)):
states, score = results[i]
results[i] = [mm.states[x] for x in states], np.exp(score)
return results |
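A decoding sketch, continuing the toy model above: recover the most likely hidden-state path for an observed sequence.

```python
for path, score in find_states(mm, ["walk", "clean", "shop"]):
    print(path, score)  # list of state names, path probability
```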
Implement Viterbi algorithm to find most likely states for a given input (PRIVATE). | def _viterbi(N, lp_initial, lp_transition, lp_emission, output):
"""Implement Viterbi algorithm to find most likely states for a given input (PRIVATE)."""
T = len(output)
# Store the backtrace in a NxT matrix.
backtrace = [] # list of indexes of states in previous timestep.
for i in range(N):
backtrace.append([None] * T)
# Store the best scores.
scores = np.zeros((N, T))
scores[:, 0] = lp_initial + lp_emission[:, output[0]]
for t in range(1, T):
k = output[t]
for j in range(N):
# Find the most likely place it came from.
i_scores = scores[:, t - 1] + lp_transition[:, j] + lp_emission[j, k]
indexes = _argmaxes(i_scores)
scores[j, t] = i_scores[indexes[0]]
backtrace[j][t] = indexes
# Do the backtrace. First, find a good place to start. Then,
# we'll follow the backtrace matrix to find the list of states.
# In the event of ties, there may be multiple paths back through
# the matrix, which implies a recursive solution. We'll simulate
# it by keeping our own stack.
in_process = [] # list of (t, states, score)
results = [] # return values. list of (states, score)
indexes = _argmaxes(scores[:, T - 1]) # pick the first place
for i in indexes:
in_process.append((T - 1, [i], scores[i][T - 1]))
while in_process:
t, states, score = in_process.pop()
if t == 0:
results.append((states, score))
else:
indexes = backtrace[states[0]][t]
for i in indexes:
in_process.append((t - 1, [i] + states, score))
return results |
Normalize matrix object (PRIVATE). | def _normalize(matrix):
"""Normalize matrix object (PRIVATE)."""
if len(matrix.shape) == 1:
matrix = matrix / sum(matrix)
elif len(matrix.shape) == 2:
# Normalize by rows.
for i in range(len(matrix)):
matrix[i, :] = matrix[i, :] / sum(matrix[i, :])
else:
raise ValueError("I cannot handle matrixes of that shape")
return matrix |
Normalize a uniform matrix (PRIVATE). | def _uniform_norm(shape):
"""Normalize a uniform matrix (PRIVATE)."""
matrix = np.ones(shape)
return _normalize(matrix) |
Normalize a random matrix (PRIVATE). | def _random_norm(shape):
"""Normalize a random matrix (PRIVATE)."""
matrix = np.random.random(shape)
return _normalize(matrix) |
Copy a matrix and check that its dimensions and normalization are correct (PRIVATE). | def _copy_and_check(matrix, desired_shape):
"""Copy a matrix and check that its dimensions and normalization are correct (PRIVATE)."""
# Copy the matrix.
matrix = np.array(matrix, copy=1)
# Check the dimensions.
if matrix.shape != desired_shape:
raise ValueError("Incorrect dimension")
# Make sure it's normalized.
if len(matrix.shape) == 1:
if np.fabs(sum(matrix) - 1.0) > 0.01:
raise ValueError("matrix not normalized to 1.0")
elif len(matrix.shape) == 2:
for i in range(len(matrix)):
if np.fabs(sum(matrix[i]) - 1.0) > 0.01:
raise ValueError("matrix %d not normalized to 1.0" % i)
else:
raise ValueError("I don't handle matrices > 2 dimensions")
return matrix |
Implement logsum for a matrix object (PRIVATE). | def _logsum(matrix):
"""Implement logsum for a matrix object (PRIVATE)."""
if len(matrix.shape) > 1:
vec = np.reshape(matrix, (np.prod(matrix.shape),))
else:
vec = matrix
sum = LOG0
for num in vec:
sum = logaddexp(sum, num)
return sum |
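For reference, numpy's logaddexp ufunc can perform this reduction in one vectorized call; a sketch (unlike the loop above, it does not fold in the LOG0 seed):

```python
def _logsum_vectorized(matrix):
    return np.logaddexp.reduce(np.ravel(matrix))
```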
Implement a log sum for two vector objects (PRIVATE). | def _logvecadd(logvec1, logvec2):
"""Implement a log sum for two vector objects (PRIVATE)."""
assert len(logvec1) == len(logvec2), "vectors aren't the same length"
sumvec = np.zeros(len(logvec1))
for i in range(len(logvec1)):
sumvec[i] = logaddexp(logvec1[i], logvec2[i])
return sumvec |
Return the exponential of a logsum (PRIVATE). | def _exp_logsum(numbers):
"""Return the exponential of a logsum (PRIVATE)."""
sum = _logsum(numbers)
return np.exp(sum) |
Calculate the log of the probability for each class.
me is a MaxEntropy object that has been trained. observation is a vector
representing the observed data. The return value is a list of
unnormalized log probabilities for each class. | def calculate(me, observation):
"""Calculate the log of the probability for each class.
me is a MaxEntropy object that has been trained. observation is a vector
representing the observed data. The return value is a list of
unnormalized log probabilities for each class.
"""
scores = []
assert len(me.feature_fns) == len(me.alphas)
for klass in me.classes:
lprob = 0.0
for fn, alpha in zip(me.feature_fns, me.alphas):
lprob += fn(observation, klass) * alpha
scores.append(lprob)
return scores |
Classify an observation into a class. | def classify(me, observation):
"""Classify an observation into a class."""
scores = calculate(me, observation)
max_score, klass = scores[0], me.classes[0]
for i in range(1, len(scores)):
if scores[i] > max_score:
max_score, klass = scores[i], me.classes[i]
return klass |
Evaluate a feature function on every instance of the training set and class (PRIVATE).
fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary. | def _eval_feature_fn(fn, xs, classes):
"""Evaluate a feature function on every instance of the training set and class (PRIVATE).
fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary.
"""
values = {}
for i in range(len(xs)):
for j in range(len(classes)):
f = fn(xs[i], classes[j])
if f != 0:
values[(i, j)] = f
return values |
Calculate the expectation of each function from the data (PRIVATE).
This is the constraint for the maximum entropy distribution. Return a
list of expectations, parallel to the list of features. | def _calc_empirical_expects(xs, ys, classes, features):
"""Calculate the expectation of each function from the data (PRIVATE).
This is the constraint for the maximum entropy distribution. Return a
list of expectations, parallel to the list of features.
"""
# E[f_i] = SUM_x,y P(x, y) f_i(x, y)
# = 1/N SUM_j f_i(x_j, y_j) under the empirical distribution
class2index = {}
for index, key in enumerate(classes):
class2index[key] = index
ys_i = [class2index[y] for y in ys]
expect = []
N = len(xs)
for feature in features:
s = 0
for i in range(N):
s += feature.get((i, ys_i[i]), 0)
expect.append(s / N)
return expect |
Calculate the expectation of each feature from the model (PRIVATE).
This is not used in maximum entropy training, but provides a good function
for debugging. | def _calc_model_expects(xs, classes, features, alphas):
"""Calculate the expectation of each feature from the model (PRIVATE).
This is not used in maximum entropy training, but provides a good function
for debugging.
"""
# SUM_X P(x) SUM_Y P(Y|X) F(X, Y)
# = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
expects = []
for feature in features:
sum = 0.0
for (i, j), f in feature.items():
sum += p_yx[i][j] * f
expects.append(sum / len(xs))
return expects |
Calculate conditional probability P(y|x) (PRIVATE).
y is the class and x is an instance from the training set.
Return an XS x CLASSES matrix of probabilities. | def _calc_p_class_given_x(xs, classes, features, alphas):
"""Calculate conditional probability P(y|x) (PRIVATE).
y is the class and x is an instance from the training set.
Return an XS x CLASSES matrix of probabilities.
"""
prob_yx = np.zeros((len(xs), len(classes)))
# Calculate log P(y, x).
assert len(features) == len(alphas)
for feature, alpha in zip(features, alphas):
for (x, y), f in feature.items():
prob_yx[x][y] += alpha * f
# Take an exponent to get P(y, x)
prob_yx = np.exp(prob_yx)
# Divide out the probability over each class, so we get P(y|x).
for i in range(len(xs)):
z = sum(prob_yx[i])
prob_yx[i] = prob_yx[i] / z
return prob_yx |
Calculate a matrix of f sharp values (PRIVATE). | def _calc_f_sharp(N, nclasses, features):
"""Calculate a matrix of f sharp values (PRIVATE)."""
# f#(x, y) = SUM_i feature(x, y)
f_sharp = np.zeros((N, nclasses))
for feature in features:
for (i, j), f in feature.items():
f_sharp[i][j] += f
return f_sharp |
Solve delta using Newton's method (PRIVATE). | def _iis_solve_delta(
N, feature, f_sharp, empirical, prob_yx, max_newton_iterations, newton_converge
):
"""Solve delta using Newton's method (PRIVATE)."""
# SUM_x P(x) * SUM_c P(c|x) f_i(x, c) e^[delta_i * f#(x, c)] = 0
delta = 0.0
iters = 0
while iters < max_newton_iterations: # iterate for Newton's method
f_newton = df_newton = 0.0 # evaluate the function and derivative
for (i, j), f in feature.items():
prod = prob_yx[i][j] * f * np.exp(delta * f_sharp[i][j])
f_newton += prod
df_newton += prod * f_sharp[i][j]
f_newton, df_newton = empirical - f_newton / N, -df_newton / N
ratio = f_newton / df_newton
delta -= ratio
if np.fabs(ratio) < newton_converge: # converged
break
iters = iters + 1
else:
raise RuntimeError("Newton's method did not converge")
return delta |
Do one iteration of hill climbing to find better alphas (PRIVATE). | def _train_iis(
xs,
classes,
features,
f_sharp,
alphas,
e_empirical,
max_newton_iterations,
newton_converge,
):
"""Do one iteration of hill climbing to find better alphas (PRIVATE)."""
# This is a good function to parallelize.
# Pre-calculate P(y|x)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
N = len(xs)
newalphas = alphas[:]
for i in range(len(alphas)):
delta = _iis_solve_delta(
N,
features[i],
f_sharp,
e_empirical[i],
p_yx,
max_newton_iterations,
newton_converge,
)
newalphas[i] += delta
return newalphas |
Train a maximum entropy classifier and return a MaxEntropy object.
Train a maximum entropy classifier on a training set.
training_set is a list of observations. results is a list of the
class assignments for each observation. feature_fns is a list of
the features. These are callback functions that take an
observation and class and return a 1 or 0. update_fn is a
callback function that is called at each training iteration. It is
passed a MaxEntropy object that encapsulates the current state of
the training.
The maximum number of iterations and the convergence criterion for IIS
are given by max_iis_iterations and iis_converge, respectively, while
max_newton_iterations and newton_converge are the maximum number
of iterations and the convergence criterion for Newton's method. | def train(
training_set,
results,
feature_fns,
update_fn=None,
max_iis_iterations=10000,
iis_converge=1.0e-5,
max_newton_iterations=100,
newton_converge=1.0e-10,
):
"""Train a maximum entropy classifier, returns MaxEntropy object.
Train a maximum entropy classifier on a training set.
training_set is a list of observations. results is a list of the
class assignments for each observation. feature_fns is a list of
the features. These are callback functions that take an
observation and class and return a 1 or 0. update_fn is a
callback function that is called at each training iteration. It is
passed a MaxEntropy object that encapsulates the current state of
the training.
The maximum number of iterations and the convergence criterion for IIS
are given by max_iis_iterations and iis_converge, respectively, while
max_newton_iterations and newton_converge are the maximum number
of iterations and the convergence criterion for Newton's method.
"""
if not training_set:
raise ValueError("No data in the training set.")
if len(training_set) != len(results):
raise ValueError("training_set and results should be parallel lists.")
# Rename variables for convenience.
xs, ys = training_set, results
# Get a list of all the classes that need to be trained.
classes = sorted(set(results))
# Cache values for all features.
features = [_eval_feature_fn(fn, training_set, classes) for fn in feature_fns]
# Cache values for f#.
f_sharp = _calc_f_sharp(len(training_set), len(classes), features)
# Pre-calculate the empirical expectations of the features.
e_empirical = _calc_empirical_expects(xs, ys, classes, features)
# Now train the alpha parameters to weigh each feature.
alphas = [0.0] * len(features)
iters = 0
while iters < max_iis_iterations:
nalphas = _train_iis(
xs,
classes,
features,
f_sharp,
alphas,
e_empirical,
max_newton_iterations,
newton_converge,
)
diff = [np.fabs(x - y) for x, y in zip(alphas, nalphas)]
diff = reduce(np.add, diff, 0)
alphas = nalphas
me = MaxEntropy()
me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
if update_fn is not None:
update_fn(me)
if diff < iis_converge: # converged
break
else:
raise RuntimeError("IIS did not converge")
return me |
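A toy maximum-entropy run on invented data. Feature functions take (observation, class) and return 1 or 0; the data is chosen so the empirical constraints are satisfiable, which IIS needs in order to converge:

```python
def is_red_apple(x, klass):
    return 1 if x[0] == "red" and klass == "apple" else 0

def is_small_berry(x, klass):
    return 1 if x[1] == "small" and klass == "berry" else 0

xs = [["red", "large"], ["blue", "small"],
      ["red", "small"], ["green", "small"]]
ys = ["apple", "berry", "berry", "apple"]
me = train(xs, ys, [is_red_apple, is_small_berry])
print(classify(me, ["red", "large"]))  # -> 'apple'
```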
Return a dictionary mapping each item to its relative frequency (PRIVATE). | def _contents(items):
"""Return a dictionary mapping each item to its relative frequency (PRIVATE)."""
term = 1.0 / len(items)
counts = {}
for item in items:
counts[item] = counts.get(item, 0) + term
return counts |
Calculate the logarithmic conditional probability for each class.
Arguments:
- nb - A NaiveBayes classifier that has been trained.
- observation - A list representing the observed data.
- scale - Boolean to indicate whether the probability should be
scaled by ``P(observation)``. By default, no scaling is done.
A dictionary is returned where the key is the class and the value is
the log probability of the class. | def calculate(nb, observation, scale=False):
"""Calculate the logarithmic conditional probability for each class.
Arguments:
- nb - A NaiveBayes classifier that has been trained.
- observation - A list representing the observed data.
- scale - Boolean to indicate whether the probability should be
scaled by ``P(observation)``. By default, no scaling is done.
A dictionary is returned where the key is the class and the value is
the log probability of the class.
"""
# P(class|observation) = P(observation|class)*P(class)/P(observation)
# Taking the log:
# lP(class|observation) = lP(observation|class)+lP(class)-lP(observation)
# Make sure the observation has the right dimensionality.
if len(observation) != nb.dimensionality:
raise ValueError(
f"observation in {len(observation)} dimension,"
f" but classifier in {nb.dimensionality}"
)
# Calculate log P(observation|class) for every class.
n = len(nb.classes)
lp_observation_class = np.zeros(n) # array of log P(observation|class)
for i in range(n):
# log P(observation|class) = SUM_i log P(observation_i|class)
probs = [None] * len(observation)
for j in range(len(observation)):
probs[j] = nb.p_conditional[i][j].get(observation[j], 0)
lprobs = np.log(np.clip(probs, 1.0e-300, 1.0e300))
lp_observation_class[i] = sum(lprobs)
# Calculate log P(class).
lp_prior = np.log(nb.p_prior)
# Calculate log P(observation).
lp_observation = 0.0 # P(observation)
if scale: # Only calculate this if requested.
# log P(observation) = log SUM_i P(observation|class_i)P(class_i)
obs = np.exp(np.clip(lp_prior + lp_observation_class, -700, +700))
lp_observation = np.log(sum(obs))
# Calculate log P(class|observation).
lp_class_observation = {} # Dict of class : log P(class|observation)
for i in range(len(nb.classes)):
lp_class_observation[nb.classes[i]] = (
lp_observation_class[i] + lp_prior[i] - lp_observation
)
return lp_class_observation |
Classify an observation into a class. | def classify(nb, observation):
"""Classify an observation into a class."""
# The class is the one with the highest probability.
probs = calculate(nb, observation, scale=False)
max_prob = max_class = None
for klass in nb.classes:
if max_prob is None or probs[klass] > max_prob:
max_prob, max_class = probs[klass], klass
return max_class |
Train a NaiveBayes classifier on a training set.
Arguments:
- training_set - List of observations.
- results - List of the class assignments for each observation.
Thus, training_set and results must be the same length.
- priors - Optional dictionary specifying the prior probabilities
for each type of result. If not specified, the priors will
be estimated from the training results. | def train(training_set, results, priors=None, typecode=None):
"""Train a NaiveBayes classifier on a training set.
Arguments:
- training_set - List of observations.
- results - List of the class assignments for each observation.
Thus, training_set and results must be the same length.
- priors - Optional dictionary specifying the prior probabilities
for each type of result. If not specified, the priors will
be estimated from the training results.
"""
if not len(training_set):
raise ValueError("No data in the training set.")
if len(training_set) != len(results):
raise ValueError("training_set and results should be parallel lists.")
# If no typecode is specified, try to pick a reasonable one. If
# training_set is a Numeric array, then use that typecode.
# Otherwise, choose a reasonable default.
# XXX NOT IMPLEMENTED
# Check to make sure each vector in the training set has the same
# dimensionality.
dimensions = [len(x) for x in training_set]
if min(dimensions) != max(dimensions):
raise ValueError("observations have different dimensionality")
nb = NaiveBayes()
nb.dimensionality = dimensions[0]
# Get a list of all the classes, and
# estimate the prior probabilities for the classes.
if priors is not None:
percs = priors
nb.classes = list(set(results))
else:
class_freq = _contents(results)
nb.classes = list(class_freq.keys())
percs = class_freq
nb.classes.sort() # keep it tidy
nb.p_prior = np.zeros(len(nb.classes))
for i in range(len(nb.classes)):
nb.p_prior[i] = percs[nb.classes[i]]
# Collect all the observations in class. For each class, make a
# matrix of training instances versus dimensions. I might be able
# to optimize this with Numeric, if the training_set parameter
# were guaranteed to be a matrix. However, this may not be the
# case, because the client may be hacking up a sparse matrix or
# something.
c2i = {} # class to index of class
for index, key in enumerate(nb.classes):
c2i[key] = index
observations = [[] for c in nb.classes] # separate observations by class
for i in range(len(results)):
klass, obs = results[i], training_set[i]
observations[c2i[klass]].append(obs)
# Now make the observations Numeric matrix.
for i in range(len(observations)):
# XXX typecode must be specified!
observations[i] = np.asarray(observations[i], typecode)
# Calculate P(value|class,dim) for every class.
# This is a good loop to optimize.
nb.p_conditional = []
for i in range(len(nb.classes)):
class_observations = observations[i] # observations for this class
nb.p_conditional.append([None] * nb.dimensionality)
for j in range(nb.dimensionality):
# Collect all the values in this dimension.
values = class_observations[:, j]
# Add pseudocounts here. This needs to be parameterized.
# values = list(values) + range(len(nb.classes)) # XXX add 1
# Estimate P(value|class,dim)
nb.p_conditional[i][j] = _contents(values)
return nb |
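A Naive Bayes sketch on invented discrete-valued observations (all vectors must have the same length):

```python
xs = [["sunny", "hot"], ["sunny", "mild"],
      ["rainy", "mild"], ["rainy", "hot"]]
ys = ["beach", "beach", "home", "home"]
nb = train(xs, ys)
print(classify(nb, ["sunny", "mild"]))  # -> 'beach'
```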
Return optimal alignments between two sequences (PRIVATE).
This method either returns a list of optimal alignments (with the same
score) or just the optimal score. | def _align(
sequenceA,
sequenceB,
match_fn,
gap_A_fn,
gap_B_fn,
penalize_extend_when_opening,
penalize_end_gaps,
align_globally,
gap_char,
force_generic,
score_only,
one_alignment_only,
):
"""Return optimal alignments between two sequences (PRIVATE).
This method either returns a list of optimal alignments (with the same
score) or just the optimal score.
"""
if not sequenceA or not sequenceB:
return []
try:
sequenceA + gap_char
sequenceB + gap_char
except TypeError:
raise TypeError(
"both sequences must be of the same type, either "
"string/sequence object or list. Gap character must "
"fit the sequence type (string or list)"
)
if not isinstance(sequenceA, list):
sequenceA = str(sequenceA)
if not isinstance(sequenceB, list):
sequenceB = str(sequenceB)
if not align_globally and (penalize_end_gaps[0] or penalize_end_gaps[1]):
warnings.warn(
'"penalize_end_gaps" should not be used in local '
"alignments. The resulting score may be wrong.",
BiopythonWarning,
)
if (
(not force_generic)
and isinstance(gap_A_fn, affine_penalty)
and isinstance(gap_B_fn, affine_penalty)
):
open_A, extend_A = gap_A_fn.open, gap_A_fn.extend
open_B, extend_B = gap_B_fn.open, gap_B_fn.extend
matrices = _make_score_matrix_fast(
sequenceA,
sequenceB,
match_fn,
open_A,
extend_A,
open_B,
extend_B,
penalize_extend_when_opening,
penalize_end_gaps,
align_globally,
score_only,
)
else:
matrices = _make_score_matrix_generic(
sequenceA,
sequenceB,
match_fn,
gap_A_fn,
gap_B_fn,
penalize_end_gaps,
align_globally,
score_only,
)
score_matrix, trace_matrix, best_score = matrices
# print("SCORE %s" % print_matrix(score_matrix))
# print("TRACEBACK %s" % print_matrix(trace_matrix))
# If they only want the score, then return it.
if score_only:
return best_score
starts = _find_start(score_matrix, best_score, align_globally)
# Recover the alignments and return them.
alignments = _recover_alignments(
sequenceA,
sequenceB,
starts,
best_score,
score_matrix,
trace_matrix,
align_globally,
gap_char,
one_alignment_only,
gap_A_fn,
gap_B_fn,
)
if not alignments:
# This may happen, see recover_alignments for explanation
score_matrix, trace_matrix = _reverse_matrices(score_matrix, trace_matrix)
starts = [(z, (y, x)) for z, (x, y) in starts]
alignments = _recover_alignments(
sequenceB,
sequenceA,
starts,
best_score,
score_matrix,
trace_matrix,
align_globally,
gap_char,
one_alignment_only,
gap_B_fn,
gap_A_fn,
reverse=True,
)
return alignments |
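These private helpers match Biopython's Bio.pairwise2 module; assuming that is the source, the public align.* functions are the usual entry point rather than _align() itself:

```python
from Bio import pairwise2
from Bio.pairwise2 import format_alignment

for aln in pairwise2.align.globalxx("ACCGT", "ACG"):
    print(format_alignment(*aln))
```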
Generate a score and traceback matrix (PRIVATE).
This implementation of the Needleman-Wunsch algorithm supports
general gap functions and is rather slow. It is called automatically if
you define your own gap functions. You can force the use of this method
with ``force_generic=True``. | def _make_score_matrix_generic(
sequenceA,
sequenceB,
match_fn,
gap_A_fn,
gap_B_fn,
penalize_end_gaps,
align_globally,
score_only,
):
"""Generate a score and traceback matrix (PRIVATE).
This implementation of the Needleman-Wunsch algorithm supports
general gap functions and is rather slow. It is called automatically if
you define your own gap functions. You can force the use of this method
with ``force_generic=True``.
"""
local_max_score = 0
# Create the score and traceback matrices. These should be in the
# shape:
# sequenceA (down) x sequenceB (across)
lenA, lenB = len(sequenceA), len(sequenceB)
score_matrix, trace_matrix = [], []
for i in range(lenA + 1):
score_matrix.append([None] * (lenB + 1))
if not score_only:
trace_matrix.append([None] * (lenB + 1))
# Initialize first row and column with gap scores. This is like opening up
# i gaps at the beginning of sequence A or B.
for i in range(lenA + 1):
if penalize_end_gaps[1]: # [1]:gap in sequence B
score = gap_B_fn(0, i)
else:
score = 0.0
score_matrix[i][0] = score
for i in range(lenB + 1):
if penalize_end_gaps[0]: # [0]:gap in sequence A
score = gap_A_fn(0, i)
else:
score = 0.0
score_matrix[0][i] = score
# Fill in the score matrix. Each position in the matrix
# represents an alignment between a character from sequence A to
# one in sequence B. As I iterate through the matrix, find the
# alignment by choose the best of:
# 1) extending a previous alignment without gaps
# 2) adding a gap in sequenceA
# 3) adding a gap in sequenceB
for row in range(1, lenA + 1):
for col in range(1, lenB + 1):
# First, calculate the score that would occur by extending
# the alignment without gaps.
# fmt: off
nogap_score = (
score_matrix[row - 1][col - 1]
+ match_fn(sequenceA[row - 1], sequenceB[col - 1])
)
# fmt: on
# Try to find a better score by opening gaps in sequenceA.
# Do this by checking alignments from each column in the row.
# Each column represents a different character to align from,
# and thus a different length gap.
# Although the gap function does not distinguish between opening
# and extending a gap, we distinguish them for the backtrace.
if not penalize_end_gaps[0] and row == lenA:
row_open = score_matrix[row][col - 1]
row_extend = max(score_matrix[row][x] for x in range(col))
else:
row_open = score_matrix[row][col - 1] + gap_A_fn(row, 1)
row_extend = max(
score_matrix[row][x] + gap_A_fn(row, col - x) for x in range(col)
)
# Try to find a better score by opening gaps in sequenceB.
if not penalize_end_gaps[1] and col == lenB:
col_open = score_matrix[row - 1][col]
col_extend = max(score_matrix[x][col] for x in range(row))
else:
col_open = score_matrix[row - 1][col] + gap_B_fn(col, 1)
col_extend = max(
score_matrix[x][col] + gap_B_fn(col, row - x) for x in range(row)
)
best_score = max(nogap_score, row_open, row_extend, col_open, col_extend)
local_max_score = max(local_max_score, best_score)
if not align_globally and best_score < 0:
score_matrix[row][col] = 0.0
else:
score_matrix[row][col] = best_score
# The backtrace is encoded binary. See _make_score_matrix_fast
# for details.
if not score_only:
trace_score = 0
if rint(nogap_score) == rint(best_score):
trace_score += 2
if rint(row_open) == rint(best_score):
trace_score += 1
if rint(row_extend) == rint(best_score):
trace_score += 8
if rint(col_open) == rint(best_score):
trace_score += 4
if rint(col_extend) == rint(best_score):
trace_score += 16
trace_matrix[row][col] = trace_score
if not align_globally:
best_score = local_max_score
return score_matrix, trace_matrix, best_score |
Generate a score and traceback matrix according to Gotoh (PRIVATE).
This is an implementation of the Needleman-Wunsch dynamic programming
algorithm as modified by Gotoh, implementing affine gap penalties.
In short, we have three matrices, holding scores for alignments ending
in (1) a match/mismatch, (2) a gap in sequence A, and (3) a gap in
sequence B, respectively. However, we can combine them in one matrix,
which holds the best scores, and store only those values from the
other matrices that are actually used for the next step of calculation.
The traceback matrix holds the positions for backtracing the alignment. | def _make_score_matrix_fast(
sequenceA,
sequenceB,
match_fn,
open_A,
extend_A,
open_B,
extend_B,
penalize_extend_when_opening,
penalize_end_gaps,
align_globally,
score_only,
):
"""Generate a score and traceback matrix according to Gotoh (PRIVATE).
This is an implementation of the Needleman-Wunsch dynamic programming
algorithm as modified by Gotoh, implementing affine gap penalties.
In short, we have three matrices, holding scores for alignments ending
in (1) a match/mismatch, (2) a gap in sequence A, and (3) a gap in
sequence B, respectively. However, we can combine them in one matrix,
which holds the best scores, and store only those values from the
other matrices that are actually used for the next step of calculation.
The traceback matrix holds the positions for backtracing the alignment.
"""
first_A_gap = calc_affine_penalty(1, open_A, extend_A, penalize_extend_when_opening)
first_B_gap = calc_affine_penalty(1, open_B, extend_B, penalize_extend_when_opening)
local_max_score = 0
# Create the score and traceback matrices. These should be in the
# shape:
# sequenceA (down) x sequenceB (across)
lenA, lenB = len(sequenceA), len(sequenceB)
score_matrix, trace_matrix = [], []
for i in range(lenA + 1):
score_matrix.append([None] * (lenB + 1))
if not score_only:
trace_matrix.append([None] * (lenB + 1))
# Initialize first row and column with gap scores. This is like opening up
# i gaps at the beginning of sequence A or B.
for i in range(lenA + 1):
if penalize_end_gaps[1]: # [1]:gap in sequence B
score = calc_affine_penalty(
i, open_B, extend_B, penalize_extend_when_opening
)
else:
score = 0
score_matrix[i][0] = score
for i in range(lenB + 1):
if penalize_end_gaps[0]: # [0]:gap in sequence A
score = calc_affine_penalty(
i, open_A, extend_A, penalize_extend_when_opening
)
else:
score = 0
score_matrix[0][i] = score
# Now initialize the col 'matrix'. Actually this is only a one dimensional
# list, since we only need the col scores from the last row.
col_score = [0] # Best score, if actual alignment ends with gap in seqB
for i in range(1, lenB + 1):
col_score.append(
calc_affine_penalty(i, 2 * open_B, extend_B, penalize_extend_when_opening)
)
# The row 'matrix' is calculated on the fly. Here we only need the actual
# score.
# Now, filling up the score and traceback matrices:
for row in range(1, lenA + 1):
row_score = calc_affine_penalty(
row, 2 * open_A, extend_A, penalize_extend_when_opening
)
for col in range(1, lenB + 1):
# Calculate the score that would occur by extending the
# alignment without gaps.
# fmt: off
nogap_score = (
score_matrix[row - 1][col - 1]
+ match_fn(sequenceA[row - 1], sequenceB[col - 1])
)
# fmt: on
# Check the score that would occur if there were a gap in
# sequence A. This could come from opening a new gap or
# extending an existing one.
# A gap in sequence A can also be opened if it follows a gap in
# sequence B: A-
# -B
if not penalize_end_gaps[0] and row == lenA:
row_open = score_matrix[row][col - 1]
row_extend = row_score
else:
row_open = score_matrix[row][col - 1] + first_A_gap
row_extend = row_score + extend_A
row_score = max(row_open, row_extend)
# The same for sequence B:
if not penalize_end_gaps[1] and col == lenB:
col_open = score_matrix[row - 1][col]
col_extend = col_score[col]
else:
col_open = score_matrix[row - 1][col] + first_B_gap
col_extend = col_score[col] + extend_B
col_score[col] = max(col_open, col_extend)
best_score = max(nogap_score, col_score[col], row_score)
local_max_score = max(local_max_score, best_score)
if not align_globally and best_score < 0:
score_matrix[row][col] = 0
else:
score_matrix[row][col] = best_score
# Now the trace_matrix. The edges of the backtrace are encoded
# binary: 1 = open gap in seqA, 2 = match/mismatch of seqA and
# seqB, 4 = open gap in seqB, 8 = extend gap in seqA, and
# 16 = extend gap in seqB. These values can be summed up.
# Thus, the trace score 7 means that the best score can either
# come from opening a gap in seqA (=1), pairing two characters
# of seqA and seqB (+2=3) or opening a gap in seqB (+4=7).
# However, if we only want the score we don't care about the trace.
if not score_only:
row_score_rint = rint(row_score)
col_score_rint = rint(col_score[col])
row_trace_score = 0
col_trace_score = 0
if rint(row_open) == row_score_rint:
row_trace_score += 1 # Open gap in seqA
if rint(row_extend) == row_score_rint:
row_trace_score += 8 # Extend gap in seqA
if rint(col_open) == col_score_rint:
col_trace_score += 4 # Open gap in seqB
if rint(col_extend) == col_score_rint:
col_trace_score += 16 # Extend gap in seqB
trace_score = 0
best_score_rint = rint(best_score)
if rint(nogap_score) == best_score_rint:
trace_score += 2 # Align seqA with seqB
if row_score_rint == best_score_rint:
trace_score += row_trace_score
if col_score_rint == best_score_rint:
trace_score += col_trace_score
trace_matrix[row][col] = trace_score
if not align_globally:
best_score = local_max_score
return score_matrix, trace_matrix, best_score |
Do the backtracing and return a list of alignments (PRIVATE).
Recover the alignments by following the traceback matrix. This
is a recursive procedure, but it's implemented here iteratively
with a stack.
sequenceA and sequenceB may be sequences, including strings,
lists, or list-like objects. In order to preserve the type of
the object, we need to use slices on the sequences instead of
indexes. For example, sequenceA[row] may return a type that's
not compatible with sequenceA, e.g. if sequenceA is a list and
sequenceA[row] is a string. Thus, avoid using indexes and use
slices, e.g. sequenceA[row:row+1]. Assume that client-defined
sequence classes preserve these semantics. | def _recover_alignments(
sequenceA,
sequenceB,
starts,
best_score,
score_matrix,
trace_matrix,
align_globally,
gap_char,
one_alignment_only,
gap_A_fn,
gap_B_fn,
reverse=False,
):
"""Do the backtracing and return a list of alignments (PRIVATE).
Recover the alignments by following the traceback matrix. This
is a recursive procedure, but it's implemented here iteratively
with a stack.
sequenceA and sequenceB may be sequences, including strings,
lists, or list-like objects. In order to preserve the type of
the object, we need to use slices on the sequences instead of
indexes. For example, sequenceA[row] may return a type that's
not compatible with sequenceA, e.g. if sequenceA is a list and
sequenceA[row] is a string. Thus, avoid using indexes and use
slices, e.g. sequenceA[row:row+1]. Assume that client-defined
sequence classes preserve these semantics.
"""
lenA, lenB = len(sequenceA), len(sequenceB)
ali_seqA, ali_seqB = sequenceA[0:0], sequenceB[0:0]
tracebacks = []
in_process = []
for start in starts:
score, (row, col) = start
begin = 0
if align_globally:
end = None
else:
# If this start is a zero-extension: don't start here!
if (score, (row - 1, col - 1)) in starts:
continue
# Local alignments should start with a positive score!
if score <= 0:
continue
# Local alignments should not end with a gap!:
trace = trace_matrix[row][col]
if (trace - trace % 2) % 4 == 2: # Trace contains 'nogap', fine!
trace_matrix[row][col] = 2
# If not, don't start here!
else:
continue
end = -max(lenA - row, lenB - col)
if not end:
end = None
col_distance = lenB - col
row_distance = lenA - row
# fmt: off
ali_seqA = (
(col_distance - row_distance) * gap_char
+ sequenceA[lenA - 1 : row - 1 : -1]
)
ali_seqB = (
(row_distance - col_distance) * gap_char
+ sequenceB[lenB - 1 : col - 1 : -1]
)
# fmt: on
in_process += [
(ali_seqA, ali_seqB, end, row, col, False, trace_matrix[row][col])
]
while in_process and len(tracebacks) < MAX_ALIGNMENTS:
# Although we allow a gap in seqB to be followed by a gap in seqA,
# we don't want to allow it the other way round, since this would
# give redundant alignments of type: A- vs. -A
# -B B-
# Thus we need to keep track if a gap in seqA was opened (col_gap)
# and stop the backtrace (dead_end) if a gap in seqB follows.
#
# Attention: This may fail if the gap penalties for the two sequences
# are different. In that case the second alignment may be the only
# optimal alignment, so it can happen that no alignment is returned. For
# this case a workaround was implemented, which reverses the input and
# the matrices (this happens in _reverse_matrices) and repeats the
# backtrace. The variable 'reverse' keeps track of this.
dead_end = False
ali_seqA, ali_seqB, end, row, col, col_gap, trace = in_process.pop()
while (row > 0 or col > 0) and not dead_end:
cache = (ali_seqA[:], ali_seqB[:], end, row, col, col_gap)
# If trace is empty we have reached at least one border of the
# matrix or the end of a local alignment. Just add the rest of
# the sequence(s) and fill with gaps if necessary.
if not trace:
if col and col_gap:
dead_end = True
else:
ali_seqA, ali_seqB = _finish_backtrace(
sequenceA, sequenceB, ali_seqA, ali_seqB, row, col, gap_char
)
break
elif trace % 2 == 1: # = row open = open gap in seqA
trace -= 1
if col_gap:
dead_end = True
else:
col -= 1
ali_seqA += gap_char
ali_seqB += sequenceB[col : col + 1]
col_gap = False
elif trace % 4 == 2: # = match/mismatch of seqA with seqB
trace -= 2
row -= 1
col -= 1
ali_seqA += sequenceA[row : row + 1]
ali_seqB += sequenceB[col : col + 1]
col_gap = False
elif trace % 8 == 4: # = col open = open gap in seqB
trace -= 4
row -= 1
ali_seqA += sequenceA[row : row + 1]
ali_seqB += gap_char
col_gap = True
elif trace in (8, 24): # = row extend = extend gap in seqA
trace -= 8
if col_gap:
dead_end = True
else:
col_gap = False
# We need to find the starting point of the extended gap
x = _find_gap_open(
sequenceA,
sequenceB,
ali_seqA,
ali_seqB,
end,
row,
col,
col_gap,
gap_char,
score_matrix,
trace_matrix,
in_process,
gap_A_fn,
col,
row,
"col",
best_score,
align_globally,
)
ali_seqA, ali_seqB, row, col, in_process, dead_end = x
elif trace == 16: # = col extend = extend gap in seqB
trace -= 16
col_gap = True
x = _find_gap_open(
sequenceA,
sequenceB,
ali_seqA,
ali_seqB,
end,
row,
col,
col_gap,
gap_char,
score_matrix,
trace_matrix,
in_process,
gap_B_fn,
row,
col,
"row",
best_score,
align_globally,
)
ali_seqA, ali_seqB, row, col, in_process, dead_end = x
if trace: # There is another path to follow...
cache += (trace,)
in_process.append(cache)
trace = trace_matrix[row][col]
if not align_globally:
if score_matrix[row][col] == best_score:
# We have gone through a 'zero-score' extension, discard it
dead_end = True
elif score_matrix[row][col] <= 0:
# We have reached the end of the backtrace
begin = max(row, col)
trace = 0
if not dead_end:
if not reverse:
tracebacks.append((ali_seqA[::-1], ali_seqB[::-1], score, begin, end))
else:
tracebacks.append((ali_seqB[::-1], ali_seqA[::-1], score, begin, end))
if one_alignment_only:
break
return _clean_alignments(tracebacks) |
Return a list of starting points (score, (row, col)) (PRIVATE).
These indicate every possible place to start a traceback. | def _find_start(score_matrix, best_score, align_globally):
"""Return a list of starting points (score, (row, col)) (PRIVATE).
These indicate every possible place to start a traceback.
"""
nrows, ncols = len(score_matrix), len(score_matrix[0])
# In this implementation of the global algorithm, the start will always be
# the bottom right corner of the matrix.
if align_globally:
starts = [(best_score, (nrows - 1, ncols - 1))]
else:
# For local alignments, there may be many different start points.
starts = []
tolerance = 0 # XXX do anything with this?
# Now find all the positions within some tolerance of the best
# score.
for row in range(nrows):
for col in range(ncols):
score = score_matrix[row][col]
if rint(abs(score - best_score)) <= rint(tolerance):
starts.append((score, (row, col)))
return starts |
Reverse score and trace matrices (PRIVATE). | def _reverse_matrices(score_matrix, trace_matrix):
"""Reverse score and trace matrices (PRIVATE)."""
reverse_score_matrix = []
reverse_trace_matrix = []
# fmt: off
reverse_trace = {
1: 4, 2: 2, 3: 6, 4: 1, 5: 5, 6: 3, 7: 7, 8: 16, 9: 20, 10: 18, 11: 22, 12: 17,
13: 21, 14: 19, 15: 23, 16: 8, 17: 12, 18: 10, 19: 14, 20: 9, 21: 13, 22: 11,
23: 15, 24: 24, 25: 28, 26: 26, 27: 30, 28: 25, 29: 29, 30: 27, 31: 31,
None: None,
}
# fmt: on
for col in range(len(score_matrix[0])):
new_score_row = []
new_trace_row = []
for row in range(len(score_matrix)):
new_score_row.append(score_matrix[row][col])
new_trace_row.append(reverse_trace[trace_matrix[row][col]])
reverse_score_matrix.append(new_score_row)
reverse_trace_matrix.append(new_trace_row)
return reverse_score_matrix, reverse_trace_matrix |
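The hard-coded reverse_trace table above is just the trace encoding with the roles of seqA and seqB exchanged: the open-gap bits 1 and 4 swap, the extend-gap bits 8 and 16 swap, and the match/mismatch bit 2 stays put. A small sketch (not part of the source) checking a few entries of the table:

def _swap_gap_bits(trace):
    # Keep the symmetric match/mismatch bit; swap the seqA/seqB gap bits.
    swapped = trace & 2
    swapped |= (4 if trace & 1 else 0) | (1 if trace & 4 else 0)
    swapped |= (16 if trace & 8 else 0) | (8 if trace & 16 else 0)
    return swapped

assert _swap_gap_bits(9) == 20   # open + extend in seqA -> open + extend in seqB
assert _swap_gap_bits(7) == 7    # 1 + 2 + 4 is symmetric under the swap
assert _swap_gap_bits(24) == 24  # the two extend bits simply exchange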
Take a list of alignments and return a cleaned version (PRIVATE).
Remove duplicates, make sure begin and end are set correctly, remove
empty alignments. | def _clean_alignments(alignments):
"""Take a list of alignments and return a cleaned version (PRIVATE).
Remove duplicates, make sure begin and end are set correctly, remove
empty alignments.
"""
unique_alignments = []
for align in alignments:
if align not in unique_alignments:
unique_alignments.append(align)
i = 0
while i < len(unique_alignments):
seqA, seqB, score, begin, end = unique_alignments[i]
# Make sure end is set reasonably.
if end is None: # global alignment
end = len(seqA)
elif end < 0:
end = end + len(seqA)
# If there's no alignment here, get rid of it.
if begin >= end:
del unique_alignments[i]
continue
unique_alignments[i] = Alignment(seqA, seqB, score, begin, end)
i += 1
return unique_alignments |
Add remaining sequences and fill with gaps if necessary (PRIVATE). | def _finish_backtrace(sequenceA, sequenceB, ali_seqA, ali_seqB, row, col, gap_char):
"""Add remaining sequences and fill with gaps if necessary (PRIVATE)."""
if row:
ali_seqA += sequenceA[row - 1 :: -1]
if col:
ali_seqB += sequenceB[col - 1 :: -1]
if row > col:
ali_seqB += gap_char * (len(ali_seqA) - len(ali_seqB))
elif col > row:
ali_seqA += gap_char * (len(ali_seqB) - len(ali_seqA))
return ali_seqA, ali_seqB |
Find the starting point(s) of the extended gap (PRIVATE). | def _find_gap_open(
sequenceA,
sequenceB,
ali_seqA,
ali_seqB,
end,
row,
col,
col_gap,
gap_char,
score_matrix,
trace_matrix,
in_process,
gap_fn,
target,
index,
direction,
best_score,
align_globally,
):
"""Find the starting point(s) of the extended gap (PRIVATE)."""
dead_end = False
target_score = score_matrix[row][col]
for n in range(target):
if direction == "col":
col -= 1
ali_seqA += gap_char
ali_seqB += sequenceB[col : col + 1]
else:
row -= 1
ali_seqA += sequenceA[row : row + 1]
ali_seqB += gap_char
actual_score = score_matrix[row][col] + gap_fn(index, n + 1)
if not align_globally and score_matrix[row][col] == best_score:
# We have run through a 'zero-score' extension and discard it
dead_end = True
break
if rint(actual_score) == rint(target_score) and n > 0:
if not trace_matrix[row][col]:
break
else:
in_process.append(
(
ali_seqA[:],
ali_seqB[:],
end,
row,
col,
col_gap,
trace_matrix[row][col],
)
)
if not trace_matrix[row][col]:
dead_end = True
return ali_seqA, ali_seqB, row, col, in_process, dead_end |
Scale a number by the declared precision and round it to an integer. | def rint(x, precision=_PRECISION):
"""Scale a number by the declared precision and round it to an integer."""
return int(x * precision + 0.5) |
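A usage sketch (not part of the source), assuming the module-level _PRECISION of 1000 used by pairwise2: scores are compared after scaling to three decimal places, so floating-point noise below that threshold is ignored.

assert rint(1.2344) == 1234
assert rint(1.2346) == 1235
assert rint(1.0000004) == rint(1.0)  # sub-precision noise compares equal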
Calculate a penalty score for the gap function. | def calc_affine_penalty(length, open, extend, penalize_extend_when_opening):
"""Calculate a penalty score for the gap function."""
if length <= 0:
return 0.0
penalty = open + extend * length
if not penalize_extend_when_opening:
penalty -= extend
return penalty |
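A worked example (not part of the source): with open = -10 and extend = -0.5, a gap of length 3 costs -11.5 when the opening position is also charged an extension, and -11.0 when it is not.

assert calc_affine_penalty(3, -10, -0.5, penalize_extend_when_opening=True) == -11.5
assert calc_affine_penalty(3, -10, -0.5, penalize_extend_when_opening=False) == -11.0
assert calc_affine_penalty(0, -10, -0.5, False) == 0.0  # a zero-length gap is free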
Print out a matrix for debugging purposes. | def print_matrix(matrix):
"""Print out a matrix for debugging purposes."""
# Transpose the matrix and get the length of the values in each column.
matrixT = [[] for x in range(len(matrix[0]))]
for i in range(len(matrix)):
for j in range(len(matrix[i])):
matrixT[j].append(len(str(matrix[i][j])))
ndigits = [max(x) for x in matrixT]
for i in range(len(matrix)):
# Use a string formatting trick to add leading spaces.
print(
" ".join("%*s " % (ndigits[j], matrix[i][j]) for j in range(len(matrix[i])))
) |
Format the alignment prettily into a string.
IMPORTANT: Gap symbol must be "-" (or ['-'] for lists)!
Since Biopython 1.71 identical matches are shown with a pipe
character, mismatches as a dot, and gaps as a space.
Prior releases just used the pipe character to indicate the
aligned region (matches, mismatches and gaps).
Also, in local alignments, if the alignment does not include
the whole sequences, now only the aligned part is shown,
together with the start positions of the aligned subsequences.
The start positions are 1-based; so start position n is the
n-th base/amino acid in the *un-aligned* sequence.
NOTE: This is different to the alignment's begin/end values,
which give the Python indices (0-based) of the bases/amino acids
in the *aligned* sequences.
If you want to restore the 'historic' behaviour, that means
displaying the whole sequences (including the non-aligned parts),
use ``full_sequences=True``. In this case, the non-aligned leading
and trailing parts are also indicated by spaces in the match-line. | def format_alignment(align1, align2, score, begin, end, full_sequences=False):
"""Format the alignment prettily into a string.
IMPORTANT: Gap symbol must be "-" (or ['-'] for lists)!
Since Biopython 1.71 identical matches are shown with a pipe
character, mismatches as a dot, and gaps as a space.
Prior releases just used the pipe character to indicate the
aligned region (matches, mismatches and gaps).
Also, in local alignments, if the alignment does not include
the whole sequences, now only the aligned part is shown,
together with the start positions of the aligned subsequences.
The start positions are 1-based; so start position n is the
n-th base/amino acid in the *un-aligned* sequence.
NOTE: This is different to the alignment's begin/end values,
which give the Python indices (0-based) of the bases/amino acids
in the *aligned* sequences.
If you want to restore the 'historic' behaviour, that means
displaying the whole sequences (including the non-aligned parts),
use ``full_sequences=True``. In this case, the non-aligned leading
and trailing parts are also indicated by spaces in the match-line.
"""
align_begin = begin
align_end = end
start1 = start2 = ""
start_m = begin # Begin of match line (how many spaces to include)
# For local alignments:
if not full_sequences and (begin != 0 or end != len(align1)):
# Calculate the actual start positions in the un-aligned sequences
# This will only work if the gap symbol is '-' or ['-']!
start1 = str(len(align1[:begin]) - align1[:begin].count("-") + 1) + " "
start2 = str(len(align2[:begin]) - align2[:begin].count("-") + 1) + " "
start_m = max(len(start1), len(start2))
elif full_sequences:
start_m = 0
begin = 0
end = len(align1)
if isinstance(align1, list):
# List elements will be separated by spaces, since they can be
# of different lengths
align1 = [a + " " for a in align1]
align2 = [a + " " for a in align2]
s1_line = ["{:>{width}}".format(start1, width=start_m)] # seq1 line
m_line = [" " * start_m] # match line
s2_line = ["{:>{width}}".format(start2, width=start_m)] # seq2 line
for n, (a, b) in enumerate(zip(align1[begin:end], align2[begin:end])):
# Since list elements can be of different length, we center them,
# using the maximum length of the two compared elements as width
m_len = max(len(a), len(b))
s1_line.append("{:^{width}}".format(a, width=m_len))
s2_line.append("{:^{width}}".format(b, width=m_len))
if full_sequences and (n < align_begin or n >= align_end):
m_line.append("{:^{width}}".format(" ", width=m_len)) # space
continue
if a == b:
m_line.append("{:^{width}}".format("|", width=m_len)) # match
elif a.strip() == "-" or b.strip() == "-":
m_line.append("{:^{width}}".format(" ", width=m_len)) # gap
else:
m_line.append("{:^{width}}".format(".", width=m_len)) # mismatch
s2_line.append(f"\n Score={score:g}\n")
return "\n".join(["".join(s1_line), "".join(m_line), "".join(s2_line)]) |
Make a python string translation table (PRIVATE).
Arguments:
- complement_mapping - a dictionary such as ambiguous_dna_complement
and ambiguous_rna_complement from Data.IUPACData.
Returns a translation table (a bytes object of length 256) for use with
the python string's translate method to use in a (reverse) complement.
Compatible with lower case and upper case sequences.
For internal use only. | def _maketrans(complement_mapping):
"""Make a python string translation table (PRIVATE).
Arguments:
- complement_mapping - a dictionary such as ambiguous_dna_complement
and ambiguous_rna_complement from Data.IUPACData.
Returns a translation table (a bytes object of length 256) for use with
the python string's translate method to use in a (reverse) complement.
Compatible with lower case and upper case sequences.
For internal use only.
"""
keys = "".join(complement_mapping.keys()).encode("ASCII")
values = "".join(complement_mapping.values()).encode("ASCII")
return bytes.maketrans(keys + keys.lower(), values + values.lower()) |
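A sketch (not part of the source): build a toy complement table for unambiguous DNA and apply it via bytes.translate. The toy_mapping dict is illustrative only, standing in for the ambiguous mappings from Data.IUPACData.

toy_mapping = {"A": "T", "T": "A", "C": "G", "G": "C"}  # illustrative mapping
table = _maketrans(toy_mapping)
assert "ACGTacgt".encode("ASCII").translate(table).decode("ASCII") == "TGCAtgca"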
Transcribe a DNA sequence into RNA.
Following the usual convention, the sequence is interpreted as the
coding strand of the DNA double helix, not the template strand. This
means we can get the RNA sequence just by switching T to U.
If given a string, returns a new string object.
Given a Seq or MutableSeq, returns a new Seq object.
e.g.
>>> transcribe("ACTGN")
'ACUGN' | def transcribe(dna):
"""Transcribe a DNA sequence into RNA.
Following the usual convention, the sequence is interpreted as the
coding strand of the DNA double helix, not the template strand. This
means we can get the RNA sequence just by switching T to U.
If given a string, returns a new string object.
Given a Seq or MutableSeq, returns a new Seq object.
e.g.
>>> transcribe("ACTGN")
'ACUGN'
"""
if isinstance(dna, Seq):
return dna.transcribe()
elif isinstance(dna, MutableSeq):
return Seq(dna).transcribe()
else:
return dna.replace("T", "U").replace("t", "u") |
Return the RNA sequence back-transcribed into DNA.
If given a string, returns a new string object.
Given a Seq or MutableSeq, returns a new Seq object.
e.g.
>>> back_transcribe("ACUGN")
'ACTGN' | def back_transcribe(rna):
"""Return the RNA sequence back-transcribed into DNA.
If given a string, returns a new string object.
Given a Seq or MutableSeq, returns a new Seq object.
e.g.
>>> back_transcribe("ACUGN")
'ACTGN'
"""
if isinstance(rna, Seq):
return rna.back_transcribe()
elif isinstance(rna, MutableSeq):
return Seq(rna).back_transcribe()
else:
return rna.replace("U", "T").replace("u", "t") |
Translate nucleotide string into a protein string (PRIVATE).
Arguments:
- sequence - a string
- table - Which codon table to use? This can be either a name (string),
an NCBI identifier (integer), or a CodonTable object (useful for
non-standard genetic codes). This defaults to the "Standard" table.
- stop_symbol - a single character string, what to use for terminators.
- to_stop - boolean, should translation terminate at the first
in frame stop codon? If there is no in-frame stop codon
then translation continues to the end.
- pos_stop - a single character string for a possible stop codon
(e.g. TAN or NNN)
- cds - Boolean, indicates this is a complete CDS. If True, this
checks the sequence starts with a valid alternative start
codon (which will be translated as methionine, M), that the
sequence length is a multiple of three, and that there is a
single in frame stop codon at the end (this will be excluded
from the protein sequence, regardless of the to_stop option).
If these tests fail, an exception is raised.
- gap - Single character string to denote symbol used for gaps.
Defaults to None.
Returns a string.
e.g.
>>> from Bio.Data import CodonTable
>>> table = CodonTable.ambiguous_dna_by_id[1]
>>> _translate_str("AAA", table)
'K'
>>> _translate_str("TAR", table)
'*'
>>> _translate_str("TAN", table)
'X'
>>> _translate_str("TAN", table, pos_stop="@")
'@'
>>> _translate_str("TA?", table)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: Codon 'TA?' is invalid
In a change from older versions of Biopython, partial codons are now
always regarded as an error (previously only checked if cds=True)
and will trigger a warning (likely to become an exception in a
future release).
If **cds=True**, the start and stop codons are checked, and the start
codon will be translated as methionine. The sequence must be a
whole number of codons.
>>> _translate_str("ATGCCCTAG", table, cds=True)
'MP'
>>> _translate_str("AAACCCTAG", table, cds=True)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: First codon 'AAA' is not a start codon
>>> _translate_str("ATGCCCTAGCCCTAG", table, cds=True)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: Extra in frame stop codon 'TAG' found. | def _translate_str(
sequence, table, stop_symbol="*", to_stop=False, cds=False, pos_stop="X", gap=None
):
"""Translate nucleotide string into a protein string (PRIVATE).
Arguments:
- sequence - a string
- table - Which codon table to use? This can be either a name (string),
an NCBI identifier (integer), or a CodonTable object (useful for
non-standard genetic codes). This defaults to the "Standard" table.
- stop_symbol - a single character string, what to use for terminators.
- to_stop - boolean, should translation terminate at the first
in frame stop codon? If there is no in-frame stop codon
then translation continues to the end.
- pos_stop - a single character string for a possible stop codon
(e.g. TAN or NNN)
- cds - Boolean, indicates this is a complete CDS. If True, this
checks the sequence starts with a valid alternative start
codon (which will be translated as methionine, M), that the
sequence length is a multiple of three, and that there is a
single in frame stop codon at the end (this will be excluded
from the protein sequence, regardless of the to_stop option).
If these tests fail, an exception is raised.
- gap - Single character string to denote symbol used for gaps.
Defaults to None.
Returns a string.
e.g.
>>> from Bio.Data import CodonTable
>>> table = CodonTable.ambiguous_dna_by_id[1]
>>> _translate_str("AAA", table)
'K'
>>> _translate_str("TAR", table)
'*'
>>> _translate_str("TAN", table)
'X'
>>> _translate_str("TAN", table, pos_stop="@")
'@'
>>> _translate_str("TA?", table)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: Codon 'TA?' is invalid
In a change from older versions of Biopython, partial codons are now
always regarded as an error (previously only checked if cds=True)
and will trigger a warning (likely to become an exception in a
future release).
If **cds=True**, the start and stop codons are checked, and the start
codon will be translated as methionine. The sequence must be a
whole number of codons.
>>> _translate_str("ATGCCCTAG", table, cds=True)
'MP'
>>> _translate_str("AAACCCTAG", table, cds=True)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: First codon 'AAA' is not a start codon
>>> _translate_str("ATGCCCTAGCCCTAG", table, cds=True)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: Extra in frame stop codon 'TAG' found.
"""
try:
table_id = int(table)
except ValueError:
# Assume it's a table name
# The same table can be used for RNA or DNA
try:
codon_table = CodonTable.ambiguous_generic_by_name[table]
except KeyError:
if isinstance(table, str):
raise ValueError(
"The Bio.Seq translate methods and function DO NOT "
"take a character string mapping table like the python "
"string object's translate method. "
"Use str(my_seq).translate(...) instead."
) from None
else:
raise TypeError("table argument must be integer or string") from None
except (AttributeError, TypeError):
# Assume it's a CodonTable object
if isinstance(table, CodonTable.CodonTable):
codon_table = table
else:
raise ValueError("Bad table argument") from None
else:
# Assume it's a table ID
# The same table can be used for RNA or DNA
codon_table = CodonTable.ambiguous_generic_by_id[table_id]
sequence = sequence.upper()
amino_acids = []
forward_table = codon_table.forward_table
stop_codons = codon_table.stop_codons
if codon_table.nucleotide_alphabet is not None:
valid_letters = set(codon_table.nucleotide_alphabet.upper())
else:
# Assume the worst case, ambiguous DNA or RNA:
valid_letters = set(
IUPACData.ambiguous_dna_letters.upper()
+ IUPACData.ambiguous_rna_letters.upper()
)
n = len(sequence)
# Check for tables with 'ambiguous' (dual-coding) stop codons:
dual_coding = [c for c in stop_codons if c in forward_table]
if dual_coding:
c = dual_coding[0]
if to_stop:
raise ValueError(
"You cannot use 'to_stop=True' with this table as it contains"
f" {len(dual_coding)} codon(s) which can be both STOP and an"
f" amino acid (e.g. '{c}' -> '{forward_table[c]}' or STOP)."
)
warnings.warn(
f"This table contains {len(dual_coding)} codon(s) which code(s) for"
f" both STOP and an amino acid (e.g. '{c}' -> '{forward_table[c]}'"
" or STOP). Such codons will be translated as amino acid.",
BiopythonWarning,
)
if cds:
if str(sequence[:3]).upper() not in codon_table.start_codons:
raise CodonTable.TranslationError(
f"First codon '{sequence[:3]}' is not a start codon"
)
if n % 3 != 0:
raise CodonTable.TranslationError(
f"Sequence length {n} is not a multiple of three"
)
if str(sequence[-3:]).upper() not in stop_codons:
raise CodonTable.TranslationError(
f"Final codon '{sequence[-3:]}' is not a stop codon"
)
# Don't translate the stop symbol, and manually translate the M
sequence = sequence[3:-3]
n -= 6
amino_acids = ["M"]
elif n % 3 != 0:
warnings.warn(
"Partial codon, len(sequence) not a multiple of three. "
"Explicitly trim the sequence or add trailing N before "
"translation. This may become an error in future.",
BiopythonWarning,
)
if gap is not None:
if not isinstance(gap, str):
raise TypeError("Gap character should be a single character string.")
elif len(gap) > 1:
raise ValueError("Gap character should be a single character string.")
for i in range(0, n - n % 3, 3):
codon = sequence[i : i + 3]
try:
amino_acids.append(forward_table[codon])
except (KeyError, CodonTable.TranslationError):
if codon in codon_table.stop_codons:
if cds:
raise CodonTable.TranslationError(
f"Extra in frame stop codon '{codon}' found."
) from None
if to_stop:
break
amino_acids.append(stop_symbol)
elif valid_letters.issuperset(set(codon)):
# Possible stop codon (e.g. NNN or TAN)
amino_acids.append(pos_stop)
elif gap is not None and codon == gap * 3:
# Gapped translation
amino_acids.append(gap)
else:
raise CodonTable.TranslationError(
f"Codon '{codon}' is invalid"
) from None
return "".join(amino_acids) |
Translate a nucleotide sequence into amino acids.
If given a string, returns a new string object. Given a Seq or
MutableSeq, returns a Seq object.
Arguments:
- table - Which codon table to use? This can be either a name
(string), an NCBI identifier (integer), or a CodonTable object
(useful for non-standard genetic codes). Defaults to the "Standard"
table.
- stop_symbol - Single character string, what to use for any
terminators, defaults to the asterisk, "*".
- to_stop - Boolean, defaults to False meaning do a full
translation continuing on past any stop codons
(translated as the specified stop_symbol). If
True, translation is terminated at the first in
frame stop codon (and the stop_symbol is not
appended to the returned protein sequence).
- cds - Boolean, indicates this is a complete CDS. If True, this
checks the sequence starts with a valid alternative start
codon (which will be translated as methionine, M), that the
sequence length is a multiple of three, and that there is a
single in frame stop codon at the end (this will be excluded
from the protein sequence, regardless of the to_stop option).
If these tests fail, an exception is raised.
- gap - Single character string to denote symbol used for gaps.
Defaults to None.
A simple string example using the default (standard) genetic code:
>>> coding_dna = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
>>> translate(coding_dna)
'VAIVMGR*KGAR*'
>>> translate(coding_dna, stop_symbol="@")
'VAIVMGR@KGAR@'
>>> translate(coding_dna, to_stop=True)
'VAIVMGR'
Now using NCBI table 2, where TGA is not a stop codon:
>>> translate(coding_dna, table=2)
'VAIVMGRWKGAR*'
>>> translate(coding_dna, table=2, to_stop=True)
'VAIVMGRWKGAR'
In fact this example uses an alternative start codon valid under NCBI
table 2, GTG, which means this example is a complete valid CDS which
when translated should really start with methionine (not valine):
>>> translate(coding_dna, table=2, cds=True)
'MAIVMGRWKGAR'
Note that if the sequence has no in-frame stop codon, then the to_stop
argument has no effect:
>>> coding_dna2 = "GTGGCCATTGTAATGGGCCGC"
>>> translate(coding_dna2)
'VAIVMGR'
>>> translate(coding_dna2, to_stop=True)
'VAIVMGR'
NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
or a stop codon. These are translated as "X". Any invalid codon
(e.g. "TA?" or "T-A") will throw a TranslationError.
It will however translate either DNA or RNA.
NOTE - Since version 1.71 Biopython contains codon tables with 'ambiguous
stop codons'. These are stop codons with unambiguous sequence but which
have a context dependent coding as STOP or as amino acid. With these tables
'to_stop' must be False (otherwise a ValueError is raised). The dual
coding codons will always be translated as amino acid, except for
'cds=True', where the last codon will be translated as STOP.
>>> coding_dna3 = "ATGGCACGGAAGTGA"
>>> translate(coding_dna3)
'MARK*'
>>> translate(coding_dna3, table=27) # Table 27: TGA -> STOP or W
'MARKW'
It will however raise a BiopythonWarning (not shown).
>>> translate(coding_dna3, table=27, cds=True)
'MARK'
>>> translate(coding_dna3, table=27, to_stop=True)
Traceback (most recent call last):
...
ValueError: You cannot use 'to_stop=True' with this table ... | def translate(
sequence, table="Standard", stop_symbol="*", to_stop=False, cds=False, gap=None
):
"""Translate a nucleotide sequence into amino acids.
If given a string, returns a new string object. Given a Seq or
MutableSeq, returns a Seq object.
Arguments:
- table - Which codon table to use? This can be either a name
(string), an NCBI identifier (integer), or a CodonTable object
(useful for non-standard genetic codes). Defaults to the "Standard"
table.
- stop_symbol - Single character string, what to use for any
terminators, defaults to the asterisk, "*".
- to_stop - Boolean, defaults to False meaning do a full
translation continuing on past any stop codons
(translated as the specified stop_symbol). If
True, translation is terminated at the first in
frame stop codon (and the stop_symbol is not
appended to the returned protein sequence).
- cds - Boolean, indicates this is a complete CDS. If True, this
checks the sequence starts with a valid alternative start
codon (which will be translated as methionine, M), that the
sequence length is a multiple of three, and that there is a
single in frame stop codon at the end (this will be excluded
from the protein sequence, regardless of the to_stop option).
If these tests fail, an exception is raised.
- gap - Single character string to denote symbol used for gaps.
Defaults to None.
A simple string example using the default (standard) genetic code:
>>> coding_dna = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
>>> translate(coding_dna)
'VAIVMGR*KGAR*'
>>> translate(coding_dna, stop_symbol="@")
'VAIVMGR@KGAR@'
>>> translate(coding_dna, to_stop=True)
'VAIVMGR'
Now using NCBI table 2, where TGA is not a stop codon:
>>> translate(coding_dna, table=2)
'VAIVMGRWKGAR*'
>>> translate(coding_dna, table=2, to_stop=True)
'VAIVMGRWKGAR'
In fact this example uses an alternative start codon valid under NCBI
table 2, GTG, which means this example is a complete valid CDS which
when translated should really start with methionine (not valine):
>>> translate(coding_dna, table=2, cds=True)
'MAIVMGRWKGAR'
Note that if the sequence has no in-frame stop codon, then the to_stop
argument has no effect:
>>> coding_dna2 = "GTGGCCATTGTAATGGGCCGC"
>>> translate(coding_dna2)
'VAIVMGR'
>>> translate(coding_dna2, to_stop=True)
'VAIVMGR'
NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
or a stop codon. These are translated as "X". Any invalid codon
(e.g. "TA?" or "T-A") will throw a TranslationError.
It will however translate either DNA or RNA.
NOTE - Since version 1.71 Biopython contains codon tables with 'ambiguous
stop codons'. These are stop codons with unambiguous sequence but which
have a context dependent coding as STOP or as amino acid. With these tables
'to_stop' must be False (otherwise a ValueError is raised). The dual
coding codons will always be translated as amino acid, except for
'cds=True', where the last codon will be translated as STOP.
>>> coding_dna3 = "ATGGCACGGAAGTGA"
>>> translate(coding_dna3)
'MARK*'
>>> translate(coding_dna3, table=27) # Table 27: TGA -> STOP or W
'MARKW'
It will however raise a BiopythonWarning (not shown).
>>> translate(coding_dna3, table=27, cds=True)
'MARK'
>>> translate(coding_dna3, table=27, to_stop=True)
Traceback (most recent call last):
...
ValueError: You cannot use 'to_stop=True' with this table ...
"""
if isinstance(sequence, Seq):
return sequence.translate(table, stop_symbol, to_stop, cds)
elif isinstance(sequence, MutableSeq):
# Return a Seq object
return Seq(sequence).translate(table, stop_symbol, to_stop, cds)
else:
# Assume it's a string, return a string
return _translate_str(sequence, table, stop_symbol, to_stop, cds, gap=gap) |
Return the reverse complement as a DNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> reverse_complement(my_seq)
'TCG'
>>> my_seq = Seq("CGA")
>>> reverse_complement(my_seq)
Seq('TCG')
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement(my_seq)
MutableSeq('TCG')
>>> my_seq
MutableSeq('CGA')
Any U in the sequence is treated as a T:
>>> reverse_complement(Seq("CGAUT"))
Seq('AATCG')
In contrast, ``reverse_complement_rna`` returns an RNA sequence:
>>> reverse_complement_rna(Seq("CGAUT"))
Seq('AAUCG')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> reverse_complement("ACGTUacgtuXYZxyz")
'zrxZRXaacgtAACGT'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement(my_seq, inplace=True)
MutableSeq('TCG')
>>> my_seq
MutableSeq('TCG')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``reverse_complement`` is called on a ``Seq`` object with
``inplace=True``. | def reverse_complement(sequence, inplace=False):
"""Return the reverse complement as a DNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> reverse_complement(my_seq)
'TCG'
>>> my_seq = Seq("CGA")
>>> reverse_complement(my_seq)
Seq('TCG')
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement(my_seq)
MutableSeq('TCG')
>>> my_seq
MutableSeq('CGA')
Any U in the sequence is treated as a T:
>>> reverse_complement(Seq("CGAUT"))
Seq('AATCG')
In contrast, ``reverse_complement_rna`` returns an RNA sequence:
>>> reverse_complement_rna(Seq("CGAUT"))
Seq('AAUCG')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> reverse_complement("ACGTUacgtuXYZxyz")
'zrxZRXaacgtAACGT'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement(my_seq, inplace=True)
MutableSeq('TCG')
>>> my_seq
MutableSeq('TCG')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``reverse_complement`` is called on a ``Seq`` object with
``inplace=True``.
"""
from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(sequence, (Seq, MutableSeq)):
return sequence.reverse_complement(inplace)
if isinstance(sequence, SeqRecord):
if inplace:
raise TypeError("SeqRecords are immutable")
return sequence.reverse_complement()
# Assume it's a string.
if inplace:
raise TypeError("strings are immutable")
sequence = sequence.encode("ASCII")
sequence = sequence.translate(_dna_complement_table)
sequence = sequence.decode("ASCII")
return sequence[::-1] |
Return the reverse complement as an RNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> reverse_complement_rna(my_seq)
'UCG'
>>> my_seq = Seq("CGA")
>>> reverse_complement_rna(my_seq)
Seq('UCG')
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement_rna(my_seq)
MutableSeq('UCG')
>>> my_seq
MutableSeq('CGA')
Any T in the sequence is treated as a U:
>>> reverse_complement_rna(Seq("CGAUT"))
Seq('AAUCG')
In contrast, ``reverse_complement`` returns a DNA sequence:
>>> reverse_complement(Seq("CGAUT"), inplace=False)
Seq('AATCG')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> reverse_complement_rna("ACGTUacgtuXYZxyz")
'zrxZRXaacguAACGU'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement_rna(my_seq, inplace=True)
MutableSeq('UCG')
>>> my_seq
MutableSeq('UCG')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``reverse_complement_rna`` is called on a ``Seq`` object with
``inplace=True``. | def reverse_complement_rna(sequence, inplace=False):
"""Return the reverse complement as an RNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> reverse_complement_rna(my_seq)
'UCG'
>>> my_seq = Seq("CGA")
>>> reverse_complement_rna(my_seq)
Seq('UCG')
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement_rna(my_seq)
MutableSeq('UCG')
>>> my_seq
MutableSeq('CGA')
Any T in the sequence is treated as a U:
>>> reverse_complement_rna(Seq("CGAUT"))
Seq('AAUCG')
In contrast, ``reverse_complement`` returns a DNA sequence:
>>> reverse_complement(Seq("CGAUT"), inplace=False)
Seq('AATCG')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> reverse_complement_rna("ACGTUacgtuXYZxyz")
'zrxZRXaacguAACGU'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> reverse_complement_rna(my_seq, inplace=True)
MutableSeq('UCG')
>>> my_seq
MutableSeq('UCG')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``reverse_complement_rna`` is called on a ``Seq`` object with
``inplace=True``.
"""
from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(sequence, (Seq, MutableSeq)):
return sequence.reverse_complement_rna(inplace)
if isinstance(sequence, SeqRecord):
if inplace:
raise TypeError("SeqRecords are immutable")
return sequence.reverse_complement_rna()
# Assume it's a string.
if inplace:
raise TypeError("strings are immutable")
sequence = sequence.encode("ASCII")
sequence = sequence.translate(_rna_complement_table)
sequence = sequence.decode("ASCII")
return sequence[::-1] |
Return the complement as a DNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> complement(my_seq)
'GCT'
>>> my_seq = Seq("CGA")
>>> complement(my_seq)
Seq('GCT')
>>> my_seq = MutableSeq("CGA")
>>> complement(my_seq)
MutableSeq('GCT')
>>> my_seq
MutableSeq('CGA')
Any U in the sequence is treated as a T:
>>> complement(Seq("CGAUT"))
Seq('GCTAA')
In contrast, ``complement_rna`` returns an RNA sequence:
>>> complement_rna(Seq("CGAUT"))
Seq('GCUAA')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> complement("ACGTUacgtuXYZxyz")
'TGCAAtgcaaXRZxrz'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> complement(my_seq, inplace=True)
MutableSeq('GCT')
>>> my_seq
MutableSeq('GCT')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``complement`` is called on a ``Seq`` object with
``inplace=True``. | def complement(sequence, inplace=False):
"""Return the complement as a DNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> complement(my_seq)
'GCT'
>>> my_seq = Seq("CGA")
>>> complement(my_seq)
Seq('GCT')
>>> my_seq = MutableSeq("CGA")
>>> complement(my_seq)
MutableSeq('GCT')
>>> my_seq
MutableSeq('CGA')
Any U in the sequence is treated as a T:
>>> complement(Seq("CGAUT"))
Seq('GCTAA')
In contrast, ``complement_rna`` returns an RNA sequence:
>>> complement_rna(Seq("CGAUT"))
Seq('GCUAA')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> complement("ACGTUacgtuXYZxyz")
'TGCAAtgcaaXRZxrz'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> complement(my_seq, inplace=True)
MutableSeq('GCT')
>>> my_seq
MutableSeq('GCT')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``complement`` is called on a ``Seq`` object with
``inplace=True``.
"""
from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(sequence, (Seq, MutableSeq)):
return sequence.complement(inplace)
if isinstance(sequence, SeqRecord):
if inplace:
raise TypeError("SeqRecords are immutable")
return sequence.complement()
# Assume it's a string.
if inplace is True:
raise TypeError("strings are immutable")
sequence = sequence.encode("ASCII")
sequence = sequence.translate(_dna_complement_table)
return sequence.decode("ASCII") |
Return the complement as an RNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> complement_rna(my_seq)
'GCU'
>>> my_seq = Seq("CGA")
>>> complement_rna(my_seq)
Seq('GCU')
>>> my_seq = MutableSeq("CGA")
>>> complement_rna(my_seq)
MutableSeq('GCU')
>>> my_seq
MutableSeq('CGA')
Any T in the sequence is treated as a U:
>>> complement_rna(Seq("CGAUT"))
Seq('GCUAA')
In contrast, ``complement`` returns a DNA sequence:
>>> complement(Seq("CGAUT"))
Seq('GCTAA')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> complement_rna("ACGTUacgtuXYZxyz")
'UGCAAugcaaXRZxrz'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> complement_rna(my_seq, inplace=True)
MutableSeq('GCU')
>>> my_seq
MutableSeq('GCU')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``complement_rna`` is called on a ``Seq`` object with
``inplace=True``. | def complement_rna(sequence, inplace=False):
"""Return the complement as an RNA sequence.
If given a string, returns a new string object.
Given a Seq object, returns a new Seq object.
Given a MutableSeq, returns a new MutableSeq object.
Given a SeqRecord object, returns a new SeqRecord object.
>>> my_seq = "CGA"
>>> complement_rna(my_seq)
'GCU'
>>> my_seq = Seq("CGA")
>>> complement_rna(my_seq)
Seq('GCU')
>>> my_seq = MutableSeq("CGA")
>>> complement_rna(my_seq)
MutableSeq('GCU')
>>> my_seq
MutableSeq('CGA')
Any T in the sequence is treated as a U:
>>> complement_rna(Seq("CGAUT"))
Seq('GCUAA')
In contrast, ``complement`` returns a DNA sequence:
>>> complement(Seq("CGAUT"))
Seq('GCTAA')
Supports lower- and upper-case characters, and unambiguous and
ambiguous nucleotides. All other characters are not converted:
>>> complement_rna("ACGTUacgtuXYZxyz")
'UGCAAugcaaXRZxrz'
The sequence is modified in-place and returned if inplace is True:
>>> my_seq = MutableSeq("CGA")
>>> complement_rna(my_seq, inplace=True)
MutableSeq('GCU')
>>> my_seq
MutableSeq('GCU')
As strings and ``Seq`` objects are immutable, a ``TypeError`` is
raised if ``complement_rna`` is called on a ``Seq`` object with
``inplace=True``.
"""
from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(sequence, (Seq, MutableSeq)):
return sequence.complement_rna(inplace)
if isinstance(sequence, SeqRecord):
if inplace:
raise TypeError("SeqRecords are immutable")
return sequence.complement_rna()
# Assume it's a string.
if inplace:
raise TypeError("strings are immutable")
sequence = sequence.encode("ASCII")
sequence = sequence.translate(_rna_complement_table)
return sequence.decode("ASCII") |
Run the Bio.Seq module's doctests (PRIVATE). | def _test():
"""Run the Bio.Seq module's doctests (PRIVATE)."""
print("Running doctests...")
import doctest
doctest.testmod(optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
print("Done") |
Decorate a function as having an attribute named 'previous'. | def function_with_previous(func: F) -> _FunctionWithPrevious[F]:
"""Decorate a function as having an attribute named 'previous'."""
function_with_previous = cast(_FunctionWithPrevious[F], func)
# Make sure the cast isn't a lie.
function_with_previous.previous = None
return function_with_previous |
Find the absolute path of Biopython's Tests directory.
Arguments:
start_dir -- Initial directory to begin lookup (default to current dir)
If the directory is not found up the filesystem's root directory, an
exception will be raised. | def find_test_dir(start_dir: Optional[str] = None) -> str:
"""Find the absolute path of Biopython's Tests directory.
Arguments:
start_dir -- Initial directory to begin lookup (default to current dir)
If the directory is not found up the filesystem's root directory, an
exception will be raised.
"""
if not start_dir:
# no callbacks in function signatures!
# defaults to the current directory
# (using __file__ would give the installed Biopython)
start_dir = "."
target = os.path.abspath(start_dir)
while True:
if os.path.isdir(os.path.join(target, "Bio")) and os.path.isdir(
os.path.join(target, "Tests")
):
# Good, we're in the Biopython root now
return os.path.abspath(os.path.join(target, "Tests"))
# Recurse up the tree
# TODO - Test this on Windows
new, tmp = os.path.split(target)
if target == new:
# Reached root
break
target = new
raise ValueError(
f"Not within Biopython source tree: {os.path.abspath(start_dir)!r}"
) |
Run doctest for the importing module. | def run_doctest(target_dir: Optional[str] = None, *args: Any, **kwargs: Any) -> None:
"""Run doctest for the importing module."""
import doctest
# default doctest options
default_kwargs = {"optionflags": doctest.ELLIPSIS}
kwargs.update(default_kwargs)
cur_dir = os.path.abspath(os.curdir)
print("Running doctests...")
try:
os.chdir(find_test_dir(target_dir))
doctest.testmod(*args, **kwargs)
finally:
# and revert back to initial directory
os.chdir(cur_dir)
print("Done") |
Read Affymetrix CEL file and return Record object.
CEL file format versions 3 and 4 are supported.
Please pass the CEL file format version (3 or 4) as the version
argument if known. If the version number is not specified, the parser
will attempt to detect the version from the file contents.
The Record object returned by this function stores the intensities from
the CEL file in record.intensities.
Currently, record.mask and record.outliers are not set when parsing
version 4 CEL files.
Example Usage:
>>> from Bio.Affy import CelFile
>>> with open("Affy/affy_v3_example.CEL") as handle:
... record = CelFile.read(handle)
...
>>> record.version == 3
True
>>> print("%i by %i array" % record.intensities.shape)
5 by 5 array
>>> with open("Affy/affy_v4_example.CEL", "rb") as handle:
... record = CelFile.read(handle, version=4)
...
>>> record.version == 4
True
>>> print("%i by %i array" % record.intensities.shape)
5 by 5 array | def read(handle, version=None):
"""Read Affymetrix CEL file and return Record object.
CEL file format versions 3 and 4 are supported.
Please pass the CEL file format version (3 or 4) as the version
argument if known. If the version number is not specified, the parser
will attempt to detect the version from the file contents.
The Record object returned by this function stores the intensities from
the CEL file in record.intensities.
Currently, record.mask and record.outliers are not set when parsing
version 4 CEL files.
Example Usage:
>>> from Bio.Affy import CelFile
>>> with open("Affy/affy_v3_example.CEL") as handle:
... record = CelFile.read(handle)
...
>>> record.version == 3
True
>>> print("%i by %i array" % record.intensities.shape)
5 by 5 array
>>> with open("Affy/affy_v4_example.CEL", "rb") as handle:
... record = CelFile.read(handle, version=4)
...
>>> record.version == 4
True
>>> print("%i by %i array" % record.intensities.shape)
5 by 5 array
"""
try:
data = handle.read(0)
except AttributeError:
raise ValueError("handle should be a file handle") from None
data = handle.read(4)
if not data:
raise ValueError("Empty file.")
if data == b"[CEL":
raise ValueError("CEL file in version 3 format should be opened in text mode")
if data == "[CEL":
# Version 3 format. Continue to read the header here before passing
# control to _read_v3 to avoid having to seek to the beginning of
# the file.
data += next(handle)
if data.strip() != "[CEL]":
raise ValueError("Failed to parse Affy Version 3 CEL file.")
line = next(handle)
keyword, value = line.split("=", 1)
if keyword != "Version":
raise ValueError("Failed to parse Affy Version 3 CEL file.")
version = int(value)
if version != 3:
raise ValueError("Incorrect version number in Affy Version 3 CEL file.")
return _read_v3(handle)
try:
magicNumber = struct.unpack("<i", data)
except TypeError:
raise ValueError(
"CEL file in version 4 format should be opened in binary mode"
) from None
except struct.error:
raise ValueError(
"Failed to read magic number from Affy Version 4 CEL file"
) from None
if magicNumber != (64,):
raise ValueError("Incorrect magic number in Affy Version 4 CEL file")
return _read_v4(handle) |
Write three-column output: position, amino acid in the representative sequence, ic_vector value. | def print_info_content(summary_info, fout=None, rep_record=0):
"""Write three-column output: position, amino acid in the representative sequence, ic_vector value."""
warnings.warn(
"The `print_info_content` function is deprecated and will be removed "
"in a future release of Biopython.",
BiopythonDeprecationWarning,
)
fout = fout or sys.stdout
if not summary_info.ic_vector:
summary_info.information_content()
rep_sequence = summary_info.alignment[rep_record]
for pos, (aa, ic) in enumerate(zip(rep_sequence, summary_info.ic_vector)):
fout.write("%d %s %.3f\n" % (pos, aa, ic)) |
Calculate dN and dS of the given two sequences.
Available methods:
- NG86 - `Nei and Gojobori (1986)`_ (PMID 3444411).
- LWL85 - `Li et al. (1985)`_ (PMID 3916709).
- ML - `Goldman and Yang (1994)`_ (PMID 7968486).
- YN00 - `Yang and Nielsen (2000)`_ (PMID 10666704).
.. _`Nei and Gojobori (1986)`: http://www.ncbi.nlm.nih.gov/pubmed/3444411
.. _`Li et al. (1985)`: http://www.ncbi.nlm.nih.gov/pubmed/3916709
.. _`Goldman and Yang (1994)`: http://mbe.oxfordjournals.org/content/11/5/725
.. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236
Arguments:
- k - transition/transversion rate ratio
- cfreq - Current codon frequency vector can only be specified
when you are using ML method. Possible ways of
getting cfreq are: F1x4, F3x4 and F61. | def calculate_dn_ds(alignment, method="NG86", codon_table=None, k=1, cfreq=None):
"""Calculate dN and dS of the given two sequences.
Available methods:
- NG86 - `Nei and Gojobori (1986)`_ (PMID 3444411).
- LWL85 - `Li et al. (1985)`_ (PMID 3916709).
- ML - `Goldman and Yang (1994)`_ (PMID 7968486).
- YN00 - `Yang and Nielsen (2000)`_ (PMID 10666704).
.. _`Nei and Gojobori (1986)`: http://www.ncbi.nlm.nih.gov/pubmed/3444411
.. _`Li et al. (1985)`: http://www.ncbi.nlm.nih.gov/pubmed/3916709
.. _`Goldman and Yang (1994)`: http://mbe.oxfordjournals.org/content/11/5/725
.. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236
Arguments:
- k - transition/transversion rate ratio
- cfreq - Current codon frequency vector can only be specified
when you are using ML method. Possible ways of
getting cfreq are: F1x4, F3x4 and F61.
"""
if cfreq is None:
cfreq = "F3x4"
elif cfreq is not None and method != "ML":
raise ValueError("cfreq can only be specified when you are using ML method")
elif cfreq not in ("F1x4", "F3x4", "F61"):
raise ValueError("cfreq must be 'F1x4', 'F3x4', or 'F61'")
if codon_table is None:
codon_table = CodonTable.generic_by_id[1]
codons1 = []
codons2 = []
sequence1, sequence2 = alignment.sequences
try:
sequence1 = sequence1.seq # stupid SeqRecord
except AttributeError:
pass
sequence1 = str(sequence1)
try:
sequence2 = sequence2.seq # stupid SeqRecord
except AttributeError:
pass
sequence2 = str(sequence2)
aligned1, aligned2 = alignment.aligned
for block1, block2 in zip(aligned1, aligned2):
start1, end1 = block1
start2, end2 = block2
codons1.extend(sequence1[i : i + 3] for i in range(start1, end1, 3))
codons2.extend(sequence2[i : i + 3] for i in range(start2, end2, 3))
bases = {"A", "T", "C", "G"}
for codon1 in codons1:
if not all(nucleotide in bases for nucleotide in codon1):
raise ValueError(
f"Unrecognized character in {codon1} in the target sequence"
" (Codons consist of A, T, C or G)"
)
for codon2 in codons2:
if not all(nucleotide in bases for nucleotide in codon2):
raise ValueError(
f"Unrecognized character in {codon2} in the query sequence"
" (Codons consist of A, T, C or G)"
)
if method == "ML":
return _ml(codons1, codons2, cfreq, codon_table)
elif method == "NG86":
return _ng86(codons1, codons2, k, codon_table)
elif method == "LWL85":
return _lwl85(codons1, codons2, codon_table)
elif method == "YN00":
return _yn00(codons1, codons2, codon_table)
else:
raise ValueError(f"Unknown method '{method}'") |
NG86 method main function (PRIVATE). | def _ng86(codons1, codons2, k, codon_table):
"""NG86 method main function (PRIVATE)."""
S_sites1, N_sites1 = _count_site_NG86(codons1, codon_table=codon_table, k=k)
S_sites2, N_sites2 = _count_site_NG86(codons2, codon_table=codon_table, k=k)
S_sites = (S_sites1 + S_sites2) / 2.0
N_sites = (N_sites1 + N_sites2) / 2.0
SN = [0, 0]
for codon1, codon2 in zip(codons1, codons2):
SN = [
m + n
for m, n in zip(
SN, _count_diff_NG86(codon1, codon2, codon_table=codon_table)
)
]
ps = SN[0] / S_sites
pn = SN[1] / N_sites
if ps < 3 / 4:
dS = abs(-3.0 / 4 * log(1 - 4.0 / 3 * ps))
else:
dS = -1
if pn < 3 / 4:
dN = abs(-3.0 / 4 * log(1 - 4.0 / 3 * pn))
else:
dN = -1
return dN, dS |
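The last step above is the Jukes-Cantor correction d = -3/4 * ln(1 - 4/3 * p), which inflates the raw proportion of differences to account for multiple substitutions at the same site; -1 is returned when p >= 3/4 and the logarithm is undefined. A worked example (not part of the source):

from math import log

ps = 0.10  # raw proportion of synonymous differences
dS = -3.0 / 4 * log(1 - 4.0 / 3 * ps)
print(round(dS, 4))  # 0.1073, slightly above the uncorrected 0.10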
Count synonymous and non-synonymous sites of a list of codons (PRIVATE).
Arguments:
- codons - A list of three letter codons.
- k - transition/transversion rate ratio. | def _count_site_NG86(codons, codon_table, k=1):
"""Count synonymous and non-synonymous sites of a list of codons (PRIVATE).
Arguments:
- codons - A list of three letter codons.
- k - transition/transversion rate ratio.
"""
S_site = 0 # synonymous sites
N_site = 0 # non-synonymous sites
purine = ("A", "G")
pyrimidine = ("T", "C")
bases = ("A", "T", "C", "G")
for codon in codons:
neighbor_codon = {"transition": [], "transversion": []}
# classify neighbor codons
codon = codon.replace("U", "T")
for i, nucleotide in enumerate(codon):
for base in bases:
if nucleotide == base:
pass
elif nucleotide in purine and base in purine:
codon_chars = list(codon)
codon_chars[i] = base
this_codon = "".join(codon_chars)
neighbor_codon["transition"].append(this_codon)
elif nucleotide in pyrimidine and base in pyrimidine:
codon_chars = list(codon)
codon_chars[i] = base
this_codon = "".join(codon_chars)
neighbor_codon["transition"].append(this_codon)
else:
codon_chars = list(codon)
codon_chars[i] = base
this_codon = "".join(codon_chars)
neighbor_codon["transversion"].append(this_codon)
# count synonymous and non-synonymous sites
aa = codon_table.forward_table[codon]
this_codon_N_site = this_codon_S_site = 0
for neighbor in neighbor_codon["transition"]:
if neighbor in codon_table.stop_codons:
this_codon_N_site += k
elif codon_table.forward_table[neighbor] == aa:
this_codon_S_site += k
else:
this_codon_N_site += k
for neighbor in neighbor_codon["transversion"]:
if neighbor in codon_table.stop_codons:
this_codon_N_site += 1
elif codon_table.forward_table[neighbor] == aa:
this_codon_S_site += 1
else:
this_codon_N_site += 1
norm_const = (this_codon_N_site + this_codon_S_site) / 3
S_site += this_codon_S_site / norm_const
N_site += this_codon_N_site / norm_const
return (S_site, N_site) |
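A worked example (not part of the source): for the single codon TTT (Phe) under the standard code with k=1, only the third-position change TTT -> TTC is synonymous, while the other eight single-nucleotide neighbours change the amino acid; normalising each codon to three sites gives S = 1/3 and N = 8/3.

from Bio.Data import CodonTable

standard_table = CodonTable.unambiguous_dna_by_id[1]
S, N = _count_site_NG86(["TTT"], codon_table=standard_table, k=1)
assert round(S, 6) == round(1 / 3, 6)
assert round(N, 6) == round(8 / 3, 6)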
Count differences between two codons, three-letter string (PRIVATE).
The function will take multiple pathways from codon1 to codon2
into account. | def _count_diff_NG86(codon1, codon2, codon_table):
"""Count differences between two codons, three-letter string (PRIVATE).
The function will take multiple pathways from codon1 to codon2
into account.
"""
SN = [0, 0] # synonymous and nonsynonymous counts
if codon1 == codon2:
return SN
else:
diff_pos = [
i
for i, (nucleotide1, nucleotide2) in enumerate(zip(codon1, codon2))
if nucleotide1 != nucleotide2
]
def compare_codon(codon1, codon2, codon_table, weight=1):
"""Compare two codon accounting for different pathways."""
sd = nd = 0
if len(set(map(codon_table.forward_table.get, [codon1, codon2]))) == 1:
sd += weight
else:
nd += weight
return (sd, nd)
if len(diff_pos) == 1:
SN = [
i + j
for i, j in zip(
SN, compare_codon(codon1, codon2, codon_table=codon_table)
)
]
elif len(diff_pos) == 2:
for i in diff_pos:
temp_codon = codon1[:i] + codon2[i] + codon1[i + 1 :]
SN = [
i + j
for i, j in zip(
SN,
compare_codon(
codon1, temp_codon, codon_table=codon_table, weight=0.5
),
)
]
SN = [
i + j
for i, j in zip(
SN,
compare_codon(
temp_codon, codon2, codon_table=codon_table, weight=0.5
),
)
]
elif len(diff_pos) == 3:
paths = list(permutations([0, 1, 2], 3))
tmp_codon = []
for index1, index2, index3 in paths:
tmp1 = codon1[:index1] + codon2[index1] + codon1[index1 + 1 :]
tmp2 = tmp1[:index2] + codon2[index2] + tmp1[index2 + 1 :]
tmp_codon.append((tmp1, tmp2))
SN = [
i + j
for i, j in zip(
SN, compare_codon(codon1, tmp1, codon_table, weight=0.5 / 3)
)
]
SN = [
i + j
for i, j in zip(
SN, compare_codon(tmp1, tmp2, codon_table, weight=0.5 / 3)
)
]
SN = [
i + j
for i, j in zip(
SN, compare_codon(tmp2, codon2, codon_table, weight=0.5 / 3)
)
]
return SN |
LWL85 method main function (PRIVATE).
Nomenclature is according to Li et al. (1985), PMID 3916709. | def _lwl85(codons1, codons2, codon_table):
"""LWL85 method main function (PRIVATE).
Nomenclature is according to Li et al. (1985), PMID 3916709.
"""
codon_fold_dict = _get_codon_fold(codon_table)
# count number of sites in different degenerate classes
fold0 = [0, 0]
fold2 = [0, 0]
fold4 = [0, 0]
for codon in codons1 + codons2:
fold_num = codon_fold_dict[codon]
for f in fold_num:
if f == "0":
fold0[0] += 1
elif f == "2":
fold2[0] += 1
elif f == "4":
fold4[0] += 1
L = [sum(fold0) / 2.0, sum(fold2) / 2.0, sum(fold4) / 2.0]
# count number of differences in different degenerate classes
PQ = [0] * 6 # with P0, P2, P4, Q0, Q2, Q4 in each position
for codon1, codon2 in zip(codons1, codons2):
if codon1 == codon2:
continue
PQ = [
i + j
for i, j in zip(PQ, _diff_codon(codon1, codon2, fold_dict=codon_fold_dict))
]
PQ = [i / j for i, j in zip(PQ, L * 2)]
P = PQ[:3]
Q = PQ[3:]
A = [
(1.0 / 2) * log(1.0 / (1 - 2 * i - j)) - (1.0 / 4) * log(1.0 / (1 - 2 * j))
for i, j in zip(P, Q)
]
B = [(1.0 / 2) * log(1.0 / (1 - 2 * i)) for i in Q]
# Two-fold degenerate sites are weighted by L[1], following Li et al. (1985).
dS = 3 * (L[1] * A[1] + L[2] * (A[2] + B[2])) / (L[1] + 3 * L[2])
dN = 3 * (L[1] * B[1] + L[0] * (A[0] + B[0])) / (2 * L[1] + 3 * L[0])
    return dN, dS
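
# A small, self-contained sketch of _lwl85 (illustrative only; the codon
# lists are toy data chosen so that the logarithms stay in range).
def _example_lwl85():
    from Bio.Data import CodonTable
    table = CodonTable.generic_by_id[1]
    codons1 = ["TTT", "GAA", "GAA", "GCT", "ATG"]
    codons2 = ["TTC", "GAA", "GAA", "GCT", "ATG"]
    # The only difference (TTT -> TTC) is a synonymous transition,
    # so dN is 0 and dS is positive.
    dN, dS = _lwl85(codons1, codons2, table)
    print(dN, dS)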


def _get_codon_fold(codon_table):
"""Classify different position in a codon into different folds (PRIVATE)."""
fold_table = {}
forward_table = codon_table.forward_table
bases = {"A", "T", "C", "G"}
for codon in forward_table:
if "U" in codon:
continue
fold = ""
codon_base_lst = list(codon)
for i, base in enumerate(codon_base_lst):
other_bases = bases - set(base)
aa = []
for other_base in other_bases:
codon_base_lst[i] = other_base
try:
aa.append(forward_table["".join(codon_base_lst)])
except KeyError:
aa.append("stop")
if aa.count(forward_table[codon]) == 0:
fold += "0"
elif aa.count(forward_table[codon]) in (1, 2):
fold += "2"
elif aa.count(forward_table[codon]) == 3:
fold += "4"
else:
                raise RuntimeError(
                    "Unknown error: cannot assign the position to a fold"
                )
codon_base_lst[i] = base
fold_table[codon] = fold
    return fold_table
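
# A quick sketch of _get_codon_fold (illustrative only): fold strings give
# the degeneracy class of each codon position ("0", "2" or "4").
def _example_get_codon_fold():
    from Bio.Data import CodonTable
    folds = _get_codon_fold(CodonTable.generic_by_id[1])
    print(folds["GCT"])  # '004': the third position of alanine is 4-fold
    print(folds["ATG"])  # '000': every change to ATG (Met) is nonsynonymous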


def _diff_codon(codon1, codon2, fold_dict):
"""Count number of different substitution types between two codons (PRIVATE).
    Returns the tuple (P0, P2, P4, Q0, Q2, Q4).
    Nomenclature is according to Li et al. (1985), PMID 3916709.
"""
P0 = P2 = P4 = Q0 = Q2 = Q4 = 0
fold_num = fold_dict[codon1]
purine = ("A", "G")
pyrimidine = ("T", "C")
for n, (nucleotide1, nucleotide2) in enumerate(zip(codon1, codon2)):
if nucleotide1 == nucleotide2:
pass
elif nucleotide1 in purine and nucleotide2 in purine:
if fold_num[n] == "0":
P0 += 1
elif fold_num[n] == "2":
P2 += 1
elif fold_num[n] == "4":
P4 += 1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
elif nucleotide1 in pyrimidine and nucleotide2 in pyrimidine:
if fold_num[n] == "0":
P0 += 1
elif fold_num[n] == "2":
P2 += 1
elif fold_num[n] == "4":
P4 += 1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
else:
# nucleotide1 in purine and nucleotide2 in pyrimidine, or
# nucleotide1 in pyrimidine and nucleotide2 in purine
if fold_num[n] == "0":
Q0 += 1
elif fold_num[n] == "2":
Q2 += 1
elif fold_num[n] == "4":
Q4 += 1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
    return (P0, P2, P4, Q0, Q2, Q4)
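
# A short sketch of _diff_codon (illustrative only), reusing the fold
# dictionary built by _get_codon_fold above.
def _example_diff_codon():
    from Bio.Data import CodonTable
    fold_dict = _get_codon_fold(CodonTable.generic_by_id[1])
    # TTT -> TTC is a pyrimidine transition at a 2-fold site, so P2 = 1.
    print(_diff_codon("TTT", "TTC", fold_dict))  # (0, 1, 0, 0, 0, 0)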


def _yn00(codons1, codons2, codon_table):
"""YN00 method main function (PRIVATE).
Nomenclature is according to Yang and Nielsen (2000), PMID 10666704.
"""
from scipy.linalg import expm
fcodon = [
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
]
codon_fold_dict = _get_codon_fold(codon_table)
fold0_cnt = defaultdict(int)
fold4_cnt = defaultdict(int)
for codon in codons1 + codons2:
# count sites at different codon position
fcodon[0][codon[0]] += 1
fcodon[1][codon[1]] += 1
fcodon[2][codon[2]] += 1
# count sites in different degenerate fold class
fold_num = codon_fold_dict[codon]
for i, f in enumerate(fold_num):
if f == "0":
fold0_cnt[codon[i]] += 1
elif f == "4":
fold4_cnt[codon[i]] += 1
f0_total = sum(fold0_cnt.values())
f4_total = sum(fold4_cnt.values())
    # normalize each degeneracy class separately
    for codon in fold0_cnt:
        fold0_cnt[codon] /= f0_total
    for codon in fold4_cnt:
        fold4_cnt[codon] /= f4_total
# TODO:
# the initial kappa is different from what yn00 gives,
# try to find the problem.
TV = _get_TV(codons1, codons2, codon_table=codon_table)
k04 = (_get_kappa_t(fold0_cnt, TV), _get_kappa_t(fold4_cnt, TV))
kappa = (f0_total * k04[0] + f4_total * k04[1]) / (f0_total + f4_total)
# kappa = 2.4285
# count synonymous sites and non-synonymous sites
for i in range(3):
tot = sum(fcodon[i].values())
fcodon[i] = {j: k / tot for j, k in fcodon[i].items()}
pi = defaultdict(int)
for codon in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in codon:
pi[codon] = 0
for codon in codons1 + codons2:
pi[codon] += 1
S_sites1, N_sites1, bfreqSN1 = _count_site_YN00(
codons1, codons2, pi, k=kappa, codon_table=codon_table
)
S_sites2, N_sites2, bfreqSN2 = _count_site_YN00(
codons2, codons1, pi, k=kappa, codon_table=codon_table
)
N_sites = (N_sites1 + N_sites2) / 2
S_sites = (S_sites1 + S_sites2) / 2
bfreqSN = [{"A": 0, "T": 0, "C": 0, "G": 0}, {"A": 0, "T": 0, "C": 0, "G": 0}]
for i in range(2):
for base in ("A", "T", "C", "G"):
bfreqSN[i][base] = (bfreqSN1[i][base] + bfreqSN2[i][base]) / 2
# use NG86 method to get initial t and w
SN = [0, 0]
for codon1, codon2 in zip(codons1, codons2):
SN = [
m + n
for m, n in zip(
SN, _count_diff_NG86(codon1, codon2, codon_table=codon_table)
)
]
ps = SN[0] / S_sites
pn = SN[1] / N_sites
p = sum(SN) / (S_sites + N_sites)
w = log(1 - 4.0 / 3 * pn) / log(1 - 4.0 / 3 * ps)
t = -3 / 4 * log(1 - 4 / 3 * p)
tolerance = 1e-5
dSdN_pre = [0, 0]
for temp in range(20):
# count synonymous and nonsynonymous differences under kappa, w, t
codons = [
codon
for codon in list(codon_table.forward_table.keys())
+ codon_table.stop_codons
if "U" not in codon
]
Q = _get_Q(pi, kappa, w, codons, codon_table)
P = expm(Q * t)
TV = [0, 0, 0, 0] # synonymous/nonsynonymous transition/transversion
codon_npath = Counter(zip(codons1, codons2))
for (nucleotide1, nucleotide2), count in codon_npath.items():
tv = _count_diff_YN00(nucleotide1, nucleotide2, P, codons, codon_table)
TV = [m + n * count for m, n in zip(TV, tv)]
TV = (TV[0] / S_sites, TV[1] / S_sites), (TV[2] / N_sites, TV[3] / N_sites)
        # According to the DistanceF84() function of yn00.c in PAML,
        # the t (eq. 10) that appears in PMID 10666704 corresponds to dS and dN.
dSdN = []
for f, tv in zip(bfreqSN, TV):
dSdN.append(_get_kappa_t(f, tv, t=True))
t = dSdN[0] * 3 * S_sites / (S_sites + N_sites) + dSdN[1] * 3 * N_sites / (
S_sites + N_sites
)
w = dSdN[1] / dSdN[0]
if all(abs(i - j) < tolerance for i, j in zip(dSdN, dSdN_pre)):
return dSdN[1], dSdN[0] # dN, dS
        dSdN_pre = dSdN


def _get_TV(codons1, codons2, codon_table):
"""Get TV (PRIVATE).
Arguments:
- T - proportions of transitional differences
- V - proportions of transversional differences
"""
purine = ("A", "G")
pyrimidine = ("C", "T")
TV = [0, 0]
sites = 0
for codon1, codon2 in zip(codons1, codons2):
for nucleotide1, nucleotide2 in zip(codon1, codon2):
if nucleotide1 == nucleotide2:
pass
elif nucleotide1 in purine and nucleotide2 in purine:
TV[0] += 1
elif nucleotide1 in pyrimidine and nucleotide2 in pyrimidine:
TV[0] += 1
else:
TV[1] += 1
sites += 1
    return (TV[0] / sites, TV[1] / sites)
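
# A toy sketch of _get_TV (illustrative only): one transition and one
# transversion over six aligned sites.
def _example_get_TV():
    from Bio.Data import CodonTable
    table = CodonTable.generic_by_id[1]
    T, V = _get_TV(["TTT", "GAA"], ["TTC", "GAT"], codon_table=table)
    print(T, V)  # 1/6 each: T<->C is a transition, A<->T a transversion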


def _get_kappa_t(pi, TV, t=False):
"""Calculate kappa (PRIVATE).
The following formula and variable names are according to PMID: 10666704
"""
pi["Y"] = pi["T"] + pi["C"]
pi["R"] = pi["A"] + pi["G"]
A = (
2 * (pi["T"] * pi["C"] + pi["A"] * pi["G"])
+ 2
* (
pi["T"] * pi["C"] * pi["R"] / pi["Y"]
+ pi["A"] * pi["G"] * pi["Y"] / pi["R"]
)
* (1 - TV[1] / (2 * pi["Y"] * pi["R"]))
- TV[0]
) / (2 * (pi["T"] * pi["C"] / pi["Y"] + pi["A"] * pi["G"] / pi["R"]))
B = 1 - TV[1] / (2 * pi["Y"] * pi["R"])
a = -0.5 * log(A) # this seems to be an error in YANG's original paper
b = -0.5 * log(B)
kappaF84 = a / b - 1
if t is False:
kappaHKY85 = 1 + (
pi["T"] * pi["C"] / pi["Y"] + pi["A"] * pi["G"] / pi["R"]
) * kappaF84 / (pi["T"] * pi["C"] + pi["A"] * pi["G"])
return kappaHKY85
else:
t = (
4 * pi["T"] * pi["C"] * (1 + kappaF84 / pi["Y"])
+ 4 * pi["A"] * pi["G"] * (1 + kappaF84 / pi["R"])
+ 4 * pi["Y"] * pi["R"]
) * b
        return t
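
# A numeric sketch of _get_kappa_t (illustrative only; the base frequencies
# and (transition, transversion) proportions are toy values chosen to keep
# the logarithms in range). Note that the function adds "Y" and "R" entries
# to the pi dict in place.
def _example_get_kappa_t():
    pi = {"A": 0.25, "T": 0.25, "C": 0.25, "G": 0.25}
    TV = (0.1, 0.05)
    print(_get_kappa_t(pi, TV))          # HKY85 kappa estimate
    print(_get_kappa_t(pi, TV, t=True))  # F84 distance t instead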


def _count_site_YN00(codons1, codons2, pi, k, codon_table):
"""Site counting method from Ina / Yang and Nielsen (PRIVATE).
Method from `Ina (1995)`_ as modified by `Yang and Nielsen (2000)`_.
This will return the total number of synonymous and nonsynonymous sites
and base frequencies in each category. The function is equivalent to
the ``CountSites()`` function in ``yn00.c`` of PAML.
.. _`Ina (1995)`: https://doi.org/10.1007/BF00167113
.. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236
"""
length = len(codons1)
assert length == len(codons2)
purine = ("A", "G")
pyrimidine = ("T", "C")
bases = ("A", "T", "C", "G")
codon_dict = codon_table.forward_table
stop = codon_table.stop_codons
codon_npath = Counter(zip(codons1, codons2))
S_sites = N_sites = 0
freqSN = [
{"A": 0, "T": 0, "C": 0, "G": 0}, # synonymous
{"A": 0, "T": 0, "C": 0, "G": 0},
] # nonsynonymous
for codon_pair, npath in codon_npath.items():
codon = codon_pair[0]
S = N = 0
for pos in range(3):
for base in bases:
if codon[pos] == base:
continue
neighbor_codon = codon[:pos] + base + codon[pos + 1 :]
if neighbor_codon in stop:
continue
weight = pi[neighbor_codon]
if codon[pos] in pyrimidine and base in pyrimidine:
weight *= k
elif codon[pos] in purine and base in purine:
weight *= k
if codon_dict[codon] == codon_dict[neighbor_codon]:
S += weight
freqSN[0][base] += weight * npath
else:
N += weight
freqSN[1][base] += weight * npath
S_sites += S * npath
N_sites += N * npath
norm_const = 3 * length / (S_sites + N_sites)
S_sites *= norm_const
N_sites *= norm_const
for i in freqSN:
norm_const = sum(i.values())
for b in i:
i[b] /= norm_const
    return S_sites, N_sites, freqSN


def _count_diff_YN00(codon1, codon2, P, codons, codon_table):
"""Count differences between two codons (three-letter string; PRIVATE).
The function will weighted multiple pathways from codon1 to codon2
according to P matrix of codon substitution. The proportion
of transition and transversion (TV) will also be calculated in
the function.
"""
TV = [
0,
0,
0,
0,
] # transition and transversion counts (synonymous and nonsynonymous)
if codon1 == codon2:
return TV
else:
diff_pos = [
i
for i, (nucleotide1, nucleotide2) in enumerate(zip(codon1, codon2))
if nucleotide1 != nucleotide2
]
def count_TV(codon1, codon2, diff, codon_table, weight=1):
purine = ("A", "G")
pyrimidine = ("T", "C")
dic = codon_table.forward_table
stop = codon_table.stop_codons
if codon1 in stop or codon2 in stop:
# stop codon is always considered as nonsynonymous
if codon1[diff] in purine and codon2[diff] in purine:
return [0, 0, weight, 0]
elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:
return [0, 0, weight, 0]
else:
return [0, 0, 0, weight]
elif dic[codon1] == dic[codon2]:
if codon1[diff] in purine and codon2[diff] in purine:
return [weight, 0, 0, 0]
elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:
return [weight, 0, 0, 0]
else:
return [0, weight, 0, 0]
else:
if codon1[diff] in purine and codon2[diff] in purine:
return [0, 0, weight, 0]
elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:
return [0, 0, weight, 0]
else:
return [0, 0, 0, weight]
if len(diff_pos) == 1:
TV = [
p + q
for p, q in zip(TV, count_TV(codon1, codon2, diff_pos[0], codon_table))
]
elif len(diff_pos) == 2:
tmp_codons = [codon1[:i] + codon2[i] + codon1[i + 1 :] for i in diff_pos]
path_prob = []
for codon in tmp_codons:
codon_idx = list(map(codons.index, [codon1, codon, codon2]))
prob = (P[codon_idx[0], codon_idx[1]], P[codon_idx[1], codon_idx[2]])
path_prob.append(prob[0] * prob[1])
path_prob = [2 * i / sum(path_prob) for i in path_prob]
for n, i in enumerate(diff_pos):
codon = codon1[:i] + codon2[i] + codon1[i + 1 :]
TV = [
p + q
for p, q in zip(
TV,
count_TV(
codon1, codon, i, codon_table, weight=path_prob[n] / 2
),
)
]
            other = diff_pos[1 - n]  # position of the second change on this path
            TV = [
                p + q
                for p, q in zip(
                    TV,
                    count_TV(
                        codon, codon2, other, codon_table, weight=path_prob[n] / 2
                    ),
                )
            ]
elif len(diff_pos) == 3:
paths = list(permutations([0, 1, 2], 3))
path_prob = []
tmp_codons = []
for index1, index2, index3 in paths:
tmp1 = codon1[:index1] + codon2[index1] + codon1[index1 + 1 :]
tmp2 = tmp1[:index2] + codon2[index2] + tmp1[index2 + 1 :]
tmp_codons.append((tmp1, tmp2))
codon_idx = list(map(codons.index, [codon1, tmp1, tmp2, codon2]))
prob = (
P[codon_idx[0], codon_idx[1]],
P[codon_idx[1], codon_idx[2]],
P[codon_idx[2], codon_idx[3]],
)
path_prob.append(prob[0] * prob[1] * prob[2])
path_prob = [3 * i / sum(path_prob) for i in path_prob]
for codon, j, k in zip(tmp_codons, path_prob, paths):
TV = [
p + q
for p, q in zip(
TV, count_TV(codon1, codon[0], k[0], codon_table, weight=j / 3)
)
]
TV = [
p + q
for p, q in zip(
TV,
count_TV(codon[0], codon[1], k[1], codon_table, weight=j / 3),
)
]
            TV = [
                p + q
                for p, q in zip(
                    TV, count_TV(codon[1], codon2, k[2], codon_table, weight=j / 3)
                )
            ]
    return TV


def _ml(codons1, codons2, cmethod, codon_table):
"""ML method main function (PRIVATE)."""
from scipy.optimize import minimize
pi = _get_pi(codons1, codons2, cmethod, codon_table=codon_table)
codon_cnt = Counter(zip(codons1, codons2))
codons = [
codon
for codon in list(codon_table.forward_table.keys()) + codon_table.stop_codons
if "U" not in codon
]
# apply optimization
def func(
params, pi=pi, codon_cnt=codon_cnt, codons=codons, codon_table=codon_table
):
"""Temporary function, params = [t, k, w]."""
return -_likelihood_func(
params[0],
params[1],
params[2],
pi,
codon_cnt,
codons=codons,
codon_table=codon_table,
)
# count sites
opt_res = minimize(
func,
[1, 0.1, 2],
method="L-BFGS-B",
bounds=((1e-10, 20), (1e-10, 20), (1e-10, 10)),
tol=1e-5,
)
t, k, w = opt_res.x
Q = _get_Q(pi, k, w, codons, codon_table)
Sd = Nd = 0
for i, codon1 in enumerate(codons):
for j, codon2 in enumerate(codons):
if i != j:
try:
if (
codon_table.forward_table[codon1]
== codon_table.forward_table[codon2]
):
# synonymous count
Sd += pi[codon1] * Q[i, j]
else:
# nonsynonymous count
Nd += pi[codon1] * Q[i, j]
except KeyError:
# This is probably due to stop codons
pass
Sd *= t
Nd *= t
# count differences (with w fixed to 1)
def func_w1(
params, pi=pi, codon_cnt=codon_cnt, codons=codons, codon_table=codon_table
):
"""Temporary function, params = [t, k]. w is fixed to 1."""
return -_likelihood_func(
params[0],
params[1],
1.0,
pi,
codon_cnt,
codons=codons,
codon_table=codon_table,
)
opt_res = minimize(
func_w1,
[1, 0.1],
method="L-BFGS-B",
bounds=((1e-10, 20), (1e-10, 20)),
tol=1e-5,
)
t, k = opt_res.x
w = 1.0
Q = _get_Q(pi, k, w, codons, codon_table)
rhoS = rhoN = 0
for i, codon1 in enumerate(codons):
for j, codon2 in enumerate(codons):
if i != j:
try:
if (
codon_table.forward_table[codon1]
== codon_table.forward_table[codon2]
):
# synonymous count
rhoS += pi[codon1] * Q[i, j]
else:
# nonsynonymous count
rhoN += pi[codon1] * Q[i, j]
except KeyError:
# This is probably due to stop codons
pass
rhoS *= 3
rhoN *= 3
dN = Nd / rhoN
dS = Sd / rhoS
    return dN, dS


def _get_pi(codons1, codons2, cmethod, codon_table):
"""Obtain codon frequency dict (pi) from two codon list (PRIVATE).
This function is designed for ML method. Available counting methods
(cfreq) are F1x4, F3x4 and F64.
"""
# TODO:
# Stop codon should not be allowed according to Yang.
# Try to modify this!
pi = {}
if cmethod == "F1x4":
fcodon = Counter(
nucleotide for codon in codons1 + codons2 for nucleotide in codon
)
tot = sum(fcodon.values())
fcodon = {j: k / tot for j, k in fcodon.items()}
        for codon in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in codon:
pi[codon] = fcodon[codon[0]] * fcodon[codon[1]] * fcodon[codon[2]]
elif cmethod == "F3x4":
# three codon position
fcodon = [
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
]
for codon in codons1 + codons2:
fcodon[0][codon[0]] += 1
fcodon[1][codon[1]] += 1
fcodon[2][codon[2]] += 1
for i in range(3):
tot = sum(fcodon[i].values())
fcodon[i] = {j: k / tot for j, k in fcodon[i].items()}
for codon in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in codon:
pi[codon] = (
fcodon[0][codon[0]] * fcodon[1][codon[1]] * fcodon[2][codon[2]]
)
elif cmethod == "F61":
        for codon in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in codon:
pi[codon] = 0.1
for codon in codons1 + codons2:
pi[codon] += 1
tot = sum(pi.values())
pi = {j: k / tot for j, k in pi.items()}
    return pi
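
# A brief sketch of _get_pi with the F3x4 counting method (illustrative
# only). The returned frequencies cover the stop-inclusive DNA codons and
# should sum to 1.
def _example_get_pi():
    from Bio.Data import CodonTable
    table = CodonTable.generic_by_id[1]
    pi = _get_pi(["TTT", "GCT", "ATG"], ["TTC", "GCT", "ATG"], "F3x4", table)
    print(round(sum(pi.values()), 6))  # 1.0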


def _q(codon1, codon2, pi, k, w, codon_table):
"""Q matrix for codon substitution (PRIVATE).
Arguments:
- codon1, codon2 : three letter codon string
- pi : expected codon frequency
- k : transition/transversion ratio
- w : nonsynonymous/synonymous rate ratio
- codon_table : Bio.Data.CodonTable object
"""
if codon1 == codon2:
        # diagonal elements are set in _get_Q to minus the sum of the row
return 0
if codon1 in codon_table.stop_codons or codon2 in codon_table.stop_codons:
return 0
if (codon1 not in pi) or (codon2 not in pi):
return 0
purine = ("A", "G")
pyrimidine = ("T", "C")
diff = [
(i, nucleotide1, nucleotide2)
for i, (nucleotide1, nucleotide2) in enumerate(zip(codon1, codon2))
if nucleotide1 != nucleotide2
]
if len(diff) >= 2:
return 0
if codon_table.forward_table[codon1] == codon_table.forward_table[codon2]:
# synonymous substitution
if diff[0][1] in purine and diff[0][2] in purine:
# transition
return k * pi[codon2]
elif diff[0][1] in pyrimidine and diff[0][2] in pyrimidine:
# transition
return k * pi[codon2]
else:
# transversion
return pi[codon2]
else:
# nonsynonymous substitution
if diff[0][1] in purine and diff[0][2] in purine:
# transition
return w * k * pi[codon2]
elif diff[0][1] in pyrimidine and diff[0][2] in pyrimidine:
# transition
return w * k * pi[codon2]
else:
# transversion
            return w * pi[codon2]
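
# A pointwise sketch of _q (illustrative only; pi here is a toy codon
# frequency dict restricted to the codons being compared).
def _example_q():
    from Bio.Data import CodonTable
    table = CodonTable.generic_by_id[1]
    pi = {"TTT": 0.4, "TTC": 0.4, "TCC": 0.2}
    # Synonymous pyrimidine transition TTT -> TTC: rate k * pi[codon2].
    print(_q("TTT", "TTC", pi, k=2.0, w=0.5, codon_table=table))  # 0.8
    # Codons differing at more than one position never interchange directly.
    print(_q("TTT", "TCC", pi, k=2.0, w=0.5, codon_table=table))  # 0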


def _get_Q(pi, k, w, codons, codon_table):
"""Q matrix for codon substitution (PRIVATE)."""
codon_num = len(codons)
Q = np.zeros((codon_num, codon_num))
for i1, codon1 in enumerate(codons):
for i2, codon2 in enumerate(codons):
if i1 != i2:
Q[i1, i2] = _q(codon1, codon2, pi, k, w, codon_table=codon_table)
nucl_substitutions = 0
for i, codon in enumerate(codons):
Q[i, i] = -sum(Q[i, :])
try:
nucl_substitutions += pi[codon] * (-Q[i, i])
except KeyError:
pass
Q /= nucl_substitutions
    return Q


def _likelihood_func(t, k, w, pi, codon_cnt, codons, codon_table):
"""Likelihood function for ML method (PRIVATE)."""
from scipy.linalg import expm
Q = _get_Q(pi, k, w, codons, codon_table)
P = expm(Q * t)
likelihood = 0
for i, codon1 in enumerate(codons):
for j, codon2 in enumerate(codons):
if (codon1, codon2) in codon_cnt:
if P[i, j] * pi[codon1] <= 0:
likelihood += codon_cnt[(codon1, codon2)] * 0
else:
likelihood += codon_cnt[(codon1, codon2)] * log(
pi[codon1] * P[i, j]
)
    return likelihood


def calculate_dn_ds_matrix(alignment, method="NG86", codon_table=None):
"""Calculate dN and dS pairwise for the multiple alignment, and return as matrices.
    Arguments:
- method - Available methods include NG86, LWL85, YN00 and ML.
- codon_table - Codon table to use for forward translation.
"""
from Bio.Phylo.TreeConstruction import DistanceMatrix
if codon_table is None:
codon_table = CodonTable.generic_by_id[1]
sequences = alignment.sequences
coordinates = alignment.coordinates
names = [record.id for record in sequences]
size = len(names)
dn_matrix = []
ds_matrix = []
for i in range(size):
dn_matrix.append([])
ds_matrix.append([])
for j in range(i):
pairwise_sequences = [sequences[i], sequences[j]]
pairwise_coordinates = coordinates[(i, j), :]
pairwise_alignment = Alignment(pairwise_sequences, pairwise_coordinates)
dn, ds = calculate_dn_ds(
pairwise_alignment, method=method, codon_table=codon_table
)
dn_matrix[i].append(dn)
ds_matrix[i].append(ds)
dn_matrix[i].append(0.0)
ds_matrix[i].append(0.0)
dn_dm = DistanceMatrix(names, matrix=dn_matrix)
ds_dm = DistanceMatrix(names, matrix=ds_matrix)
    return dn_dm, ds_dm
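
# A minimal usage sketch of calculate_dn_ds_matrix (illustrative only; the
# file name is hypothetical, and any in-frame codon alignment readable by
# Bio.Align would do).
def _example_calculate_dn_ds_matrix():
    from Bio import Align
    alignment = Align.read("my_codon_alignment.fa", "fasta")  # hypothetical file
    dn_dm, ds_dm = calculate_dn_ds_matrix(alignment, method="NG86")
    print(dn_dm)
    print(ds_dm)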


def mktest(alignment, species=None, codon_table=None):
"""McDonald-Kreitman test for neutrality.
Implement the McDonald-Kreitman test for neutrality (PMID: 1904993)
This method counts changes rather than sites
(http://mkt.uab.es/mkt/help_mkt.asp).
Arguments:
- alignment - Alignment of gene nucleotide sequences to compare.
- species - List of the species ID for each sequence in the alignment.
Typically, the species ID is the species name as a string, or an integer.
- codon_table - Codon table to use for forward translation.
Return the p-value of test result.
"""
if codon_table is None:
codon_table = CodonTable.generic_by_id[1]
G, nonsyn_G = _get_codon2codon_matrix(codon_table=codon_table)
unique_species = set(species)
sequences = []
for sequence in alignment.sequences:
try:
sequence = sequence.seq
except AttributeError:
pass
sequence = str(sequence)
sequences.append(sequence)
syn_fix, nonsyn_fix, syn_poly, nonsyn_poly = 0, 0, 0, 0
starts = sys.maxsize
for ends in alignment.coordinates.transpose():
step = min(ends - starts)
for j in range(0, step, 3):
codons = {key: [] for key in unique_species}
for key, sequence, start in zip(species, sequences, starts):
codon = sequence[start + j : start + j + 3]
codons[key].append(codon)
fixed = True
all_codons = set()
for value in codons.values():
value = set(value)
if len(value) > 1:
fixed = False
all_codons.update(value)
if len(all_codons) == 1:
continue
nonsyn = _count_replacement(all_codons, nonsyn_G)
syn = _count_replacement(all_codons, G) - nonsyn
if fixed is True:
# fixed
nonsyn_fix += nonsyn
syn_fix += syn
else:
# not fixed
nonsyn_poly += nonsyn
syn_poly += syn
starts = ends
    return _G_test([syn_fix, nonsyn_fix, syn_poly, nonsyn_poly])


def _get_codon2codon_matrix(codon_table):
"""Get codon codon substitution matrix (PRIVATE).
Elements in the matrix are number of synonymous and nonsynonymous
substitutions required for the substitution.
"""
bases = ("A", "T", "C", "G")
codons = [
codon
for codon in list(codon_table.forward_table.keys()) + codon_table.stop_codons
if "U" not in codon
]
# set up codon_dict considering stop codons
codon_dict = codon_table.forward_table.copy()
for stop in codon_table.stop_codons:
codon_dict[stop] = "stop"
# count site
num = len(codons)
G = {} # graph for substitution
nonsyn_G = {} # graph for nonsynonymous substitution
graph = {}
graph_nonsyn = {}
for i, codon in enumerate(codons):
graph[codon] = {}
graph_nonsyn[codon] = {}
for p in range(3):
for base in bases:
tmp_codon = codon[0:p] + base + codon[p + 1 :]
if codon_dict[codon] != codon_dict[tmp_codon]:
graph_nonsyn[codon][tmp_codon] = 1
graph[codon][tmp_codon] = 1
else:
if codon != tmp_codon:
graph_nonsyn[codon][tmp_codon] = 0.1
graph[codon][tmp_codon] = 1
for codon1 in codons:
nonsyn_G[codon1] = {}
G[codon1] = {}
for codon2 in codons:
if codon1 == codon2:
nonsyn_G[codon1][codon2] = 0
G[codon1][codon2] = 0
else:
nonsyn_G[codon1][codon2] = _dijkstra(graph_nonsyn, codon1, codon2)
G[codon1][codon2] = _dijkstra(graph, codon1, codon2)
    return G, nonsyn_G


def _dijkstra(graph, start, end):
"""Dijkstra's algorithm Python implementation (PRIVATE).
Algorithm adapted from
http://thomas.pelletier.im/2010/02/dijkstras-algorithm-python-implementation/.
However, an obvious bug in::
if D[child_node] >(<) D[node] + child_value:
is fixed.
This function will return the distance between start and end.
Arguments:
- graph: Dictionary of dictionary (keys are vertices).
- start: Start vertex.
- end: End vertex.
    Output:
    The distance between the start and end vertices.
"""
D = {} # Final distances dict
P = {} # Predecessor dict
# Fill the dicts with default values
for node in graph.keys():
D[node] = 100 # Vertices are unreachable
P[node] = "" # Vertices have no predecessors
D[start] = 0 # The start vertex needs no move
unseen_nodes = list(graph.keys()) # All nodes are unseen
while len(unseen_nodes) > 0:
# Select the node with the lowest value in D (final distance)
shortest = None
node = ""
for temp_node in unseen_nodes:
if shortest is None:
shortest = D[temp_node]
node = temp_node
elif D[temp_node] < shortest:
shortest = D[temp_node]
node = temp_node
# Remove the selected node from unseen_nodes
unseen_nodes.remove(node)
# For each child (ie: connected vertex) of the current node
for child_node, child_value in graph[node].items():
if D[child_node] > D[node] + child_value:
D[child_node] = D[node] + child_value
# To go to child_node, you have to go through node
P[child_node] = node
if node == end:
break
# Set a clean path
path = []
# We begin from the end
node = end
distance = 0
# While we are not arrived at the beginning
while not (node == start):
if path.count(node) == 0:
path.insert(0, node) # Insert the predecessor of the current node
node = P[node] # The current node becomes its predecessor
else:
break
path.insert(0, start) # Finally, insert the start vertex
for i in range(len(path) - 1):
distance += graph[path[i]][path[i + 1]]
    return distance
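
# A tiny sketch of _dijkstra on a three-node chain (illustrative only).
def _example_dijkstra():
    graph = {
        "A": {"B": 1},
        "B": {"A": 1, "C": 1},
        "C": {"B": 1},
    }
    print(_dijkstra(graph, "A", "C"))  # 2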


def _count_replacement(codons, G):
"""Count replacement needed for a given codon_set (PRIVATE)."""
if len(codons) == 1:
return 0, 0
elif len(codons) == 2:
codons = list(codons)
return floor(G[codons[0]][codons[1]])
else:
subgraph = {
codon1: {codon2: G[codon1][codon2] for codon2 in codons if codon1 != codon2}
for codon1 in codons
}
        return _prim(subgraph)


def _prim(G):
"""Prim's algorithm to find minimum spanning tree (PRIVATE).
Code is adapted from
http://programmingpraxis.com/2010/04/09/minimum-spanning-tree-prims-algorithm/
"""
nodes = []
edges = []
for i in G.keys():
nodes.append(i)
for j in G[i]:
if (i, j, G[i][j]) not in edges and (j, i, G[i][j]) not in edges:
edges.append((i, j, G[i][j]))
conn = defaultdict(list)
for n1, n2, c in edges:
conn[n1].append((c, n1, n2))
conn[n2].append((c, n2, n1))
mst = [] # minimum spanning tree
    used = {nodes[0]}  # a one-element set; set(nodes[0]) would split a multi-character node name
usable_edges = conn[nodes[0]][:]
heapify(usable_edges)
while usable_edges:
cost, n1, n2 = heappop(usable_edges)
if n2 not in used:
used.add(n2)
mst.append((n1, n2, cost))
for e in conn[n2]:
if e[2] not in used:
heappush(usable_edges, e)
length = 0
for p in mst:
length += floor(p[2])
    return length
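
# A tiny sketch of _prim (illustrative only): the minimum spanning tree of
# this triangle keeps the two unit-cost edges.
def _example_prim():
    G = {
        "a": {"b": 1, "c": 3},
        "b": {"a": 1, "c": 1},
        "c": {"a": 3, "b": 1},
    }
    print(_prim(G))  # 2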


def _G_test(site_counts):
"""G test for 2x2 contingency table (PRIVATE).
Arguments:
- site_counts - [syn_fix, nonsyn_fix, syn_poly, nonsyn_poly]
>>> print("%0.6f" % _G_test([17, 7, 42, 2]))
0.004924
"""
# TODO:
# Apply continuity correction for Chi-square test.
G = 0
tot = sum(site_counts)
tot_syn = site_counts[0] + site_counts[2]
tot_non = site_counts[1] + site_counts[3]
tot_fix = sum(site_counts[:2])
tot_poly = sum(site_counts[2:])
exp = [
tot_fix * tot_syn / tot,
tot_fix * tot_non / tot,
tot_poly * tot_syn / tot,
tot_poly * tot_non / tot,
]
for obs, ex in zip(site_counts, exp):
G += obs * log(obs / ex)
# with only 1 degree of freedom for a 2x2 table,
# the cumulative chi-square distribution reduces to a simple form:
    return erfc(sqrt(G))


def write(alignments, target, fmt, *args, **kwargs):
"""Write alignments to a file.
Arguments:
- alignments - An Alignments object, an iterator of Alignment objects, or
a single Alignment.
- target - File or file-like object to write to, or filename as string.
- fmt - String describing the file format (case-insensitive).
Note if providing a file or file-like object, your code should close the
target after calling this function, or call .flush(), to ensure the data
gets flushed to disk.
Returns the number of alignments written (as an integer).
"""
if isinstance(alignments, Alignment):
alignments = [alignments]
module = _load(fmt)
try:
writer = module.AlignmentWriter
except AttributeError:
raise ValueError(
f"File writing has not yet been implemented for the {fmt} format"
)
    return writer(target, *args, **kwargs).write(alignments)
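
# A round-trip sketch of write() (illustrative only; it reuses the Clustal
# example file from the read() docstring further below, which ships with
# the Biopython test suite).
def _example_write():
    from Bio import Align
    alignment = Align.read("Clustalw/opuntia.aln", "clustal")
    count = Align.write(alignment, "opuntia_copy.aln", "clustal")
    print(count)  # 1 alignment written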


def parse(source, fmt):
"""Parse an alignment file and return an iterator over alignments.
Arguments:
- source - File or file-like object to read from, or filename as string.
- fmt - String describing the file format (case-insensitive).
Typical usage, opening a file to read in, and looping over the alignments:
>>> from Bio import Align
>>> filename = "Exonerate/exn_22_m_ner_cigar.exn"
>>> for alignment in Align.parse(filename, "exonerate"):
... print("Number of sequences in alignment", len(alignment))
... print("Alignment score:", alignment.score)
Number of sequences in alignment 2
Alignment score: 6150.0
Number of sequences in alignment 2
Alignment score: 502.0
Number of sequences in alignment 2
Alignment score: 440.0
For lazy-loading file formats such as bigMaf, for which the file contents
is read on demand only, ensure that the file remains open while extracting
alignment data.
You can use the Bio.Align.read(...) function when the file contains only
one alignment.
"""
module = _load(fmt)
alignments = module.AlignmentIterator(source)
    return alignments


def read(handle, fmt):
"""Parse a file containing one alignment, and return it.
Arguments:
- source - File or file-like object to read from, or filename as string.
- fmt - String describing the file format (case-insensitive).
This function is for use parsing alignment files containing exactly one
alignment. For example, reading a Clustal file:
>>> from Bio import Align
>>> alignment = Align.read("Clustalw/opuntia.aln", "clustal")
>>> print("Alignment shape:", alignment.shape)
Alignment shape: (7, 156)
>>> for sequence in alignment.sequences:
... print(sequence.id, len(sequence))
gi|6273285|gb|AF191659.1|AF191 146
gi|6273284|gb|AF191658.1|AF191 148
gi|6273287|gb|AF191661.1|AF191 146
gi|6273286|gb|AF191660.1|AF191 146
gi|6273290|gb|AF191664.1|AF191 150
gi|6273289|gb|AF191663.1|AF191 150
gi|6273291|gb|AF191665.1|AF191 156
If the file contains no records, or more than one record, an exception is
raised. For example:
>>> from Bio import Align
>>> filename = "Exonerate/exn_22_m_ner_cigar.exn"
>>> alignment = Align.read(filename, "exonerate")
Traceback (most recent call last):
...
ValueError: More than one alignment found in file
Use the Bio.Align.parse function if you want to read a file containing
more than one alignment.
"""
alignments = parse(handle, fmt)
try:
alignment = next(alignments)
except StopIteration:
raise ValueError("No alignments found in file") from None
try:
next(alignments)
raise ValueError("More than one alignment found in file")
except StopIteration:
pass
    return alignment


def read(handle, dtype=float):
"""Parse the file and return an Array object."""
with as_handle(handle) as fp:
lines = fp.readlines()
header = []
for i, line in enumerate(lines):
if not line.startswith("#"):
break
header.append(line[1:].strip())
rows = [line.split() for line in lines[i:]]
if len(rows[0]) == len(rows[1]) == 2:
alphabet = [key for key, value in rows]
for key in alphabet:
if len(key) > 1:
alphabet = tuple(alphabet)
break
else:
alphabet = "".join(alphabet)
matrix = Array(alphabet=alphabet, dims=1, dtype=dtype)
matrix.update(rows)
else:
alphabet = rows.pop(0)
for key in alphabet:
if len(key) > 1:
alphabet = tuple(alphabet)
break
else:
alphabet = "".join(alphabet)
matrix = Array(alphabet=alphabet, dims=2, dtype=dtype)
for letter1, row in zip(alphabet, rows):
assert letter1 == row.pop(0)
for letter2, word in zip(alphabet, row):
matrix[letter1, letter2] = float(word)
matrix.header = header
    return matrix


def load(name=None):
"""Load and return a precalculated substitution matrix.
>>> from Bio.Align import substitution_matrices
>>> names = substitution_matrices.load()
"""
path = os.path.realpath(__file__)
directory = os.path.dirname(path)
subdirectory = os.path.join(directory, "data")
if name is None:
filenames = os.listdir(subdirectory)
try:
filenames.remove("README.txt")
# The README.txt file is not present in usual Biopython
# installations, but is included in a development install.
except ValueError:
pass
return sorted(filenames)
path = os.path.join(subdirectory, name)
matrix = read(path)
    return matrix
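
# A quick sketch of load() (illustrative only): load a named matrix and
# index it by a pair of letters.
def _example_load():
    from Bio.Align import substitution_matrices
    matrix = substitution_matrices.load("BLOSUM62")
    print(matrix["A", "R"])  # -1.0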


def _extract_alignment_region(alignment_seq_with_flanking, annotation):
"""Extract alignment region (PRIVATE).
Helper function for the main parsing code.
To get the actual pairwise alignment sequences, we must first
translate the un-gapped sequence based coordinates into positions
in the gapped sequence (which may have a flanking region shown
using leading - characters). To date, I have never seen any
trailing flanking region shown in the m10 file, but the
following code should also cope with that.
Note that this code seems to work fine even when the "sq_offset"
entries are present as a result of using the -X command line option.
"""
align_stripped = alignment_seq_with_flanking.strip("-")
display_start = int(annotation["al_display_start"])
if int(annotation["al_start"]) <= int(annotation["al_stop"]):
start = int(annotation["al_start"]) - display_start
end = int(annotation["al_stop"]) - display_start + 1
else:
# FASTA has flipped this sequence...
start = display_start - int(annotation["al_start"])
end = display_start - int(annotation["al_stop"]) + 1
end += align_stripped.count("-")
if start < 0 or start >= end or end > len(align_stripped):
raise ValueError(
"Problem with sequence start/stop,\n%s[%i:%i]\n%s"
% (alignment_seq_with_flanking, start, end, annotation)
)
    return align_stripped[start:end]
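
# A toy sketch of _extract_alignment_region (illustrative only; the
# annotation values mimic the 1-based m10 coordinate convention).
def _example_extract_alignment_region():
    annotation = {"al_display_start": "1", "al_start": "3", "al_stop": "5"}
    print(_extract_alignment_region("--ABCDEF", annotation))  # 'CDE'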


def FastaM10Iterator(handle, seq_count=None):
"""Alignment iterator for the FASTA tool's pairwise alignment output.
This is for reading the pairwise alignments output by Bill Pearson's
FASTA program when called with the -m 10 command line option for machine
readable output. For more details about the FASTA tools, see the website
http://fasta.bioch.virginia.edu/ and the paper:
W.R. Pearson & D.J. Lipman PNAS (1988) 85:2444-2448
This class is intended to be used via the Bio.AlignIO.parse() function
by specifying the format as "fasta-m10" as shown in the following code::
from Bio import AlignIO
handle = ...
for a in AlignIO.parse(handle, "fasta-m10"):
assert len(a) == 2, "Should be pairwise!"
print("Alignment length %i" % a.get_alignment_length())
for record in a:
print("%s %s %s" % (record.seq, record.name, record.id))
Note that this is not a full blown parser for all the information
in the FASTA output - for example, most of the header and all of the
footer is ignored. Also, the alignments are not batched according to
the input queries.
Also note that there can be up to about 30 letters of flanking region
included in the raw FASTA output as contextual information. This is NOT
part of the alignment itself, and is not included in the resulting
MultipleSeqAlignment objects returned.
"""
state_PREAMBLE = -1
state_NONE = 0
state_QUERY_HEADER = 1
state_ALIGN_HEADER = 2
state_ALIGN_QUERY = 3
state_ALIGN_MATCH = 4
state_ALIGN_CONS = 5
def build_hsp():
if not query_tags and not match_tags:
raise ValueError(f"No data for query {query_id!r}, match {match_id!r}")
assert query_tags, query_tags
assert match_tags, match_tags
evalue = align_tags.get("fa_expect")
tool = global_tags.get("tool", "").upper()
q = _extract_alignment_region(query_seq, query_tags)
if tool in ["TFASTX"] and len(match_seq) == len(q):
m = match_seq
# Quick hack until I can work out how -, * and / characters
# and the apparent mix of aa and bp coordinates works.
else:
m = _extract_alignment_region(match_seq, match_tags)
if len(q) != len(m):
raise ValueError(
f"""\
Darn... amino acids vs nucleotide coordinates?
tool: {tool}
query_seq: {query_seq}
query_tags: {query_tags}
{q} length: {len(q)}
match_seq: {match_seq}
match_tags: {match_tags}
{m} length: {len(m)}
handle.name: {handle.name}
"""
)
annotations = {}
records = []
# Want to record both the query header tags, and the alignment tags.
annotations.update(header_tags)
annotations.update(align_tags)
# Query
# =====
record = SeqRecord(
Seq(q),
id=query_id,
name="query",
description=query_descr,
annotations={"original_length": int(query_tags["sq_len"])},
)
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(query_tags["al_start"])
record._al_stop = int(query_tags["al_stop"])
# TODO - Can FASTA output RNA?
if "sq_type" in query_tags:
if query_tags["sq_type"] == "D":
record.annotations["molecule_type"] = "DNA"
elif query_tags["sq_type"] == "p":
record.annotations["molecule_type"] = "protein"
records.append(record)
# Match
# =====
record = SeqRecord(
Seq(m),
id=match_id,
name="match",
description=match_descr,
annotations={"original_length": int(match_tags["sq_len"])},
)
# TODO - handle start/end coordinates properly. Short term hack for now:
record._al_start = int(match_tags["al_start"])
record._al_stop = int(match_tags["al_stop"])
if "sq_type" in match_tags:
if match_tags["sq_type"] == "D":
record.annotations["molecule_type"] = "DNA"
elif match_tags["sq_type"] == "p":
record.annotations["molecule_type"] = "protein"
records.append(record)
return MultipleSeqAlignment(records, annotations=annotations)
state = state_PREAMBLE
query_id = None
match_id = None
query_descr = ""
match_descr = ""
global_tags = {}
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
for line in handle:
if ">>>" in line and not line.startswith(">>>"):
if query_id and match_id:
# This happens on old FASTA output which lacked an end of
# query >>><<< marker line.
yield build_hsp()
state = state_NONE
query_descr = line[line.find(">>>") + 3 :].strip()
query_id = query_descr.split(None, 1)[0]
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith("!! No "):
# e.g.
# !! No library sequences with E() < 0.5
# or on more recent versions,
# No sequences with E() < 0.05
assert state == state_NONE
assert not header_tags
assert not align_tags
assert not match_tags
assert not query_tags
assert match_id is None
assert not query_seq
assert not match_seq
assert not cons_seq
query_id = None
elif line.strip() in [">>><<<", ">>>///"]:
# End of query, possible end of all queries
if query_id and match_id:
yield build_hsp()
state = state_NONE
query_id = None
match_id = None
header_tags = {}
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
elif line.startswith(">>>"):
# Should be start of a match!
assert query_id is not None
assert line[3:].split(", ", 1)[0] == query_id, line
assert match_id is None
assert not header_tags
assert not align_tags
assert not query_tags
assert not match_tags
assert not match_seq
assert not query_seq
assert not cons_seq
state = state_QUERY_HEADER
elif line.startswith(">>"):
# Should now be at start of a match alignment!
if query_id and match_id:
yield build_hsp()
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
match_descr = line[2:].strip()
match_id = match_descr.split(None, 1)[0]
state = state_ALIGN_HEADER
elif line.startswith(">--"):
# End of one HSP
assert query_id and match_id, line
yield build_hsp()
# Clean up read for next HSP
# but reuse header_tags
align_tags = {}
query_tags = {}
match_tags = {}
query_seq = ""
match_seq = ""
cons_seq = ""
state = state_ALIGN_HEADER
elif line.startswith(">"):
if state == state_ALIGN_HEADER:
# Should be start of query alignment seq...
assert query_id is not None, line
assert match_id is not None, line
assert query_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_QUERY
elif state == state_ALIGN_QUERY:
# Should be start of match alignment seq
assert query_id is not None, line
assert match_id is not None, line
assert match_id.startswith(line[1:].split(None, 1)[0]), line
state = state_ALIGN_MATCH
elif state == state_NONE:
# Can get > as the last line of a histogram
pass
else:
raise RuntimeError("state %i got %r" % (state, line))
elif line.startswith("; al_cons"):
assert state == state_ALIGN_MATCH, line
state = state_ALIGN_CONS
# Next line(s) should be consensus seq...
elif line.startswith("; "):
if ": " in line:
key, value = (s.strip() for s in line[2:].split(": ", 1))
else:
import warnings
from Bio import BiopythonParserWarning
# Seen in lalign36, specifically version 36.3.4 Apr, 2011
# Fixed in version 36.3.5b Oct, 2011(preload8)
warnings.warn(
f"Missing colon in line: {line!r}", BiopythonParserWarning
)
try:
key, value = (s.strip() for s in line[2:].split(" ", 1))
except ValueError:
raise ValueError(f"Bad line: {line!r}") from None
if state == state_QUERY_HEADER:
header_tags[key] = value
elif state == state_ALIGN_HEADER:
align_tags[key] = value
elif state == state_ALIGN_QUERY:
query_tags[key] = value
elif state == state_ALIGN_MATCH:
match_tags[key] = value
else:
raise RuntimeError(f"Unexpected state {state!r}, {line!r}")
elif state == state_ALIGN_QUERY:
query_seq += line.strip()
elif state == state_ALIGN_MATCH:
match_seq += line.strip()
elif state == state_ALIGN_CONS:
cons_seq += line.strip("\n")
elif state == state_PREAMBLE:
if line.startswith("#"):
global_tags["command"] = line[1:].strip()
elif line.startswith(" version "):
global_tags["version"] = line[9:].strip()
elif " compares a " in line:
global_tags["tool"] = line[: line.find(" compares a ")].strip()
elif " searches a " in line:
global_tags["tool"] = line[: line.find(" searches a ")].strip()
else:
                pass