<SYSTEM_TASK:>
r"""Equilibrium expectation value of a given observable.
<END_TASK>
<USER_TASK:>
Description:
def expectation(T, a, mu=None):
r"""Equilibrium expectation value of a given observable.
Parameters
----------
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
a : (M,) ndarray
Observable vector
mu : (M,) ndarray (optional)
The stationary distribution of T. If given, the stationary
distribution will not be recalculated (saving lots of time)
Returns
-------
val: float
Equilibrium expectation value of the given observable
Notes
-----
The equilibrium expectation value of an observable a is defined as follows
.. math::
\mathbb{E}_{\mu}[a] = \sum_i \mu_i a_i
:math:`\mu=(\mu_i)` is the stationary vector of the transition matrix :math:`T`.
Examples
--------
>>> import numpy as np
>>> from msmtools.analysis import expectation
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> a = np.array([1.0, 0.0, 1.0])
>>> m_a = expectation(T, a)
>>> m_a # doctest: +ELLIPSIS
0.909090909...
""" |
# check if square matrix and remember size
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
n = T.shape[0]
a = _types.ensure_ndarray(a, ndim=1, size=n, kind='numeric')
mu = _types.ensure_ndarray_or_None(mu, ndim=1, size=n, kind='numeric')
# go
if mu is None:
mu = stationary_distribution(T)
return _np.dot(mu, a) |
<SYSTEM_TASK:>
Constructs the pcca object from dense or sparse
<END_TASK>
<USER_TASK:>
Description:
def _pcca_object(T, m):
"""
Constructs the pcca object from dense or sparse
Parameters
----------
T : (n, n) ndarray or scipy.sparse matrix
Transition matrix
m : int
Number of metastable sets
Returns
-------
pcca : PCCA
PCCA object
""" |
if _issparse(T):
_showSparseConversionWarning()
T = T.toarray()
T = _types.ensure_ndarray(T, ndim=2, uniform=True, kind='numeric')
return dense.pcca.PCCA(T, m) |
<SYSTEM_TASK:>
r"""Sensitivity matrix of a specified eigenvalue.
<END_TASK>
<USER_TASK:>
Description:
def eigenvalue_sensitivity(T, k):
r"""Sensitivity matrix of a specified eigenvalue.
Parameters
----------
T : (M, M) ndarray
Transition matrix
k : int
Compute sensitivity matrix for k-th eigenvalue
Returns
-------
S : (M, M) ndarray
Sensitivity matrix for k-th eigenvalue.
""" |
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
if _issparse(T):
_showSparseConversionWarning()
return eigenvalue_sensitivity(T.todense(), k)
else:
return dense.sensitivity.eigenvalue_sensitivity(T, k) |
<SYSTEM_TASK:>
r"""Sensitivity matrix of a selected eigenvector element.
<END_TASK>
<USER_TASK:>
Description:
def eigenvector_sensitivity(T, k, j, right=True):
r"""Sensitivity matrix of a selected eigenvector element.
Parameters
----------
T : (M, M) ndarray
Transition matrix (stochastic matrix).
k : int
Eigenvector index
j : int
Element index
right : bool
If True compute for right eigenvector, otherwise compute for left eigenvector.
Returns
-------
S : (M, M) ndarray
Sensitivity matrix for the j-th element of the k-th eigenvector.
""" |
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
if _issparse(T):
_showSparseConversionWarning()
return eigenvector_sensitivity(T.todense(), k, j, right=right)
else:
return dense.sensitivity.eigenvector_sensitivity(T, k, j, right=right) |
<SYSTEM_TASK:>
r"""Sensitivity matrix of a stationary distribution element.
<END_TASK>
<USER_TASK:>
Description:
def stationary_distribution_sensitivity(T, j):
r"""Sensitivity matrix of a stationary distribution element.
Parameters
----------
T : (M, M) ndarray
Transition matrix (stochastic matrix).
j : int
Index of stationary distribution element
for which sensitivity matrix is computed.
Returns
-------
S : (M, M) ndarray
Sensitivity matrix for the specified element
of the stationary distribution.
""" |
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
if _issparse(T):
_showSparseConversionWarning()
return stationary_distribution_sensitivity(T.todense(), j)
else:
return dense.sensitivity.stationary_distribution_sensitivity(T, j) |
<SYSTEM_TASK:>
r"""Sensitivity matrix of the mean first-passage time from specified state.
<END_TASK>
<USER_TASK:>
Description:
def mfpt_sensitivity(T, target, i):
r"""Sensitivity matrix of the mean first-passage time from specified state.
Parameters
----------
T : (M, M) ndarray
Transition matrix
target : int or list
Target state or set for mfpt computation
i : int
Compute the sensitivity for state `i`
Returns
-------
S : (M, M) ndarray
Sensitivity matrix for specified state
""" |
# check input
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
target = _types.ensure_int_vector(target)
# go
if _issparse(T):
_showSparseConversionWarning()
return mfpt_sensitivity(T.todense(), target, i)
else:
return dense.sensitivity.mfpt_sensitivity(T, target, i) |
<SYSTEM_TASK:>
r"""Sensitivity matrix of a specified committor entry.
<END_TASK>
<USER_TASK:>
Description:
def committor_sensitivity(T, A, B, i, forward=True):
r"""Sensitivity matrix of a specified committor entry.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
i : int
Compute the sensitivity for committor entry `i`
forward : bool (optional)
Compute the forward committor. If forward
is False compute the backward committor.
Returns
-------
S : (M, M) ndarray
Sensitivity matrix of the specified committor entry.
""" |
# check inputs
T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
A = _types.ensure_int_vector(A)
B = _types.ensure_int_vector(B)
if _issparse(T):
_showSparseConversionWarning()
return committor_sensitivity(T.todense(), A, B, i, forward)
else:
if forward:
return dense.sensitivity.forward_committor_sensitivity(T, A, B, i)
else:
return dense.sensitivity.backward_committor_sensitivity(T, A, B, i) |
<SYSTEM_TASK:>
r"""Covariance tensor for the non-reversible transition matrix ensemble
<END_TASK>
<USER_TASK:>
Description:
def tmatrix_cov(C, row=None):
r"""Covariance tensor for the non-reversible transition matrix ensemble
Normally the covariance tensor cov(p_ij, p_kl) would carry four indices
(i,j,k,l). In the non-reversible case rows are independent so that
cov(p_ij, p_kl)=0 for i not equal to k. Therefore the function will only
return cov(p_ij, p_ik).
Parameters
----------
C : (M, M) ndarray
Count matrix
row : int (optional)
If row is given return covariance matrix for specified row only
Returns
-------
cov : (M, M, M) ndarray
Covariance tensor
""" |
if row is None:
alpha = C + 1.0 # Dirichlet parameters
alpha0 = alpha.sum(axis=1)  # Sum of parameters (per row)
norm = alpha0 ** 2 * (alpha0 + 1.0)
"""Non-normalized covariance tensor"""
Z = -alpha[:, :, np.newaxis] * alpha[:, np.newaxis, :]
"""Correct-diagonal"""
ind = np.diag_indices(C.shape[0])
Z[:, ind[0], ind[1]] += alpha0[:, np.newaxis] * alpha
"""Covariance matrix"""
cov = Z / norm[:, np.newaxis, np.newaxis]
return cov
else:
alpha = C[row, :] + 1.0
return dirichlet_covariance(alpha) |
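A small consistency check may help here: because rows are independent, each slice cov[i] of the full tensor should equal the Dirichlet covariance of row i. A minimal sketch, assuming tmatrix_cov and dirichlet_covariance (defined below) are in scope and np is NumPy:
import numpy as np

C = np.array([[5., 2., 0.], [1., 7., 3.], [2., 0., 6.]])
cov = tmatrix_cov(C)  # shape (3, 3, 3)
for i in range(C.shape[0]):
    # each row slice equals the covariance of the Dirichlet posterior of row i
    assert np.allclose(cov[i], dirichlet_covariance(C[i] + 1.0))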
<SYSTEM_TASK:>
r"""Covariance matrix for Dirichlet distribution.
<END_TASK>
<USER_TASK:>
Description:
def dirichlet_covariance(alpha):
r"""Covariance matrix for Dirichlet distribution.
Parameters
----------
alpha : (M, ) ndarray
Parameters of Dirichlet distribution
Returns
-------
cov : (M, M) ndarray
Covariance matrix
""" |
alpha0 = alpha.sum()
norm = alpha0 ** 2 * (alpha0 + 1.0)
"""Non normalized covariance"""
Z = -alpha[:, np.newaxis] * alpha[np.newaxis, :]
"""Correct diagonal"""
ind = np.diag_indices(Z.shape[0])
Z[ind] += alpha0 * alpha
"""Covariance matrix"""
cov = Z / norm
return cov |
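As a quick sanity check (a sketch, not part of the library), the analytic covariance can be compared against the empirical covariance of Dirichlet samples drawn with NumPy:
import numpy as np

alpha = np.array([11.0, 3.0, 2.0])
cov_analytic = dirichlet_covariance(alpha)
samples = np.random.dirichlet(alpha, size=200000)
cov_empirical = np.cov(samples, rowvar=False)
# agreement up to sampling noise (on the order of 1e-4)
print(np.max(np.abs(cov_analytic - cov_empirical)))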
<SYSTEM_TASK:>
Compute mean-first-passage time between subsets of state space.
<END_TASK>
<USER_TASK:>
Description:
def mfpt_between_sets(T, target, origin, mu=None):
"""Compute mean-first-passage time between subsets of state space.
Parameters
----------
T : scipy.sparse matrix
Transition matrix.
target : int or list of int
Set of target states.
origin : int or list of int
Set of starting states.
mu : (M,) ndarray (optional)
The stationary distribution of the transition matrix T.
Returns
-------
tXY : float
Mean first passage time between set X and Y.
Notes
-----
The mean first passage time :math:`\mathbf{E}_X[T_Y]` is the expected
hitting time of one state :math:`y` in :math:`Y` when starting in a
state :math:`x` in :math:`X`:
.. math :: \mathbb{E}_X[T_Y] = \sum_{x \in X}
\frac{\mu_x \mathbb{E}_x[T_Y]}{\sum_{z \in X} \mu_z}
""" |
if mu is None:
mu = stationary_distribution(T)
"""Stationary distribution restriced on starting set X"""
nuX = mu[origin]
muX = nuX / np.sum(nuX)
"""Mean first-passage time to Y (for all possible starting states)"""
tY = mfpt(T, target)
"""Mean first-passage time from X to Y"""
tXY = np.dot(muX, tY[origin])
return tXY |
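For illustration, a minimal usage sketch (assuming stationary_distribution and mfpt from msmtools.analysis are in scope, as the function body requires):
import numpy as np
from scipy.sparse import csr_matrix

T = csr_matrix(np.array([[0.9, 0.1, 0.0],
                         [0.5, 0.0, 0.5],
                         [0.0, 0.1, 0.9]]))
# expected hitting time of state 2 when starting in state 0
t_XY = mfpt_between_sets(T, target=[2], origin=[0])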
<SYSTEM_TASK:>
r"""Dot-product that can handle dense and sparse arrays
<END_TASK>
<USER_TASK:>
Description:
def mydot(A, B):
r"""Dot-product that can handle dense and sparse arrays
Parameters
----------
A : numpy ndarray or scipy sparse matrix
The first factor
B : numpy ndarray or scipy sparse matrix
The second factor
Returns
-------
C : numpy ndarray or scipy sparse matrix
The dot-product of A and B
""" |
if issparse(A) :
return A.dot(B)
elif issparse(B):
return (B.T.dot(A.T)).T
else:
return np.dot(A, B) |
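The second branch exists because np.dot does not understand a sparse operand on the right; transposing turns it into a sparse-times-dense product. A short sketch of the three cases:
import numpy as np
from scipy.sparse import csr_matrix

A_dense = np.arange(4.0).reshape(2, 2)
B_sparse = csr_matrix(np.eye(2))
print(mydot(B_sparse, A_dense))   # sparse.dot(dense)
print(mydot(A_dense, B_sparse))   # handled via (B.T A.T).T
print(mydot(A_dense, A_dense))    # plain np.dot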
<SYSTEM_TASK:>
r"""Compute expected transition counts for Markov chain after N steps.
<END_TASK>
<USER_TASK:>
Description:
def expected_counts(p0, T, N):
r"""Compute expected transition counts for Markov chain after N steps.
Expected counts are computed according to
.. math:: \mathbb{E}\left[C_{ij}^{(N)}\right]=\sum_{k=0}^{N-1} (p_0^T T^{k})_{i} p_{ij}
Parameters
----------
p0 : (M,) ndarray
Starting (probability) vector of the chain.
T : (M, M) sparse matrix
Transition matrix of the chain.
N : int
Number of steps to take from initial state.
Returns
--------
EC : (M, M) sparse matrix
Expected value for transition counts after N steps.
""" |
if (N <= 0):
EC = coo_matrix(T.shape, dtype=float)
return EC
else:
"""Probability vector after (k=0) propagations"""
p_k = 1.0 * p0
"""Sum of vectors after (k=0) propagations"""
p_sum = 1.0 * p_k
"""Transpose T to use sparse dot product"""
Tt = T.transpose()
for k in np.arange(N - 1):
"""Propagate one step p_{k} -> p_{k+1}"""
p_k = Tt.dot(p_k)
"""Update sum"""
p_sum += p_k
D_psum = diags(p_sum, 0)
EC = D_psum.dot(T)
return EC |
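A brief usage sketch; a brute-force sum over the defining formula should agree with the sparse implementation (assumes expected_counts and its scipy imports are in scope):
import numpy as np
from scipy.sparse import csr_matrix

T = csr_matrix(np.array([[0.9, 0.1], [0.2, 0.8]]))
p0 = np.array([1.0, 0.0])
N = 5
EC = expected_counts(p0, T, N).toarray()
# brute force: sum_k (p0 T^k)_i * p_ij
Td = T.toarray()
ref = np.zeros((2, 2))
p = p0.copy()
for _ in range(N):
    ref += p[:, np.newaxis] * Td
    p = p.dot(Td)
assert np.allclose(EC, ref)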
<SYSTEM_TASK:>
r"""Dynamical fingerprint for equilibrium or relaxation experiment
<END_TASK>
<USER_TASK:>
Description:
def fingerprint(P, obs1, obs2=None, p0=None, tau=1, k=None, ncv=None):
r"""Dynamical fingerprint for equilibrium or relaxation experiment
The dynamical fingerprint is given by the implied time-scale
spectrum together with the corresponding amplitudes.
Parameters
----------
P : (M, M) scipy.sparse matrix
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
p0 : (M,) ndarray (optional)
Initial distribution for a relaxation experiment
tau : int (optional)
Lag time of given transition matrix, for correct time-scales
k : int (optional)
Number of time-scales and amplitudes to compute
ncv : int (optional)
The number of Lanczos vectors generated, `ncv` must be greater than k;
it is recommended that ncv > 2*k
Returns
-------
timescales : (N,) ndarray
Time-scales of the transition matrix
amplitudes : (N,) ndarray
Amplitudes for the given observable(s)
""" |
if obs2 is None:
obs2 = obs1
R, D, L = rdl_decomposition(P, k=k, ncv=ncv)
"""Stationary vector"""
mu = L[0, :]
"""Extract diagonal"""
w = np.diagonal(D)
"""Compute time-scales"""
timescales = timescales_from_eigenvalues(w, tau)
if p0 is None:
"""Use stationary distribution - we can not use only left
eigenvectors since the system might be non-reversible"""
amplitudes = np.dot(mu * obs1, R) * np.dot(L, obs2)
else:
"""Use initial distribution"""
amplitudes = np.dot(p0 * obs1, R) * np.dot(L, obs2)
return timescales, amplitudes |
<SYSTEM_TASK:>
r"""Time-correlation for equilibrium experiment - via matrix vector products.
<END_TASK>
<USER_TASK:>
Description:
def correlation_matvec(P, obs1, obs2=None, times=[1]):
r"""Time-correlation for equilibrium experiment - via matrix vector products.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
Returns
-------
correlations : ndarray
Correlation values at given times
""" |
if obs2 is None:
obs2 = obs1
"""Compute stationary vector"""
mu = statdist(P)
obs1mu = mu * obs1
times = np.asarray(times)
"""Sort in increasing order"""
ind = np.argsort(times)
times = times[ind]
if times[0] < 0:
raise ValueError("Times can not be negative")
dt = times[1:] - times[0:-1]
nt = len(times)
correlations = np.zeros(nt)
"""Propagate obs2 to initial time"""
obs2_t = 1.0 * obs2
obs2_t = propagate(P, obs2_t, times[0])
correlations[0] = np.dot(obs1mu, obs2_t)
for i in range(nt - 1):
obs2_t = propagate(P, obs2_t, dt[i])
correlations[i + 1] = np.dot(obs1mu, obs2_t)
"""Cast back to original order of time points"""
correlations = correlations[ind]
return correlations |
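A usage sketch; for small dense matrices the result can be cross-checked against the definition cor(t) = (mu * obs1)' P^t obs2 (assumes the module-level helpers statdist and propagate are in scope):
import numpy as np

P = np.array([[0.9, 0.1], [0.2, 0.8]])
obs = np.array([1.0, 0.0])
corr = correlation_matvec(P, obs, times=[1, 2, 5])
mu = np.array([2.0 / 3.0, 1.0 / 3.0])   # stationary distribution of P
ref = [np.dot(mu * obs, np.linalg.matrix_power(P, t).dot(obs)) for t in [1, 2, 5]]
assert np.allclose(corr, ref)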
<SYSTEM_TASK:>
r"""Use matrix A to propagate vector x.
<END_TASK>
<USER_TASK:>
Description:
def propagate(A, x, N):
r"""Use matrix A to propagate vector x.
Parameters
----------
A : (M, M) scipy.sparse matrix
Matrix of propagator
x : (M, ) ndarray or scipy.sparse matrix
Vector to propagate
N : int
Number of steps to propagate
Returns
-------
y : (M, ) ndarray or scipy.sparse matrix
Propagated vector
""" |
y = 1.0 * x
for i in range(N):
y = A.dot(y)
return y |
<SYSTEM_TASK:>
Generates a realization of the Markov chain with transition matrix P.
<END_TASK>
<USER_TASK:>
Description:
def generate_traj(P, N, start=None, stop=None, dt=1):
"""
Generates a realization of the Markov chain with transition matrix P.
Parameters
----------
P : (n, n) ndarray
transition matrix
N : int
trajectory length
start : int, optional, default = None
starting state. If not given, will sample from the stationary distribution of P
stop : int or int-array-like, optional, default = None
stopping set. If given, the trajectory will be stopped before N steps
once a state of the stop set is reached
dt : int
trajectory will be saved every dt time steps.
Internally, the dt'th power of P is taken to ensure a more efficient simulation.
Returns
-------
traj_sliced : (N/dt, ) ndarray
A discrete trajectory with length N/dt
""" |
sampler = MarkovChainSampler(P, dt=dt)
return sampler.trajectory(N, start=start, stop=stop) |
<SYSTEM_TASK:>
Generates multiple realizations of the Markov chain with transition matrix P.
<END_TASK>
<USER_TASK:>
Description:
def generate_trajs(P, M, N, start=None, stop=None, dt=1):
"""
Generates multiple realizations of the Markov chain with transition matrix P.
Parameters
----------
P : (n, n) ndarray
transition matrix
M : int
number of trajectories
N : int
trajectory length
start : int, optional, default = None
starting state. If not given, will sample from the stationary distribution of P
stop : int or int-array-like, optional, default = None
stopping set. If given, the trajectory will be stopped before N steps
once a state of the stop set is reached
dt : int
trajectory will be saved every dt time steps.
Internally, the dt'th power of P is taken to ensure a more efficient simulation.
Returns
-------
trajs : list of M (N/dt,) ndarrays
List of M discrete trajectories, each of length N/dt
""" |
sampler = MarkovChainSampler(P, dt=dt)
return sampler.trajectories(M, N, start=start, stop=stop) |
<SYSTEM_TASK:>
r"""Transition matrix describing the Metropolis chain jumping
<END_TASK>
<USER_TASK:>
Description:
def transition_matrix_metropolis_1d(E, d=1.0):
r"""Transition matrix describing the Metropolis chain jumping
between neighbors in a discrete 1D energy landscape.
Parameters
----------
E : (M,) ndarray
Energies in units of kT
d : float (optional)
Diffusivity of the chain, d in (0, 1]
Returns
-------
P : (M, M) ndarray
Transition matrix of the Markov chain
Notes
-----
Transition probabilities are computed as
.. math::
p_{i,i-1} &=& 0.5 d \min \left\{ 1.0, \mathrm{e}^{-(E_{i-1} - E_i)} \right\}, \\
p_{i,i+1} &=& 0.5 d \min \left\{ 1.0, \mathrm{e}^{-(E_{i+1} - E_i)} \right\}, \\
p_{i,i} &=& 1.0 - p_{i,i-1} - p_{i,i+1}.
""" |
# check input
if (d <= 0 or d > 1):
raise ValueError('Diffusivity must be in (0,1]. Trying to set the invalid value ' + str(d))
# init
n = len(E)
P = np.zeros((n, n))
# set offdiagonals
P[0, 1] = 0.5 * d * min(1.0, math.exp(-(E[1] - E[0])))
for i in range(1, n - 1):
P[i, i - 1] = 0.5 * d * min(1.0, math.exp(-(E[i - 1] - E[i])))
P[i, i + 1] = 0.5 * d * min(1.0, math.exp(-(E[i + 1] - E[i])))
P[n - 1, n - 2] = 0.5 * d * min(1.0, math.exp(-(E[n - 2] - E[n - 1])))
# normalize
P += np.diag(1.0 - np.sum(P, axis=1))
# done
return P |
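For example, a five-state double-well landscape (energies here are illustrative) yields a row-stochastic matrix whose stationary weights follow the Boltzmann factors (a sketch, assuming the module's math and numpy imports):
import numpy as np

E = np.array([0.0, 3.0, 1.0, 3.0, 0.0])   # energies in kT
P = transition_matrix_metropolis_1d(E, d=1.0)
assert np.allclose(P.sum(axis=1), 1.0)    # rows are normalized
pi = np.exp(-E) / np.exp(-E).sum()        # Boltzmann distribution
assert np.allclose(pi.dot(P), pi)         # pi is stationary (detailed balance)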
<SYSTEM_TASK:>
Generates a trajectory realization of length N, starting from state s
<END_TASK>
<USER_TASK:>
Description:
def trajectory(self, N, start=None, stop=None):
"""
Generates a trajectory realization of length N, starting from state s
Parameters
----------
N : int
trajectory length
start : int, optional, default = None
starting state. If not given, will sample from the stationary distribution of P
stop : int or int-array-like, optional, default = None
stopping set. If given, the trajectory will be stopped before N steps
once a state of the stop set is reached
""" |
# check input
stop = types.ensure_int_vector_or_None(stop, require_order=False)
if start is None:
if self.mudist is None:
# compute mu, the stationary distribution of P
import msmtools.analysis as msmana
from scipy.stats import rv_discrete
mu = msmana.stationary_distribution(self.P)
self.mudist = rv_discrete(values=(np.arange(self.n), mu))
# sample starting point from mu
start = self.mudist.rvs()
# evaluate stopping set
stopat = np.ndarray((self.n), dtype=bool)
stopat[:] = False
if (stop is not None):
for s in np.array(stop):
stopat[s] = True
# result
traj = np.zeros(N, dtype=int)
traj[0] = start
# already at stopping state?
if stopat[traj[0]]:
return traj[:1]
# else run until end or stopping state
for t in range(1, N):
traj[t] = self.rgs[traj[t - 1]].rvs()
if stopat[traj[t]]:
return traj[:t+1]
# return
return traj |
<SYSTEM_TASK:>
Generates M trajectories, each of length N, starting from state s
<END_TASK>
<USER_TASK:>
Description:
def trajectories(self, M, N, start=None, stop=None):
"""
Generates M trajectories, each of length N, starting from state s
Parameters
----------
M : int
number of trajectories
N : int
trajectory length
start : int, optional, default = None
starting state. If not given, will sample from the stationary distribution of P
stop : int or int-array-like, optional, default = None
stopping set. If given, the trajectory will be stopped before N steps
once a state of the stop set is reached
""" |
trajs = [self.trajectory(N, start=start, stop=stop) for _ in range(M)]
return trajs |
<SYSTEM_TASK:>
splits the discrete trajectory into conditional sequences by starting state
<END_TASK>
<USER_TASK:>
Description:
def _split_sequences_singletraj(dtraj, nstates, lag):
""" splits the discrete trajectory into conditional sequences by starting state
Parameters
----------
dtraj : int-iterable
discrete trajectory
nstates : int
total number of discrete states
lag : int
lag time
""" |
sall = [[] for _ in range(nstates)]
res_states = []
res_seqs = []
for t in range(len(dtraj)-lag):
sall[dtraj[t]].append(dtraj[t+lag])
for i in range(nstates):
if len(sall[i]) > 0:
res_states.append(i)
res_seqs.append(np.array(sall[i]))
return res_states, res_seqs |
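A small worked example of the splitting (the collected entries are the states observed one lag after each visit to the starting state):
import numpy as np

dtraj = np.array([0, 0, 1, 0, 1, 1])
states, seqs = _split_sequences_singletraj(dtraj, nstates=2, lag=1)
# states == [0, 1]
# seqs[0] == array([0, 1, 1])  -> successors of visits to state 0
# seqs[1] == array([0, 1])     -> successors of visits to state 1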
<SYSTEM_TASK:>
splits the discrete trajectories into conditional sequences by starting state
<END_TASK>
<USER_TASK:>
Description:
def _split_sequences_multitraj(dtrajs, lag):
""" splits the discrete trajectories into conditional sequences by starting state
Parameters
----------
dtrajs : list of int-iterables
discrete trajectories
lag : int
lag time
""" |
n = number_of_states(dtrajs)
res = []
for i in range(n):
res.append([])
for dtraj in dtrajs:
states, seqs = _split_sequences_singletraj(dtraj, n, lag)
for i in range(len(states)):
res[states[i]].append(seqs[i])
return res |
<SYSTEM_TASK:>
Returns conditional sequence for transition i -> j given all conditional sequences
<END_TASK>
<USER_TASK:>
Description:
def _indicator_multitraj(ss, i, j):
""" Returns conditional sequence for transition i -> j given all conditional sequences """ |
iseqs = ss[i]
res = []
for iseq in iseqs:
x = np.zeros(len(iseq))
I = np.where(iseq == j)
x[I] = 1.0
res.append(x)
return res |
<SYSTEM_TASK:>
r""" Computes statistical inefficiencies of sliding-window transition counts at given lag
<END_TASK>
<USER_TASK:>
Description:
def statistical_inefficiencies(dtrajs, lag, C=None, truncate_acf=True, mact=2.0, n_jobs=1, callback=None):
r""" Computes statistical inefficiencies of sliding-window transition counts at given lag
Consider a discrete trajectory :math:`\{ x_t \}` with :math:`x_t \in \{1, ..., n\}`. For each starting state :math:`i`,
we collect the target sequence
.. math::
Y^{(i)} = \{ x_{t+\tau} \mid x_{t}=i \}
which contains the time-ordered target states at times :math:`t+\tau` whenever we started in state :math:`i`
at time :math:`t`. Then we define the indicator sequence:
.. math::
a^{(i,j)}_t (\tau) = \mathbf{1}(Y^{(i)}_t = j)
The statistical inefficiency for transition counts :math:`c_{ij}(\tau)` is computed as the statistical inefficiency
of the sequence :math:`a^{(i,j)}_t (\tau)`.
Parameters
----------
dtrajs : list of int-iterables
discrete trajectories
lag : int
lag time
C : scipy sparse matrix (n, n) or None
sliding window count matrix, if already available
truncate_acf : bool, optional, default=True
When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating
random noise
n_jobs : int, default=1
If greater than one, the function will be evaluated with multiple processes.
callback : callable, default=None
will be called for every statistical inefficiency computed (number of nonzero elements in count matrix).
If n_jobs is greater than one, the callback will be invoked per finished batch.
Returns
-------
I : scipy sparse matrix (n, n)
Statistical inefficiency matrix with a sparsity pattern identical to the sliding-window count matrix at the
same lag time. Will contain a statistical inefficiency :math:`I_{ij} \in (0,1]` whenever there is a count
:math:`c_{ij} > 0`. When there is no transition count (:math:`c_{ij} = 0`), the statistical inefficiency is 0.
See also
--------
msmtools.util.statistics.statistical_inefficiency
used to compute the statistical inefficiency for conditional trajectories
""" |
# count matrix
if C is None:
C = count_matrix_coo2_mult(dtrajs, lag, sliding=True, sparse=True)
if callback is not None:
if not callable(callback):
raise ValueError('Provided callback is not callable')
# split sequences
splitseq = _split_sequences_multitraj(dtrajs, lag)
# compute inefficiencies
I, J = C.nonzero()
if n_jobs > 1:
from multiprocessing.pool import Pool
from contextlib import closing
import tempfile
# to avoid pickling partial results, we store these in a numpy.memmap
ntf = tempfile.NamedTemporaryFile(delete=False)
arr = np.memmap(ntf.name, dtype=np.float64, mode='w+', shape=C.nnz)
#arr[:] = np.nan
gen = _arguments_generator(I, J, splitseq, truncate_acf=truncate_acf, mact=mact,
array=ntf.name, njobs=n_jobs)
if callback:
x = gen.n_blocks()
_callback = lambda _: callback(x)
else:
_callback = callback
with closing(Pool(n_jobs)) as pool:
result_async = [pool.apply_async(_wrapper, (args,), callback=_callback)
for args in gen]
[t.get() for t in result_async]
data = np.array(arr[:])
#assert np.all(np.isfinite(data))
import os
os.unlink(ntf.name)
else:
data = np.empty(C.nnz)
for index, (i, j) in enumerate(zip(I, J)):
data[index] = statistical_inefficiency(_indicator_multitraj(splitseq, i, j),
truncate_acf=truncate_acf, mact=mact)
if callback is not None:
callback(1)
res = csr_matrix((data, (I, J)), shape=C.shape)
return res |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def transition_matrix_non_reversible(C):
r"""
Estimates a non-reversible transition matrix from count matrix C
T_ij = c_ij / c_i where c_i = sum_j c_ij
Parameters
----------
C: ndarray, shape (n,n)
count matrix
Returns
-------
T: Estimated transition matrix
""" |
# multiply by 1.0 to make sure we're not doing integer division
rowsums = 1.0 * np.sum(C, axis=1)
if np.min(rowsums) <= 0:
raise ValueError(
"Transition matrix has row sum of " + str(np.min(rowsums)) + ". Must have strictly positive row sums.")
return np.divide(C, rowsums[:, np.newaxis]) |
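For instance:
import numpy as np

C = np.array([[6, 2], [1, 3]])
T = transition_matrix_non_reversible(C)
# rows normalized by their sums: [[0.75, 0.25], [0.25, 0.75]]
assert np.allclose(T.sum(axis=1), 1.0)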
<SYSTEM_TASK:>
r"""Compute time-correlation of obs1, or time-cross-correlation with obs2.
<END_TASK>
<USER_TASK:>
Description:
def time_correlation_direct_by_mtx_vec_prod(P, mu, obs1, obs2=None, time=1, start_values=None, return_P_k_obs=False):
r"""Compute time-correlation of obs1, or time-cross-correlation with obs2.
The time-correlation at time=k is computed by the matrix-vector expression:
cor(k) = obs1' diag(pi) P^k obs2
Parameters
----------
P : ndarray, shape=(n, n) or scipy.sparse matrix
Transition matrix
obs1 : ndarray, shape=(n)
Vector representing observable 1 on discrete states
obs2 : ndarray, shape=(n)
Vector representing observable 2 on discrete states. If not given,
the autocorrelation of obs1 will be computed
mu : ndarray, shape=(n)
stationary distribution vector.
time : int
time point at which the (auto)correlation will be evaluated.
start_values : tuple (time, ndarray <P^time, obs2>) (optional)
start the iterative calculation of the matrix power product from these values;
only useful when calling this function inside a loop over increasing times.
return_P_k_obs : bool
if True, the dot product <P^time, obs2> will be returned for further
calculations.
Returns
-------
cor(k) : float
correlation between observations
""" |
# input checks
if not isinstance(time, (int, np.integer)):
raise TypeError("given time (%s) is not an integer, but has type: %s"
% (str(time), type(time)))
if obs1.shape[0] != P.shape[0]:
raise ValueError("observable shape not compatible with given matrix")
if obs2 is None:
obs2 = obs1
# multiply element-wise obs1 and pi. this is obs1' diag(pi)
l = np.multiply(obs1, mu)
# raise transition matrix to power of time by substituting dot product
# <Pk, obs2> with something like <P, <P, obs2>>.
# This saves a lot of matrix matrix multiplications.
if start_values: # begin with a previous calculated val
P_i_obs = start_values[1]
# calculate difference properly!
time_prev = start_values[0]
t_diff = time - time_prev
r = range(t_diff)
else:
if time >= 2:
P_i_obs = np.dot(P, np.dot(P, obs2)) # vector <P, <P, obs2> := P^2 * obs
r = range(time - 2)
elif time == 1:
P_i_obs = np.dot(P, obs2) # P^1 = P*obs
r = range(0)
elif time == 0: # P^0 = I => I*obs2 = obs2
P_i_obs = obs2
r = range(0)
else:
raise ValueError("time must be a non-negative integer, got %s" % time)
for k in r: # remaining propagation steps (the first steps were handled above)
P_i_obs = np.dot(P, P_i_obs)
corr = np.dot(l, P_i_obs)
if return_P_k_obs:
return corr, (time, P_i_obs)
else:
return corr |
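The start_values/return_P_k_obs pair is meant for loops over increasing times, so P^k obs2 is not recomputed from scratch at every time point. A sketch:
import numpy as np

P = np.array([[0.9, 0.1], [0.2, 0.8]])
mu = np.array([2.0 / 3.0, 1.0 / 3.0])   # stationary distribution of P
obs = np.array([1.0, 0.0])
start = None
for t in [1, 2, 5, 10]:                 # increasing times reuse the cached product
    corr, start = time_correlation_direct_by_mtx_vec_prod(
        P, mu, obs, time=t, start_values=start, return_P_k_obs=True)
    print(t, corr)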
<SYSTEM_TASK:>
r"""Compute time-correlations of obs1, or time-cross-correlation with obs2.
<END_TASK>
<USER_TASK:>
Description:
def time_correlations_direct(P, pi, obs1, obs2=None, times=[1]):
r"""Compute time-correlations of obs1, or time-cross-correlation with obs2.
The time-correlation at time=k is computed by the matrix-vector expression:
cor(k) = obs1' diag(pi) P^k obs2
Parameters
----------
P : ndarray, shape=(n, n) or scipy.sparse matrix
Transition matrix
obs1 : ndarray, shape=(n)
Vector representing observable 1 on discrete states
obs2 : ndarray, shape=(n)
Vector representing observable 2 on discrete states. If not given,
the autocorrelation of obs1 will be computed
pi : ndarray, shape=(n)
stationary distribution vector
times : array-like, shape(n_t)
Vector of time points at which the (auto)correlation will be evaluated
Returns
-------
f : ndarray, shape=(n_t,)
Correlation values at the given time points
""" |
n_t = len(times)
times = np.sort(times) # sort it to use caching of previously computed correlations
f = np.zeros(n_t)
use_diagonalization = False
# maximum time > number of rows?
if times[-1] > P.shape[0]:
use_diagonalization = True
R, D, L = rdl_decomposition(P)
# discard imaginary parts if they are all zero
if not np.any(np.iscomplex(R)):
R = np.real(R)
if not np.any(np.iscomplex(D)):
D = np.real(D)
if not np.any(np.iscomplex(L)):
L = np.real(L)
rdl = (R, D, L)
if use_diagonalization:
for i in range(n_t):
f[i] = time_correlation_by_diagonalization(P, pi, obs1, obs2, times[i], rdl)
else:
start_values = None
for i in range(n_t):
f[i], start_values = \
time_correlation_direct_by_mtx_vec_prod(P, pi, obs1, obs2,
times[i], start_values, True)
return f |
<SYSTEM_TASK:>
r"""Set up augmented system and return.
<END_TASK>
<USER_TASK:>
Description:
def factor_aug(z, DPhival, G, A):
r"""Set up augmented system and return.
Parameters
----------
z : (N+P+M+M,) ndarray
Current iterate, z = (x, nu, l, s)
DPhival : LinearOperator
Jacobian of the variational inequality mapping
G : (M, N) ndarray or sparse matrix
Inequality constraints
A : (P, N) ndarray or sparse matrix
Equality constraints
Returns
-------
J : LinearOperator
Augmented system
""" |
M, N = G.shape
P, N = A.shape
"""Multiplier for inequality constraints"""
l = z[N+P:N+P+M]
"""Slacks"""
s = z[N+P+M:]
"""Sigma matrix"""
SIG = diags(l/s, 0)
# SIG = diags(l*s, 0)
"""Convert A"""
if not issparse(A):
A = csr_matrix(A)
"""Convert G"""
if not issparse(G):
G = csr_matrix(G)
"""Since we expect symmetric DPhival, we need to change A"""
sign = np.zeros(N)
sign[0:N//2] = 1.0
sign[N//2:] = -1.0
T = diags(sign, 0)
A_new = A.dot(T)
W = AugmentedSystem(DPhival, G, SIG, A_new)
return W |
<SYSTEM_TASK:>
r"""Returns the set of intermediate states
<END_TASK>
<USER_TASK:>
Description:
def I(self):
r"""Returns the set of intermediate states
""" |
return list(set(range(self.nstates)) - set(self._A) - set(self._B)) |
<SYSTEM_TASK:>
r"""Sums up the flux from the pathways given
<END_TASK>
<USER_TASK:>
Description:
def _pathways_to_flux(self, paths, pathfluxes, n=None):
r"""Sums up the flux from the pathways given
Parameters
-----------
paths : list of int-arrays
list of pathways
pathfluxes : double-array
array with path fluxes
n : int
number of states. If not set, will be automatically determined.
Returns
-------
flux : (n,n) ndarray of float
the flux containing the summed path fluxes
""" |
if (n is None):
n = 0
for p in paths:
n = max(n, np.max(p))
n += 1
# initialize flux
F = np.zeros((n, n))
for i in range(len(paths)):
p = paths[i]
for t in range(len(p) - 1):
F[p[t], p[t + 1]] += pathfluxes[i]
return F |
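A small worked example (as a method it needs an instance; "flux" below is a hypothetical ReactiveFlux object, but the logic itself only touches the arguments):
import numpy as np

paths = [np.array([0, 1, 3]), np.array([0, 2, 3])]
pathfluxes = np.array([0.7, 0.3])
# summing edge-wise gives F[0,1] = F[1,3] = 0.7 and F[0,2] = F[2,3] = 0.3
F = flux._pathways_to_flux(paths, pathfluxes)   # flux: a ReactiveFlux instance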
<SYSTEM_TASK:>
r"""Returns the main pathway part of the net flux comprising
<END_TASK>
<USER_TASK:>
Description:
def major_flux(self, fraction=0.9):
r"""Returns the main pathway part of the net flux comprising
at most the requested fraction of the full flux.
""" |
(paths, pathfluxes) = self.pathways(fraction=fraction)
return self._pathways_to_flux(paths, pathfluxes, n=self.nstates) |
<SYSTEM_TASK:>
r"""Computes the sets to coarse-grain the tpt flux to.
<END_TASK>
<USER_TASK:>
Description:
def _compute_coarse_sets(self, user_sets):
r"""Computes the sets to coarse-grain the tpt flux to.
Parameters
----------
user_sets : list of int-iterables
sets of states that shall be distinguished in the coarse-grained flux.
Returns
-------
(tpt_sets, A, B) with
tpt_sets : list of int-iterables
sets to compute tpt on. These sets still respect the boundary between
A, B and the intermediate tpt states.
A : int-iterable
set indexes in A
B : int-iterable
set indexes in B
Notes
-----
Given the sets that the user wants to distinguish, the
algorithm will create additional sets if necessary
* If states are missing in user_sets, they will be put into a
separate set
* If sets in user_sets are crossing the boundary between A, B and the
intermediates, they will be split at these boundaries. Thus each
set in user_sets can remain intact or be split into two or three
subsets
""" |
# set-ify everything
setA = set(self.A)
setB = set(self.B)
setI = set(self.I)
raw_sets = [set(user_set) for user_set in user_sets]
# anything missing? Compute all listed states
set_all = set(range(self.nstates))
set_all_user = []
for user_set in raw_sets:
set_all_user += user_set
set_all_user = set(set_all_user)
# ... and add all the unlisted states in a separate set
set_rest = set_all - set_all_user
if len(set_rest) > 0:
raw_sets.append(set_rest)
# split sets
Asets = []
Isets = []
Bsets = []
for raw_set in raw_sets:
s = raw_set.intersection(setA)
if len(s) > 0:
Asets.append(s)
s = raw_set.intersection(setI)
if len(s) > 0:
Isets.append(s)
s = raw_set.intersection(setB)
if len(s) > 0:
Bsets.append(s)
tpt_sets = Asets + Isets + Bsets
Aindexes = list(range(0, len(Asets)))
Bindexes = list(range(len(Asets) + len(Isets), len(tpt_sets)))
return (tpt_sets, Aindexes, Bindexes) |
<SYSTEM_TASK:>
r"""Coarse-grains the flux onto user-defined sets.
<END_TASK>
<USER_TASK:>
Description:
def coarse_grain(self, user_sets):
r"""Coarse-grains the flux onto user-defined sets.
Parameters
----------
user_sets : list of int-iterables
sets of states that shall be distinguished in the coarse-grained flux.
Returns
-------
(sets, tpt) : (list of int-iterables, tpt-object)
sets contains the sets tpt is computed on. The tpt states of the new
tpt object correspond to these sets of states in this order. Sets might
be identical, if the user has already provided a complete partition that
respects the boundary between A, B and the intermediates. If not, Sets
will have more members than provided by the user, containing the
"remainder" states and reflecting the splitting at the A and B
boundaries.
tpt contains a new tpt object for the coarse-grained flux. All its
quantities (gross_flux, net_flux, A, B, committor, backward_committor)
are coarse-grained to sets.
Notes
-----
All user-specified sets will be split (if necessary) to
preserve the boundary between A, B and the intermediate
states.
""" |
# coarse-grain sets
(tpt_sets, Aindexes, Bindexes) = self._compute_coarse_sets(user_sets)
nnew = len(tpt_sets)
# coarse-grain flux. Here we should branch between sparse and dense
# implementations, but currently there is only one.
F_coarse = tptapi.coarsegrain(self._gross_flux, tpt_sets)
Fnet_coarse = tptapi.to_netflux(F_coarse)
# coarse-grain stationary probability and committors - this can be done all dense
pstat_coarse = np.zeros((nnew))
forward_committor_coarse = np.zeros((nnew))
backward_committor_coarse = np.zeros((nnew))
for i in range(0, nnew):
I = list(tpt_sets[i])
muI = self._mu[I]
pstat_coarse[i] = np.sum(muI)
partialI = muI / pstat_coarse[i] # normalized stationary probability over I
forward_committor_coarse[i] = np.dot(partialI, self._qplus[I])
backward_committor_coarse[i] = np.dot(partialI, self._qminus[I])
res = ReactiveFlux(Aindexes, Bindexes, Fnet_coarse, mu=pstat_coarse,
qminus=backward_committor_coarse, qplus=forward_committor_coarse, gross_flux=F_coarse)
return (tpt_sets, res) |
<SYSTEM_TASK:>
r"""Forward committor for birth-and-death-chain.
<END_TASK>
<USER_TASK:>
Description:
def committor_forward(self, a, b):
r"""Forward committor for birth-and-death-chain.
The forward committor is the probability to hit
state b before hitting state a starting in state x,
u_x=P_x(T_b<T_a)
T_i is the first arrival time of the chain to state i,
T_i = inf( t>0 | X_t=i )
Parameters
----------
a : int
State index
b : int
State index
Returns
-------
u : (M,) ndarray
Vector of committor probabilities.
""" |
u = np.zeros(self.dim)
g = np.zeros(self.dim - 1)
g[0] = 1.0
g[1:] = np.cumprod(self.q[1:-1] / self.p[1:-1])
"""If a and b are equal the event T_b<T_a is impossible
for any starting state x so that the committor is
zero everywhere"""
if a == b:
return u
elif a < b:
"""Birth-death chain has to hit a before it can hit b"""
u[0:a + 1] = 0.0
"""Birth-death chain has to hit b before it can hit a"""
u[b:] = 1.0
"""Intermediate states are given in terms of sums of g"""
u[a + 1:b] = np.cumsum(g[a:b])[0:-1] / np.sum(g[a:b])
return u
else:
u[0:b + 1] = 1.0
u[a:] = 0.0
u[b + 1:a] = (np.cumsum(g[b:a])[0:-1] / np.sum(g[b:a]))[::-1]
return u |
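Since this is a method of a birth-and-death-chain class, here is a standalone sketch of the same intermediate-state formula (p and q are illustrative birth/death probabilities, with a=0 and b=dim-1):
import numpy as np

p = np.array([0.0, 0.3, 0.3, 0.3, 0.0])   # birth probabilities p_i = P(i -> i+1)
q = np.array([0.0, 0.2, 0.2, 0.2, 0.0])   # death probabilities q_i = P(i -> i-1)
dim, a, b = 5, 0, 4
g = np.zeros(dim - 1)
g[0] = 1.0
g[1:] = np.cumprod(q[1:-1] / p[1:-1])
u = np.zeros(dim)
u[b:] = 1.0
u[a + 1:b] = np.cumsum(g[a:b])[0:-1] / np.sum(g[a:b])
print(u)   # increases monotonically from 0 at a to 1 at b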
<SYSTEM_TASK:>
implementation of transition_matrix
<END_TASK>
<USER_TASK:>
Description:
def transition_matrix_non_reversible(C):
"""implementation of transition_matrix""" |
if not scipy.sparse.issparse(C):
C = scipy.sparse.csr_matrix(C)
rowsum = C.tocsr().sum(axis=1)
# catch div by zero
if np.min(rowsum) == 0.0:
raise ValueError("matrix C contains rows with sum zero.")
rowsum = np.array(1. / rowsum).flatten()
norm = scipy.sparse.diags(rowsum, 0)
return norm * C |
<SYSTEM_TASK:>
r"""Normalize transition matrix
<END_TASK>
<USER_TASK:>
Description:
def correct_transition_matrix(T, reversible=None):
r"""Normalize transition matrix
Fixes the row normalization of a transition matrix.
To be used with the reversible estimators to fix an almost converged
transition matrix.
Parameters
----------
T : (M, M) ndarray
matrix to correct
reversible : boolean
for future use
Returns
-------
(M, M) ndarray
corrected transition matrix
""" |
row_sums = T.sum(axis=1).A1
max_sum = np.max(row_sums)
if max_sum == 0.0:
max_sum = 1.0
return (T + scipy.sparse.diags(-row_sums+max_sum, 0)) / max_sum |
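A usage sketch (note the .A1 access means a scipy sparse matrix is expected as input):
import numpy as np
import scipy.sparse

T = scipy.sparse.csr_matrix(np.array([[0.89, 0.10],
                                      [0.20, 0.79]]))
T_fixed = correct_transition_matrix(T)
assert np.allclose(np.asarray(T_fixed.sum(axis=1)).ravel(), 1.0)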
<SYSTEM_TASK:>
r"""Equilibrium expectation of given observable.
<END_TASK>
<USER_TASK:>
Description:
def expectation(P, obs):
r"""Equilibrium expectation of given observable.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs : (M,) ndarray
Observable, represented as vector on state space
Returns
-------
x : float
Expectation value
""" |
pi = statdist(P)
return np.dot(pi, obs) |
<SYSTEM_TASK:>
r"""Time-correlation for equilibrium experiment - via decomposition.
<END_TASK>
<USER_TASK:>
Description:
def correlation_decomp(P, obs1, obs2=None, times=[1], k=None):
r"""Time-correlation for equilibrium experiment - via decomposition.
Parameters
----------
P : (M, M) ndarray
Transition matrix
obs1 : (M,) ndarray
Observable, represented as vector on state space
obs2 : (M,) ndarray (optional)
Second observable, for cross-correlations
times : list of int (optional)
List of times (in tau) at which to compute correlation
k : int (optional)
Number of eigenvalues and eigenvectors to use for computation
Returns
-------
correlations : ndarray
Correlation values at given times
""" |
if obs2 is None:
obs2 = obs1
R, D, L = rdl_decomposition(P, k=k)
"""Stationary vector"""
mu = L[0, :]
"""Extract eigenvalues"""
ev = np.diagonal(D)
"""Amplitudes"""
amplitudes = np.dot(mu * obs1, R) * np.dot(L, obs2)
"""Propgate eigenvalues"""
times = np.asarray(times)
ev_t = ev[np.newaxis, :] ** times[:, np.newaxis]
"""Compute result"""
res = np.dot(ev_t, amplitudes)
"""Truncate imaginary part - should be zero anyways"""
res = res.real
return res |
<SYSTEM_TASK:>
upload file to a channel
<END_TASK>
<USER_TASK:>
Description:
def upload_file(token, channel_name, file_name):
""" upload file to a channel """ |
slack = Slacker(token)
slack.files.upload(file_name, channels=channel_name) |
<SYSTEM_TASK:>
Run the minimization.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
"""Run the minimization.
Returns
-------
K : (N,N) ndarray
the optimal rate matrix
""" |
if self.verbose:
self.selftest()
self.count = 0
if self.verbose:
logging.info('initial value of the objective function is %f'
% self.function(self.initial))
theta0 = self.initial
theta, f, d = fmin_l_bfgs_b(self.function_and_gradient, theta0, fprime=None, args=(),
approx_grad=False, bounds=self.bounds, factr=self.tol,
pgtol=1.0E-11, disp=0, maxiter=self.maxiter, maxfun=self.maxiter, maxls=100)
if self.verbose:
logging.info('l_bfgs_b says: '+str(d))
logging.info('objective function value reached: %f' % f)
if d['warnflag'] != 0:
raise_or_warn(str(d), on_error=self.on_error, warning=NotConvergedWarning, exception=NotConvergedError)
K = np.zeros((self.N, self.N))
K[self.I, self.J] = theta / self.pi[self.I]
K[self.J, self.I] = theta / self.pi[self.J]
np.fill_diagonal(K, -np.sum(K, axis=1))
self.K = K
return K |
<SYSTEM_TASK:>
Return string with quotes changed to preferred_quote if possible.
<END_TASK>
<USER_TASK:>
Description:
def unify_quotes(token_string, preferred_quote):
"""Return string with quotes changed to preferred_quote if possible.""" |
bad_quote = {'"': "'",
"'": '"'}[preferred_quote]
allowed_starts = {
'': bad_quote,
'f': 'f' + bad_quote,
'b': 'b' + bad_quote
}
if not any(token_string.startswith(start)
for start in allowed_starts.values()):
return token_string
if token_string.count(bad_quote) != 2:
return token_string
if preferred_quote in token_string:
return token_string
assert token_string.endswith(bad_quote)
assert len(token_string) >= 2
for prefix, start in allowed_starts.items():
if token_string.startswith(start):
chars_to_strip_from_front = len(start)
return '{prefix}{preferred_quote}{token}{preferred_quote}'.format(
prefix=prefix,
preferred_quote=preferred_quote,
token=token_string[chars_to_strip_from_front:-1]
) |
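For example (a sketch):
print(unify_quotes('"hello"', "'"))   # -> 'hello'
print(unify_quotes('f"x={x}"', "'"))  # -> f'x={x}'
print(unify_quotes('"it\'s"', "'"))   # unchanged: contains the preferred quote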
<SYSTEM_TASK:>
Run quotes unifying on files.
<END_TASK>
<USER_TASK:>
Description:
def _main(argv, standard_out, standard_error):
"""Run quotes unifying on files.
Returns `1` if any quoting changes are still needed, otherwise
`None`.
""" |
import argparse
parser = argparse.ArgumentParser(description=__doc__, prog='unify')
parser.add_argument('-i', '--in-place', action='store_true',
help='make changes to files instead of printing diffs')
parser.add_argument('-c', '--check-only', action='store_true',
help='exit with a status code of 1 if any changes are'
' still needed')
parser.add_argument('-r', '--recursive', action='store_true',
help='drill down directories recursively')
parser.add_argument('--quote', help='preferred quote', choices=["'", '"'],
default="'")
parser.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('files', nargs='+',
help='files to format')
args = parser.parse_args(argv[1:])
filenames = list(set(args.files))
changes_needed = False
failure = False
while filenames:
name = filenames.pop(0)
if args.recursive and os.path.isdir(name):
for root, directories, children in os.walk(unicode(name)):
filenames += [os.path.join(root, f) for f in children
if f.endswith('.py') and
not f.startswith('.')]
directories[:] = [d for d in directories
if not d.startswith('.')]
else:
try:
if format_file(name, args=args, standard_out=standard_out):
changes_needed = True
except IOError as exception:
print(unicode(exception), file=standard_error)
failure = True
if failure or (args.check_only and changes_needed):
return 1 |
<SYSTEM_TASK:>
r"""Generate a count matrix from given microstate trajectory.
<END_TASK>
<USER_TASK:>
Description:
def count_matrix(dtraj, lag, sliding=True, sparse_return=True, nstates=None):
r"""Generate a count matrix from given microstate trajectory.
Parameters
----------
dtraj : array_like or list of array_like
Discretized trajectory or list of discretized trajectories
lag : int
Lagtime in trajectory steps
sliding : bool, optional
If true the sliding window approach
is used for transition counting.
sparse_return : bool (optional)
Whether to return a dense or a sparse matrix.
nstates : int, optional
Enforce a count-matrix with shape=(nstates, nstates)
Returns
-------
C : scipy.sparse.coo_matrix
The count matrix at given lag in coordinate list format.
Notes
-----
Transition counts can be obtained from a microstate trajectory using
two methods: counting at lag and sliding-window counting.
**Lag**
This approach will skip all points in the trajectory that are
separated from the last point by less than the given lagtime
:math:`\tau`.
Transition counts :math:`c_{ij}(\tau)` are generated according to
.. math:: c_{ij}(\tau) = \sum_{k=0}^{\left \lfloor \frac{N}{\tau} \right \rfloor -2}
\chi_{i}(X_{k\tau})\chi_{j}(X_{(k+1)\tau}).
:math:`\chi_{i}(x)` is the indicator function of :math:`i`, i.e.
:math:`\chi_{i}(x)=1` for :math:`x=i` and :math:`\chi_{i}(x)=0` for
:math:`x \neq i`.
**Sliding**
The sliding approach slides along the trajectory and counts all
transitions separated by the lagtime :math:`\tau`.
Transition counts :math:`c_{ij}(\tau)` are generated according to
.. math:: c_{ij}(\tau)=\sum_{k=0}^{N-\tau-1} \chi_{i}(X_{k}) \chi_{j}(X_{k+\tau}).
References
----------
.. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
Chodera, C Schuette and F Noe. 2011. Markov models of
molecular kinetics: Generation and validation. J Chem Phys
134: 174105
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import count_matrix
>>> dtraj = np.array([0, 0, 1, 0, 1, 1, 0])
>>> tau = 2
Use the sliding approach first
>>> C_sliding = count_matrix(dtraj, tau)
The generated matrix is a sparse matrix in CSR-format. For
convenient printing we convert it to a dense ndarray.
>>> C_sliding.toarray()
array([[ 1., 2.],
[ 1., 1.]])
Let us compare to the count-matrix we obtain using the lag
approach
>>> C_lag = count_matrix(dtraj, tau, sliding=False)
>>> C_lag.toarray()
array([[ 0., 1.],
[ 1., 1.]])
""" |
# convert dtraj input, if it contains out of nested python lists to
# a list of int ndarrays.
dtraj = _ensure_dtraj_list(dtraj)
return sparse.count_matrix.count_matrix_coo2_mult(dtraj, lag, sliding=sliding,
sparse=sparse_return, nstates=nstates) |
<SYSTEM_TASK:>
r"""Generates a randomly resampled count matrix given the input coordinates.
<END_TASK>
<USER_TASK:>
Description:
def bootstrap_counts(dtrajs, lagtime, corrlength=None):
r"""Generates a randomly resampled count matrix given the input coordinates.
Parameters
----------
dtrajs : array-like or array-like of array-like
single or multiple discrete trajectories. Every trajectory is assumed to be
a statistically independent realization. Note that this is often not true and
is a weakness with the present bootstrapping approach.
lagtime : int
the lag time at which the count matrix will be evaluated
corrlength : int, optional, default=None
the correlation length of the discrete trajectory. N / corrlength counts will be generated,
where N is the total number of frames. If set to None (default), corrlength = lagtime will be used.
Notes
-----
This function can be called multiple times in order to generate randomly
resampled realizations of count matrices. For each of these realizations
you can estimate a transition matrix, and from each of them compute the
observables of your interest. The standard deviation of such a sample of
the observable is a model for the standard error.
The bootstrap will be generated by sampling N/corrlength counts at time tuples (t, t+lagtime),
where t is uniformly sampled over all trajectory time frames in [0,n_i-lagtime].
Here, n_i is the length of trajectory i and N = sum_i n_i is the total number of frames.
See also
--------
bootstrap_trajectories
""" |
dtrajs = _ensure_dtraj_list(dtrajs)
return dense.bootstrapping.bootstrap_counts(dtrajs, lagtime, corrlength=corrlength) |
<SYSTEM_TASK:>
r"""Compute connected sets of microstates.
<END_TASK>
<USER_TASK:>
Description:
def connected_sets(C, directed=True):
r"""Compute connected sets of microstates.
Connected components for a directed graph with edge-weights
given by the count matrix.
Parameters
----------
C : scipy.sparse matrix
Count matrix specifying edge weights.
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True.
Returns
-------
cc : list of arrays of integers
Each entry is an array containing all vertices (states) in the
corresponding connected component. The list is sorted
according to the size of the individual components. The
largest connected set is the first entry in the list, lcc=cc[0].
Notes
-----
Viewing the count matrix as the adjacency matrix of a (directed) graph
the connected components are given by the connected components of that
graph. Connected components of a graph can be efficiently computed
using Tarjan's algorithm.
References
----------
.. [1] Tarjan, R E. 1972. Depth-first search and linear graph
algorithms. SIAM Journal on Computing 1 (2): 146-160.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import connected_sets
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 0, 4]])
>>> cc_directed = connected_sets(C)
>>> cc_directed
[array([0, 1]), array([2])]
>>> cc_undirected = connected_sets(C, directed=False)
>>> cc_undirected
[array([0, 1, 2])]
""" |
if isdense(C):
return sparse.connectivity.connected_sets(csr_matrix(C), directed=directed)
else:
return sparse.connectivity.connected_sets(C, directed=directed) |
<SYSTEM_TASK:>
r"""Largest connected component for a directed graph with edge-weights
<END_TASK>
<USER_TASK:>
Description:
def largest_connected_set(C, directed=True):
r"""Largest connected component for a directed graph with edge-weights
given by the count matrix.
Parameters
----------
C : scipy.sparse matrix
Count matrix specifying edge weights.
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True.
Returns
-------
lcc : array of integers
The largest connected component of the directed graph.
See also
--------
connected_sets
Notes
-----
Viewing the count matrix as the adjacency matrix of a (directed)
graph the largest connected set is the largest connected set of
nodes of the corresponding graph. The largest connected set of a graph
can be efficiently computed using Tarjan's algorithm.
References
----------
.. [1] Tarjan, R E. 1972. Depth-first search and linear graph
algorithms. SIAM Journal on Computing 1 (2): 146-160.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import largest_connected_set
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 0, 4]])
>>> lcc_directed = largest_connected_set(C)
>>> lcc_directed
array([0, 1])
>>> lcc_undirected = largest_connected_set(C, directed=False)
>>> lcc_undirected
array([0, 1, 2])
""" |
if isdense(C):
return sparse.connectivity.largest_connected_set(csr_matrix(C), directed=directed)
else:
return sparse.connectivity.largest_connected_set(C, directed=directed) |
<SYSTEM_TASK:>
r"""Compute the count matrix on the largest connected set.
<END_TASK>
<USER_TASK:>
Description:
def largest_connected_submatrix(C, directed=True, lcc=None):
r"""Compute the count matrix on the largest connected set.
Parameters
----------
C : scipy.sparse matrix
Count matrix specifying edge weights.
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True
lcc : (M,) ndarray, optional
The largest connected set
Returns
-------
C_cc : scipy.sparse matrix
Count matrix of largest completely
connected set of vertices (states)
See also
--------
largest_connected_set
Notes
-----
Viewing the count matrix as the adjacency matrix of a (directed)
graph the largest connected submatrix is the adjacency matrix of
the largest connected set of the corresponding graph. The largest
connected submatrix can be efficiently computed using Tarjan's algorithm.
References
----------
.. [1] Tarjan, R E. 1972. Depth-first search and linear graph
algorithms. SIAM Journal on Computing 1 (2): 146-160.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import largest_connected_submatrix
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 0, 4]])
>>> C_cc_directed = largest_connected_submatrix(C)
>>> C_cc_directed # doctest: +ELLIPSIS
array([[10, 1],
[ 2, 0]]...)
>>> C_cc_undirected = largest_connected_submatrix(C, directed=False)
>>> C_cc_undirected # doctest: +ELLIPSIS
array([[10, 1, 0],
[ 2, 0, 3],
[ 0, 0, 4]]...)
""" |
if isdense(C):
return sparse.connectivity.largest_connected_submatrix(csr_matrix(C), directed=directed, lcc=lcc).toarray()
else:
return sparse.connectivity.largest_connected_submatrix(C, directed=directed, lcc=lcc) |
<SYSTEM_TASK:>
Check connectivity of the given matrix.
<END_TASK>
<USER_TASK:>
Description:
def is_connected(C, directed=True):
"""Check connectivity of the given matrix.
Parameters
----------
C : scipy.sparse matrix
Count matrix specifying edge weights.
directed : bool, optional
Whether to compute connected components for a directed or
undirected graph. Default is True.
Returns
-------
is_connected: bool
True if C is connected, False otherwise.
See also
--------
largest_connected_submatrix
Notes
-----
A count matrix is connected if the graph having the count matrix
as adjacency matrix has a single connected component. Connectivity
of a graph can be efficiently checked using Tarjan's algorithm.
References
----------
.. [1] Tarjan, R E. 1972. Depth-first search and linear graph
algorithms. SIAM Journal on Computing 1 (2): 146-160.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import is_connected
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 0, 4]])
>>> is_connected(C)
False
>>> is_connected(C, directed=False)
True
""" |
if isdense(C):
return sparse.connectivity.is_connected(csr_matrix(C), directed=directed)
else:
return sparse.connectivity.is_connected(C, directed=directed) |
<SYSTEM_TASK:>
r"""Neighbor prior for the given count matrix.
<END_TASK>
<USER_TASK:>
Description:
def prior_neighbor(C, alpha=0.001):
r"""Neighbor prior for the given count matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
alpha : float (optional)
Value of prior counts
Returns
-------
B : (M, M) ndarray or scipy.sparse matrix
Prior count matrix
Notes
------
The neighbor prior :math:`b_{ij}` is defined as
.. math:: b_{ij}=\left \{ \begin{array}{rl}
\alpha & c_{ij}+c_{ji}>0 \\
0 & \text{else}
\end{array} \right .
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import prior_neighbor
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 1, 4]])
>>> B = prior_neighbor(C)
>>> B
array([[ 0.001, 0.001, 0. ],
[ 0.001, 0. , 0.001],
[ 0. , 0.001, 0.001]])
""" |
if isdense(C):
B = sparse.prior.prior_neighbor(csr_matrix(C), alpha=alpha)
return B.toarray()
else:
return sparse.prior.prior_neighbor(C, alpha=alpha) |
<SYSTEM_TASK:>
r"""Constant prior for given count matrix.
<END_TASK>
<USER_TASK:>
Description:
def prior_const(C, alpha=0.001):
r"""Constant prior for given count matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
alpha : float (optional)
Value of prior counts
Returns
-------
B : (M, M) ndarray
Prior count matrix
Notes
-----
The prior is defined as
.. math:: \begin{array}{rl} b_{ij}= \alpha & \forall i, j \end{array}
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import prior_const
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 1, 4]])
>>> B = prior_const(C)
>>> B
array([[ 0.001, 0.001, 0.001],
[ 0.001, 0.001, 0.001],
[ 0.001, 0.001, 0.001]])
""" |
if isdense(C):
return sparse.prior.prior_const(C, alpha=alpha)
else:
warnings.warn("Prior will be a dense matrix for sparse input")
return sparse.prior.prior_const(C, alpha=alpha) |
<SYSTEM_TASK:>
r"""Estimate the transition matrix from the given countmatrix.
<END_TASK>
<USER_TASK:>
Description:
def transition_matrix(C, reversible=False, mu=None, method='auto', **kwargs):
r"""Estimate the transition matrix from the given countmatrix.
Parameters
----------
C : numpy ndarray or scipy.sparse matrix
Count matrix
reversible : bool (optional)
If True restrict the ensemble of transition matrices
to those having a detailed balance symmetry otherwise
the likelihood optimization is carried out over the whole
space of stochastic matrices.
mu : array_like
The stationary distribution of the MLE transition matrix.
method : str
Select which implementation to use for the estimation.
One of 'auto', 'dense' and 'sparse', optional, default='auto'.
'dense' always selects the dense implementation, 'sparse' always selects
the sparse one.
'auto' selects the most efficient implementation according to
the sparsity structure of the matrix: if the occupation of the C
matrix is less than one third, select sparse. Else select dense.
The type of the T matrix returned always matches the type of the
C matrix, irrespective of the method that was used to compute it.
**kwargs: Optional algorithm-specific parameters. See below for special cases
Xinit : (M, M) ndarray
Optional parameter with reversible = True.
initial value for the matrix of absolute transition probabilities. Unless set otherwise,
will use X = diag(pi) T, where T is a nonreversible transition matrix estimated from C,
i.e. T_ij = c_ij / sum_k c_ik, and pi is its stationary distribution.
maxiter : 1000000 : int
Optional parameter with reversible = True.
maximum number of iterations before the method exits
maxerr : 1e-8 : float
Optional parameter with reversible = True.
convergence tolerance for transition matrix estimation.
This specifies the maximum change of the Euclidean norm of relative
stationary probabilities (:math:`x_i = \sum_k x_{ik}`). The relative stationary probability changes
:math:`e_i = (x_i^{(1)} - x_i^{(2)})/(x_i^{(1)} + x_i^{(2)})` are used in order to track changes in small
probabilities. The Euclidean norm of the change vector, :math:`|e_i|_2`, is compared to maxerr.
rev_pisym : bool, default=False
Fast computation of reversible transition matrix by normalizing
:math:`x_{ij} = \pi_i p_{ij} + \pi_j p_{ji}`. :math:`p_{ij}` is the direct
(nonreversible) estimate and :math:`\pi_i` is its stationary distribution.
This estimator is asymptotically unbiased but not maximum likelihood.
return_statdist : bool, default=False
Optional parameter with reversible = True.
If set to true, the stationary distribution is also returned
return_conv : bool, default=False
Optional parameter with reversible = True.
If set to true, the likelihood history and the pi_change history are returned.
warn_not_converged : bool, default=True
Prints a warning if not converged.
sparse_newton : bool, default=False
If True, use the experimental primal-dual interior-point solver for sparse input/computation method.
Returns
-------
P : (M, M) ndarray or scipy.sparse matrix
The MLE transition matrix. P has the same data type (dense or sparse)
as the input matrix C.
The reversible estimator returns by default only P, but may also return
(P,pi) or (P,lhist,pi_changes) or (P,pi,lhist,pi_changes) depending on the return settings
P : ndarray (n,n)
transition matrix. This is the only return for return_statdist = False, return_conv = False
(pi) : ndarray (n)
stationary distribution. Only returned if return_statdist = True
(lhist) : ndarray (k)
likelihood history. Has the length of the number of iterations needed.
Only returned if return_conv = True
(pi_changes) : ndarray (k)
history of changes in the stationary distribution. Has the length of the number of iterations needed.
Only returned if return_conv = True
Notes
-----
The transition matrix is a maximum likelihood estimate (MLE) of
the probability distribution of transition matrices with
parameters given by the count matrix.
References
----------
.. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
Chodera, C Schuette and F Noe. 2011. Markov models of
molecular kinetics: Generation and validation. J Chem Phys
134: 174105
.. [2] Bowman, G R, K A Beauchamp, G Boxer and V S Pande. 2009.
Progress and challenges in the automated construction of Markov state models for full protein systems.
J. Chem. Phys. 131: 124101
.. [3] Trendelkamp-Schroer, B, H Wu, F Paul and F. Noe. 2015
Estimation and uncertainty of reversible Markov models.
J. Chem. Phys. 143: 174101
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import transition_matrix
>>> C = np.array([[10, 1, 1], [2, 0, 3], [0, 1, 4]])
Non-reversible estimate
>>> T_nrev = transition_matrix(C)
>>> T_nrev
array([[ 0.83333333, 0.08333333, 0.08333333],
[ 0.4 , 0. , 0.6 ],
[ 0. , 0.2 , 0.8 ]])
Reversible estimate
>>> T_rev = transition_matrix(C, reversible=True)
>>> T_rev
array([[ 0.83333333, 0.10385551, 0.06281115],
[ 0.35074677, 0. , 0.64925323],
[ 0.04925323, 0.15074677, 0.8 ]])
Reversible estimate with given stationary vector
>>> mu = np.array([0.7, 0.01, 0.29])
>>> T_mu = transition_matrix(C, reversible=True, mu=mu)
>>> T_mu
array([[ 0.94771371, 0.00612645, 0.04615984],
[ 0.42885157, 0. , 0.57114843],
[ 0.11142031, 0.01969477, 0.86888491]])
""" |
if issparse(C):
sparse_input_type = True
elif isdense(C):
sparse_input_type = False
else:
raise NotImplementedError('C has an unknown type.')
if method == 'dense':
sparse_computation = False
elif method == 'sparse':
sparse_computation = True
elif method == 'auto':
# heuristically determine whether it is more efficient to do a dense or sparse computation
if sparse_input_type:
dof = C.getnnz()
else:
dof = np.count_nonzero(C)
dimension = C.shape[0]
if dimension*dimension < 3*dof:
sparse_computation = False
else:
sparse_computation = True
else:
raise ValueError(('method="%s" is not a valid choice. It should be one of '
'"dense", "sparse" or "auto".') % method)
# convert input type
if sparse_computation and not sparse_input_type:
C = coo_matrix(C)
if not sparse_computation and sparse_input_type:
C = C.toarray()
# honor the value of the keyword, not just its presence in kwargs
# (an explicit return_statdist=False would otherwise be treated as True)
return_statdist = kwargs.get('return_statdist', False)
kwargs['return_statdist'] = return_statdist
sparse_newton = kwargs.pop('sparse_newton', False)
if reversible:
rev_pisym = kwargs.pop('rev_pisym', False)
if mu is None:
if sparse_computation:
if rev_pisym:
result = sparse.transition_matrix.transition_matrix_reversible_pisym(C, **kwargs)
elif sparse_newton:
from msmtools.estimation.sparse.newton.mle_rev import solve_mle_rev
result = solve_mle_rev(C, **kwargs)
else:
result = sparse.mle_trev.mle_trev(C, **kwargs)
else:
if rev_pisym:
result = dense.transition_matrix.transition_matrix_reversible_pisym(C, **kwargs)
else:
result = dense.mle_trev.mle_trev(C, **kwargs)
else:
kwargs.pop('return_statdist') # pi given, keyword unknown by estimators.
if sparse_computation:
# Sparse, reversible, fixed pi (currently using dense with sparse conversion)
result = sparse.mle_trev_given_pi.mle_trev_given_pi(C, mu, **kwargs)
else:
result = dense.mle_trev_given_pi.mle_trev_given_pi(C, mu, **kwargs)
else: # nonreversible estimation
if mu is None:
if sparse_computation:
# Sparse, nonreversible
result = sparse.transition_matrix.transition_matrix_non_reversible(C)
else:
# Dense, nonreversible
result = dense.transition_matrix.transition_matrix_non_reversible(C)
# Both methods currently do not have an iterate of pi, so we compute it here for consistency.
if return_statdist:
from msmtools.analysis import stationary_distribution
mu = stationary_distribution(result)
else:
raise NotImplementedError('nonreversible mle with fixed stationary distribution not implemented.')
if return_statdist and isinstance(result, tuple):
T, mu = result
else:
T = result
# convert return type
if sparse_computation and not sparse_input_type:
T = T.toarray()
elif not sparse_computation and sparse_input_type:
T = csr_matrix(T)
if return_statdist:
return T, mu
return T |
<SYSTEM_TASK:>
r"""Log-likelihood of the count matrix given a transition matrix.
<END_TASK>
<USER_TASK:>
Description:
def log_likelihood(C, T):
r"""Log-likelihood of the count matrix given a transition matrix.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
T : (M, M) ndarray or scipy.sparse matrix
Transition matrix
Returns
-------
logL : float
Log-likelihood of the count matrix
Notes
-----
The likelihood of a set of observed transition counts
:math:`C=(c_{ij})` for a given transition matrix
:math:`P=(p_{ij})` is given by
.. math:: L(C|P)=\prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
The log-likelihood is given by
.. math:: l(C|P)=\sum_{i,j=1}^{M}c_{ij} \log p_{ij}.
The likelihood describes the probability of making an observation
:math:`C` for a given model :math:`P`.
Examples
--------
>>> import numpy as np
>>> from msmtools.estimation import log_likelihood
>>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
>>> C = np.array([[58, 7, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-38.2808034725...
>>> C = np.array([[58, 20, 0], [6, 0, 4], [0, 3, 21]])
>>> logL = log_likelihood(C, T)
>>> logL # doctest: +ELLIPSIS
-68.2144096814...
References
----------
.. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D
Chodera, C Schuette and F Noe. 2011. Markov models of
molecular kinetics: Generation and validation. J Chem Phys
134: 174105
""" |
if issparse(C) and issparse(T):
return sparse.likelihood.log_likelihood(C, T)
else:
# use the dense likelihood calculator for all other cases
# if a mix of dense/sparse C/T matrices is used, then both
# will be converted to ndarrays.
if not isinstance(C, np.ndarray):
C = np.array(C)
if not isinstance(T, np.ndarray):
T = np.array(T)
# computation is still efficient, because we only use terms
# for nonzero elements of T
nz = np.nonzero(T)
return np.dot(C[nz], np.log(T[nz])) |
<SYSTEM_TASK:>
r"""Covariance tensor for non-reversible transition matrix posterior.
<END_TASK>
<USER_TASK:>
Description:
def tmatrix_cov(C, k=None):
r"""Covariance tensor for non-reversible transition matrix posterior.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
k : int (optional)
Return only covariance matrix for entries in the k-th row of
the transition matrix
Returns
-------
cov : (M, M, M) ndarray
Covariance tensor for transition matrix posterior
Notes
-----
The posterior of non-reversible transition matrices is
.. math:: \mathbb{P}(T|C) \propto \prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
Each row in the transition matrix is distributed according to a
Dirichlet distribution with parameters given by the observed
transition counts :math:`c_{ij}`.
The covariance tensor
:math:`\text{cov}[p_{ij},p_{kl}]=\Sigma_{i,j,k,l}` is zero
whenever :math:`i \neq k` so that only :math:`\Sigma_{i,j,i,l}` is
returned.
""" |
if issparse(C):
warnings.warn("Covariance matrix will be dense for sparse input")
C = C.toarray()
return dense.covariance.tmatrix_cov(C, row=k) |
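A minimal usage sketch (a hypothetical call, assuming tmatrix_cov is exported by msmtools.estimation like the other estimators in this section); only the shapes are checked, since the tensor values are lengthy:
>>> import numpy as np
>>> from msmtools.estimation import tmatrix_cov
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 1, 4]])
>>> tmatrix_cov(C).shape
(3, 3, 3)
>>> tmatrix_cov(C, k=0).shape
(3, 3)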
<SYSTEM_TASK:>
r"""samples transition matrices from the posterior distribution
<END_TASK>
<USER_TASK:>
Description:
def sample_tmatrix(C, nsample=1, nsteps=None, reversible=False, mu=None, T0=None, return_statdist=False):
r"""samples transition matrices from the posterior distribution
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
nsample : int
number of samples to be drawn
nsteps : int, default=None
number of full Gibbs sampling sweeps internally done for each sample
returned. This option is meant to ensure approximately uncorrelated
samples for every call to sample(). If None, the number of steps will
be automatically determined based on the other options and the matrix
size. nsteps > 1 will only be used for reversible sampling, because
nonreversible sampling generates statistically independent transition
matrices every step.
reversible : bool
If true sample from the ensemble of transition matrices
restricted to those obeying a detailed balance condition,
else draw from the whole ensemble of stochastic matrices.
mu : array_like
A fixed stationary distribution. Transition matrices with that stationary distribution will be sampled
T0 : ndarray, shape=(n, n) or scipy.sparse matrix
Starting point of the MC chain of the sampling algorithm.
Has to obey the required constraints.
return_statdist : bool, optional, default = False
if true, will also return the stationary distribution.
Returns
-------
P : ndarray(n,n) or array of ndarray(n,n)
sampled transition matrix (or multiple matrices if nsample > 1)
Notes
-----
The transition matrix sampler generates transition matrices from
the posterior distribution. The posterior distribution is given as
a product of Dirichlet distributions
.. math:: \mathbb{P}(T|C) \propto \prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
See also
--------
tmatrix_sampler
""" |
if issparse(C):
_showSparseConversionWarning()
C = C.toarray()
sampler = tmatrix_sampler(C, reversible=reversible, mu=mu, T0=T0, nsteps=nsteps)
return sampler.sample(nsamples=nsample, return_statdist=return_statdist) |
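A brief, hedged usage sketch (the samples are random draws, so no matrix values are shown):
>>> import numpy as np
>>> from msmtools.estimation import sample_tmatrix
>>> C = np.array([[10, 1, 0], [2, 0, 3], [0, 1, 4]])
>>> Ts = sample_tmatrix(C, nsample=5, reversible=True)
>>> len(Ts)
5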
<SYSTEM_TASK:>
r"""Generate transition matrix sampler object.
<END_TASK>
<USER_TASK:>
Description:
def tmatrix_sampler(C, reversible=False, mu=None, T0=None, nsteps=None, prior='sparse'):
r"""Generate transition matrix sampler object.
Parameters
----------
C : (M, M) ndarray or scipy.sparse matrix
Count matrix
reversible : bool
If true sample from the ensemble of transition matrices
restricted to those obeying a detailed balance condition,
else draw from the whole ensemble of stochastic matrices.
mu : array_like
A fixed stationary distribution. Transition matrices with that
stationary distribution will be sampled
T0 : ndarray, shape=(n, n) or scipy.sparse matrix
Starting point of the MC chain of the sampling algorithm.
Has to obey the required constraints.
nsteps : int, default=None
number of full Gibbs sampling sweeps per sample. This option is meant
to ensure approximately uncorrelated samples for every call to
sample(). If None, the number of steps will be automatically determined
based on the other options and the matrix size. nsteps > 1 will only be
used for reversible sampling, because nonreversible sampling generates
statistically independent transition matrices every step.
Returns
-------
sampler : A :py:class:dense.tmatrix_sampler.TransitionMatrixSampler object that can be used to generate samples.
Notes
-----
The transition matrix sampler generates transition matrices from
the posterior distribution. The posterior distribution is given as
a product of Dirichlet distributions
.. math:: \mathbb{P}(T|C) \propto \prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right)
The method can generate samples from the posterior under the following constraints
**Reversible sampling**
Using an MCMC sampler outlined in [1]_ it is ensured that samples
from the posterior are reversible, i.e. there is a probability
vector :math:`(\mu_i)` such that :math:`\mu_i t_{ij} = \mu_j
t_{ji}` holds for all :math:`i,j`.
**Reversible sampling with fixed stationary vector**
Using an MCMC sampler outlined in [2]_ it is ensured that samples
from the posterior fulfill detailed balance with respect to a given
probability vector :math:`(\mu_i)`.
References
----------
.. [1] Noe, F. Probability distributions of molecular observables
computed from Markov state models. J Chem Phys 128: 244103 (2008)
.. [2] Trendelkamp-Schroer, B., H. Wu, F. Paul and F. Noe: Estimation and
uncertainty of reversible Markov models. J. Chem. Phys. 143: 174101 (2015)
""" |
if issparse(C):
_showSparseConversionWarning()
C = C.toarray()
from .dense.tmatrix_sampler import TransitionMatrixSampler
sampler = TransitionMatrixSampler(C, reversible=reversible, mu=mu, P0=T0,
nsteps=nsteps, prior=prior)
return sampler |
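A hedged sketch of the sampler object in use (reusing the count matrix from the previous example; the draw itself is random):
>>> sampler = tmatrix_sampler(C, reversible=True)
>>> P = sampler.sample()  # one (3, 3) reversible stochastic matrix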
<SYSTEM_TASK:>
r"""Remove all negative entries from sparse matrix.
<END_TASK>
<USER_TASK:>
Description:
def remove_negative_entries(A):
r"""Remove all negative entries from sparse matrix.
Aplus=max(0, A)
Parameters
----------
A : (M, M) scipy.sparse matrix
Input matrix
Returns
-------
Aplus : (M, M) scipy.sparse matrix
Input matrix with negative entries set to zero.
""" |
A = A.tocoo()
data = A.data
row = A.row
col = A.col
"""Positive entries"""
pos = data > 0.0
datap = data[pos]
rowp = row[pos]
colp = col[pos]
Aplus = coo_matrix((datap, (rowp, colp)), shape=A.shape)
return Aplus |
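A small worked example (exact array formatting may vary with the numpy version):
>>> from scipy.sparse import coo_matrix
>>> A = coo_matrix([[1.0, -2.0], [0.0, 3.0]])
>>> remove_negative_entries(A).toarray()
array([[1., 0.],
       [0., 3.]])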
<SYSTEM_TASK:>
r"""Compute the flux.
<END_TASK>
<USER_TASK:>
Description:
def flux_matrix(T, pi, qminus, qplus, netflux=True):
r"""Compute the flux.
Parameters
----------
T : (M, M) scipy.sparse matrix
Transition matrix
pi : (M,) ndarray
Stationary distribution corresponding to T
qminus : (M,) ndarray
Backward committor
qplus : (M,) ndarray
Forward committor
netflux : boolean
True: net flux matrix will be computed
False: gross flux matrix will be computed
Returns
-------
flux : (M, M) scipy.sparse matrix
Matrix of flux values between pairs of states.
""" |
D1 = diags((pi * qminus,), (0,))
D2 = diags((qplus,), (0,))
flux = D1.dot(T.dot(D2))
"""Remove self-fluxes"""
flux = flux - diags(flux.diagonal(), 0)
"""Return net or gross flux"""
if netflux:
return to_netflux(flux)
else:
return flux |
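A worked two-state sketch with source state 0 and sink state 1, where the only nonzero entry is the 0 -> 1 flux pi_0 * q^-_0 * t_01 * q^+_1 = 0.5 * 1.0 * 0.2 * 1.0 = 0.1 (the committors are chosen by hand here, not computed):
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> T = csr_matrix([[0.8, 0.2], [0.2, 0.8]])
>>> pi = np.array([0.5, 0.5])
>>> qminus = np.array([1.0, 0.0])  # backward committor
>>> qplus = np.array([0.0, 1.0])   # forward committor
>>> flux_matrix(T, pi, qminus, qplus).toarray()
array([[0. , 0.1],
       [0. , 0. ]])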
<SYSTEM_TASK:>
r"""Compute the netflux.
<END_TASK>
<USER_TASK:>
Description:
def to_netflux(flux):
r"""Compute the netflux.
f_ij^{+} = max(0, f_ij - f_ji)
for all pairs i,j
Parameters
----------
flux : (M, M) scipy.sparse matrix
Matrix of flux values between pairs of states.
Returns
-------
netflux : (M, M) scipy.sparse matrix
Matrix of netflux values between pairs of states.
""" |
netflux = flux - flux.T
"""Set negative entries to zero"""
netflux = remove_negative_entries(netflux)
return netflux |
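A small worked example; the net flux keeps only the positive part of f_ij - f_ji (0.3 - 0.1 = 0.2 in the forward direction):
>>> from scipy.sparse import coo_matrix
>>> F = coo_matrix([[0.0, 0.3], [0.1, 0.0]])
>>> to_netflux(F).toarray()
array([[0. , 0.2],
       [0. , 0. ]])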
<SYSTEM_TASK:>
r"""Compute the total flux between reactant and product.
<END_TASK>
<USER_TASK:>
Description:
def total_flux(flux, A):
r"""Compute the total flux between reactant and product.
Parameters
----------
flux : (M, M) scipy.sparse matrix
Matrix of flux values between pairs of states.
A : array_like
List of integer state labels for set A (reactant)
Returns
-------
F : float
The total flux between reactant and product
""" |
X = set(np.arange(flux.shape[0])) # total state space
A = set(A)
notA = X.difference(A)
"""Extract rows corresponding to A"""
W = flux.tocsr()
W = W[list(A), :]
"""Extract columns corresonding to X\A"""
W = W.tocsc()
W = W[:, list(notA)]
F = W.sum()
return F |
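A minimal sketch on the net flux matrix from the previous example (float() is used only to keep the printed scalar type stable across numpy versions):
>>> from scipy.sparse import coo_matrix
>>> F = coo_matrix([[0.0, 0.2], [0.0, 0.0]])
>>> float(total_flux(F, [0]))  # flux out of reactant set A = {0}
0.2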
<SYSTEM_TASK:>
r"""Calculate the sensitivity matrix for entry j the stationary
<END_TASK>
<USER_TASK:>
Description:
def stationary_distribution_sensitivity(T, j):
r"""Calculate the sensitivity matrix for entry j the stationary
distribution vector given transition matrix T.
Parameters
----------
T : numpy.ndarray shape = (n, n)
Transition matrix
j : int
entry of stationary distribution for which the sensitivity is to be computed
Returns
-------
x : ndarray, shape=(n, n)
Sensitivity matrix for entry j of the stationary distribution around transition matrix T. Reversibility is not assumed.
Remark
------
Note, that this function uses a different normalization convention for the sensitivity compared to
eigenvector_sensitivity. See there for further information.
""" |
n = len(T)
lEV = numpy.ones(n)
rEV = stationary_distribution(T)
eVal = 1.0
T = numpy.transpose(T)
vecA = numpy.zeros(n)
vecA[j] = 1.0
matA = T - eVal * numpy.identity(n)
# normalize s.t. sum is one using rEV which is constant
matA = numpy.concatenate((matA, [lEV]))
phi = numpy.linalg.lstsq(numpy.transpose(matA), vecA, rcond=-1)
phi = numpy.delete(phi[0], -1)
sensitivity = -numpy.outer(rEV, phi) + numpy.dot(phi, rEV) * numpy.outer(rEV, lEV)
return sensitivity |
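A shape-level smoke test (the import path is an assumption — the function is exposed through the msmtools.analysis API — and the sensitivity values themselves are omitted here):
>>> import numpy as np
>>> from msmtools.analysis import stationary_distribution_sensitivity
>>> T = np.array([[0.9, 0.1], [0.5, 0.5]])
>>> stationary_distribution_sensitivity(T, 0).shape
(2, 2)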
<SYSTEM_TASK:>
Compute finite geometric series.
<END_TASK>
<USER_TASK:>
Description:
def geometric_series(q, n):
"""
Compute finite geometric series.
.. math:: \sum_{k=0}^{n} q^{k} = \begin{cases} \frac{1-q^{n+1}}{1-q} & q \neq 1 \\ n+1 & q = 1 \end{cases}
Parameters
----------
q : array-like
The common ratio of the geometric series.
n : int
The number of terms in the finite series.
Returns
-------
s : float or ndarray
The value of the finite series.
""" |
q = np.asarray(q)
if n < 0:
raise ValueError('Finite geometric series is only defined for n>=0.')
else:
"""q is scalar"""
if q.ndim == 0:
if q == 1:
s = (n + 1) * 1.0
return s
else:
s = (1.0 - q ** (n + 1)) / (1.0 - q)
return s
"""q is ndarray"""
s = np.zeros(np.shape(q), dtype=q.dtype)
"""All elements with value q=1"""
ind = (q == 1.0)
"""For q=1 the sum has the value s=n+1"""
s[ind] = (n + 1) * 1.0
"""All elements with value q\neq 1"""
not_ind = np.logical_not(ind)
s[not_ind] = (1.0 - q[not_ind] ** (n + 1)) / (1.0 - q[not_ind])
return s |
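Worked examples covering both scalar branches and the vectorized path (float() keeps the printed scalar type stable across numpy versions):
>>> import numpy as np
>>> float(geometric_series(0.5, 3))  # 1 + 0.5 + 0.25 + 0.125
1.875
>>> float(geometric_series(1.0, 3))  # q == 1 branch: n + 1
4.0
>>> geometric_series(np.array([0.5, 1.0]), 3)
array([1.875, 4.   ])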
<SYSTEM_TASK:>
Number of states
<END_TASK>
<USER_TASK:>
Description:
def solve_mle_rev(C, tol=1e-10, maxiter=100, show_progress=False, full_output=False,
return_statdist=True, **kwargs):
"""Number of states""" |
M = C.shape[0]
"""Initial guess for primal-point"""
z0 = np.zeros(2*M)
z0[0:M] = 1.0
"""Inequality constraints"""
# G = np.zeros((M, 2*M))
# G[np.arange(M), np.arange(M)] = -1.0
G = -1.0*scipy.sparse.eye(M, n=2*M, k=0)
h = np.zeros(M)
"""Equality constraints"""
A = np.zeros((1, 2*M))
A[0, M] = 1.0
b = np.array([0.0])
"""Scaling"""
c0 = C.max()
C = C/c0
"""Symmetric part"""
Cs = C + C.T
"""Column sum"""
c = C.sum(axis=0)
if scipy.sparse.issparse(C):
Cs = Cs.tocsr()
c = c.A1
A = scipy.sparse.csr_matrix(A)
F = objective_sparse.F
DF = objective_sparse.DFsym
convert_solution = objective_sparse.convert_solution
else:
F = objective_dense.F
DF = objective_dense.DF
convert_solution = objective_dense.convert_solution
"""PDIP iteration"""
res = primal_dual_solve(F, z0, DF, A, b, G, h,
args=(Cs, c),
maxiter=maxiter, tol=tol,
show_progress=show_progress,
full_output=full_output)
if full_output:
z, info = res
else:
z = res
pi, P = convert_solution(z, Cs)
result = [P]
if return_statdist:
result.append(pi)
if full_output:
result.append(info)
return tuple(result) if len(result) > 1 else result[0] |
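A hedged usage sketch (the import path matches the one referenced inside transition_matrix above; the outputs are iterative-solver results and are not shown):
>>> import numpy as np
>>> from msmtools.estimation.sparse.newton.mle_rev import solve_mle_rev
>>> C = np.array([[90.0, 10.0], [10.0, 90.0]])
>>> P, pi = solve_mle_rev(C)  # (P, pi) since return_statdist=True by default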
<SYSTEM_TASK:>
Match rule and set attribute codes.
<END_TASK>
<USER_TASK:>
Description:
def match(self, filename, line, codes):
"""Match rule and set attribute codes.""" |
if self.regex_match_any(line, codes):
if self._vary_codes:
self.codes = tuple([codes[-1]])
return True |
<SYSTEM_TASK:>
If a module is found, load it and save all attributes defined in it
<END_TASK>
<USER_TASK:>
Description:
def __extract_modules(self, loader, name, is_pkg):
""" if module found load module and save all attributes in the module found """ |
mod = loader.find_module(name).load_module(name)
""" find the attribute method on each module """
if hasattr(mod, '__method__'):
""" register to the blueprint if method attribute found """
module_router = ModuleRouter(mod,
ignore_names=self.__serialize_module_paths()
).register_route(app=self.application, name=name)
self.__routers.extend(module_router.routers)
self.__modules.append(mod)
else:
""" prompt not found notification """
# print('{} has no module attribute method'.format(mod))
pass |
<SYSTEM_TASK:>
Returns the number of months that have already passed in the given year.
<END_TASK>
<USER_TASK:>
Description:
def get_months_of_year(year):
"""
Returns the number of months that have already passed in the given year.
This is useful for calculating averages on the year view. For past years,
we should divide by 12, but for the current year, we should divide by
the current month.
""" |
current_year = now().year
if year == current_year:
return now().month
if year > current_year:
return 1
if year < current_year:
return 12 |
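Two deterministic cases (the current-year result depends on when the code runs, so it is omitted):
>>> get_months_of_year(1999)  # any past year
12
>>> get_months_of_year(2999)  # any future year
1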
<SYSTEM_TASK:>
Returns transaction totals grouped by Payee.
<END_TASK>
<USER_TASK:>
Description:
def get_totals_by_payee(self, account, start_date=None, end_date=None):
"""
Returns transaction totals grouped by Payee.
""" |
qs = Transaction.objects.filter(account=account, parent__isnull=True)
qs = qs.values('payee').annotate(models.Sum('value_gross'))
qs = qs.order_by('payee__name')
return qs |
<SYSTEM_TASK:>
Returns transactions that don't have an invoice.
<END_TASK>
<USER_TASK:>
Description:
def get_without_invoice(self):
"""
Returns transactions that don't have an invoice.
We filter out transactions that have children, because those
transactions never have invoices - their children are the ones that
would each have one invoice.
""" |
qs = Transaction.objects.filter(
children__isnull=True, invoice__isnull=True)
return qs |
<SYSTEM_TASK:>
Wrapped function for filtering enabled providers.
<END_TASK>
<USER_TASK:>
Description:
def _get_enabled():
"""Wrapped function for filtering enabled providers.""" |
providers = Provider.objects.all()
return [p for p in providers if p.enabled()] |
<SYSTEM_TASK:>
Run hg status.
<END_TASK>
<USER_TASK:>
Description:
def status_mercurial(path, ignore_set, options):
"""Run hg status.
Returns a 2-element tuple:
* Text lines describing the status of the repository.
* Empty sequence of subrepos, since hg does not support them.
""" |
lines = run(['hg', '--config', 'extensions.color=!', 'st'], cwd=path)
subrepos = ()
return [b' ' + l for l in lines if not l.startswith(b'?')], subrepos |
<SYSTEM_TASK:>
Run git status.
<END_TASK>
<USER_TASK:>
Description:
def status_git(path, ignore_set, options):
"""Run git status.
Returns a 2-element tuple:
* Text lines describing the status of the repository.
* List of subrepository paths, relative to the repository itself.
""" |
# Check whether current branch is dirty:
lines = [l for l in run(('git', 'status', '-s', '-b'), cwd=path)
if (options.untracked or not l.startswith(b'?'))
and not l.startswith(b'##')]
# Check all branches for unpushed commits:
lines += [l for l in run(('git', 'branch', '-v'), cwd=path)
if (b' [ahead ' in l)]
# Check for non-tracking branches:
if options.non_tracking:
lines += [l for l in run(('git', 'for-each-ref',
'--format=[%(refname:short)]%(upstream)',
'refs/heads'), cwd=path)
if l.endswith(b']')]
if options.stash:
lines += [l for l in run(('git', 'stash', 'list'), cwd=path)]
discovered_submodules = []
for l in run(('git', 'submodule', 'status'), cwd=path):
match = git_submodule.search(l)
if match:
discovered_submodules.append(match.group(1))
return lines, discovered_submodules |
<SYSTEM_TASK:>
Run svn status.
<END_TASK>
<USER_TASK:>
Description:
def status_subversion(path, ignore_set, options):
"""Run svn status.
Returns a 2-element tuple:
* Text lines describing the status of the repository.
* Empty sequence of subrepos, since svn does not support them.
""" |
subrepos = ()
if path in ignore_set:
return None, subrepos
keepers = []
for line in run(['svn', 'st', '-v'], cwd=path):
if not line.strip():
continue
if line.startswith(b'Performing') or line[0] in b'X?':
continue
status = line[:8]
ignored_states = options.ignore_svn_states
if ignored_states and status.strip() in ignored_states:
continue
filename = line[8:].split(None, 3)[-1]
ignore_set.add(os.path.join(path, filename))
if status.strip():
keepers.append(b' ' + status + filename)
return keepers, subrepos |
<SYSTEM_TASK:>
Get pep8 reporter state from stack.
<END_TASK>
<USER_TASK:>
Description:
def get_reporter_state():
"""Get pep8 reporter state from stack.""" |
# Stack
# 1. get_reporter_state (i.e. this function)
# 2. putty_ignore_code
# 3. QueueReport.error or pep8.StandardReport.error for flake8 -j 1
# 4. pep8.Checker.check_ast or check_physical or check_logical
# locals contains `tree` (ast) for check_ast
frame = sys._getframe(3)
reporter = frame.f_locals['self']
line_number = frame.f_locals['line_number']
offset = frame.f_locals['offset']
text = frame.f_locals['text']
check = frame.f_locals['check']
return reporter, line_number, offset, text, check |
<SYSTEM_TASK:>
Add options for command line and config file.
<END_TASK>
<USER_TASK:>
Description:
def add_options(cls, parser):
"""Add options for command line and config file.""" |
parser.add_option(
'--putty-select', metavar='errors', default='',
help='putty select list',
)
parser.add_option(
'--putty-ignore', metavar='errors', default='',
help='putty ignore list',
)
parser.add_option(
'--putty-no-auto-ignore', action='store_false',
dest='putty_auto_ignore', default=False,
help=(' (default) do not auto ignore lines matching '
'# flake8: disable=<code>,<code>'),
)
parser.add_option(
'--putty-auto-ignore', action='store_true',
dest='putty_auto_ignore', default=False,
help=('auto ignore lines matching '
'# flake8: disable=<code>,<code>'),
)
parser.config_options.append('putty-select')
parser.config_options.append('putty-ignore')
parser.config_options.append('putty-auto-ignore') |
<SYSTEM_TASK:>
Returns all invoices that are unpaid on freckle but have transactions.
<END_TASK>
<USER_TASK:>
Description:
def get_unpaid_invoices_with_transactions(branch=None):
"""
Returns all invoices that are unpaid on freckle but have transactions.
This means, that the invoice is either partially paid and can be left as
unpaid in freckle, or the invoice has been fully paid and should be set to
paid in freckle as well.
""" |
if not client: # pragma: nocover
return None
result = {}
try:
unpaid_invoices = client.fetch_json(
'invoices', query_params={'state': 'unpaid'})
except (ConnectionError, HTTPError): # pragma: nocover
result.update({'error': _('Wasn\'t able to connect to Freckle.')})
else:
invoices = []
for invoice in unpaid_invoices:
invoice_with_transactions = models.Invoice.objects.filter(
invoice_number=invoice['reference'],
transactions__isnull=False)
if branch:
invoice_with_transactions = invoice_with_transactions.filter(
branch=branch)
if invoice_with_transactions:
invoices.append(invoice)
result.update({'invoices': invoices})
return result |
<SYSTEM_TASK:>
Compare contents of two directories
<END_TASK>
<USER_TASK:>
Description:
def _compare(self, dir1, dir2):
""" Compare contents of two directories """ |
left = set()
right = set()
self._numdirs += 1
excl_patterns = set(self._exclude).union(self._ignore)
for cwd, dirs, files in os.walk(dir1):
self._numdirs += len(dirs)
for f in dirs + files:
path = os.path.relpath(os.path.join(cwd, f), dir1)
re_path = path.replace('\\', '/')
if self._only:
for pattern in self._only:
if re.match(pattern, re_path):
# go to exclude and ignore filtering
break
else:
# next item, this one does not match any pattern
# in the _only list
continue
add_path = False
for pattern in self._include:
if re.match(pattern, re_path):
add_path = True
break
else:
# path was not in includes
# test if it is in excludes
for pattern in excl_patterns:
if re.match(pattern, re_path):
# path is in excludes, do not add it
break
else:
# path was not in excludes
# it should be added
add_path = True
if add_path:
left.add(path)
anc_dirs = re_path[:-1].split('/')
for i in range(1, len(anc_dirs)):
left.add('/'.join(anc_dirs[:i]))
for cwd, dirs, files in os.walk(dir2):
for f in dirs + files:
path = os.path.relpath(os.path.join(cwd, f), dir2)
re_path = path.replace('\\', '/')
for pattern in self._ignore:
if re.match(pattern, re_path):
if f in dirs:
dirs.remove(f)
break
else:
right.add(path)
# no need to add the parent dirs here,
# as there is no _only pattern detection
if f in dirs and path not in left:
self._numdirs += 1
common = left.intersection(right)
left.difference_update(common)
right.difference_update(common)
return DCMP(left, right, common) |
<SYSTEM_TASK:>
Private method for doing work
<END_TASK>
<USER_TASK:>
Description:
def _dowork(self, dir1, dir2, copyfunc=None, updatefunc=None):
""" Private attribute for doing work """ |
if self._verbose:
self.log('Source directory: %s:' % dir1)
self._dcmp = self._compare(dir1, dir2)
# Files & directories only in target directory
if self._purge:
for f2 in self._dcmp.right_only:
fullf2 = os.path.join(self._dir2, f2)
if self._verbose:
self.log('Deleting %s' % fullf2)
try:
if os.path.isfile(fullf2):
try:
os.remove(fullf2)
self._deleted.append(fullf2)
self._numdelfiles += 1
except OSError as e:
self.log(str(e))
self._numdelffld += 1
elif os.path.isdir(fullf2):
try:
shutil.rmtree(fullf2, True)
self._deleted.append(fullf2)
self._numdeldirs += 1
except shutil.Error as e:
self.log(str(e))
self._numdeldfld += 1
except Exception as e: # of any use ?
self.log(str(e))
continue
# Files & directories only in source directory
for f1 in self._dcmp.left_only:
try:
st = os.stat(os.path.join(self._dir1, f1))
except os.error:
continue
if stat.S_ISREG(st.st_mode):
if copyfunc:
copyfunc(f1, self._dir1, self._dir2)
self._added.append(os.path.join(self._dir2, f1))
elif stat.S_ISDIR(st.st_mode):
to_make = os.path.join(self._dir2, f1)
if not os.path.exists(to_make):
os.makedirs(to_make)
self._numnewdirs += 1
self._added.append(to_make)
# common files/directories
for f1 in self._dcmp.common:
try:
st = os.stat(os.path.join(self._dir1, f1))
except os.error:
continue
if stat.S_ISREG(st.st_mode):
if updatefunc:
updatefunc(f1, self._dir1, self._dir2) |
<SYSTEM_TASK:>
Private function for copying a file
<END_TASK>
<USER_TASK:>
Description:
def _copy(self, filename, dir1, dir2):
""" Private function for copying a file """ |
# NOTE: dir1 is source & dir2 is target
if self._copyfiles:
rel_path = filename.replace('\\', '/').split('/')
rel_dir = '/'.join(rel_path[:-1])
filename = rel_path[-1]
dir2_root = dir2
dir1 = os.path.join(dir1, rel_dir)
dir2 = os.path.join(dir2, rel_dir)
if self._verbose:
self.log('Copying file %s from %s to %s' %
(filename, dir1, dir2))
try:
# source to target
if self._copydirection == 0 or self._copydirection == 2:
if not os.path.exists(dir2):
if self._forcecopy:
# note: 1911 (decimal) == 0o3567; the original code presumably meant 0o777
os.chmod(os.path.dirname(dir2_root), 1911)
try:
os.makedirs(dir2)
self._numnewdirs += 1
except OSError as e:
self.log(str(e))
self._numdirsfld += 1
if self._forcecopy:
os.chmod(dir2, 1911)  # 1911 (decimal) == 0o3567; presumably 0o777 was meant
sourcefile = os.path.join(dir1, filename)
try:
if os.path.islink(sourcefile):
os.symlink(os.readlink(sourcefile),
os.path.join(dir2, filename))
else:
shutil.copy2(sourcefile, dir2)
self._numfiles += 1
except (IOError, OSError) as e:
self.log(str(e))
self._numcopyfld += 1
if self._copydirection == 1 or self._copydirection == 2:
# target to source
if not os.path.exists(dir1):
if self._forcecopy:
# note: 1911 (decimal) == 0o3567; the original code presumably meant 0o777
os.chmod(os.path.dirname(self.dir1_root), 1911)
try:
os.makedirs(dir1)
self._numnewdirs += 1
except OSError as e:
self.log(str(e))
self._numdirsfld += 1
targetfile = os.path.abspath(os.path.join(dir1, filename))
if self._forcecopy:
os.chmod(dir1, 1911)  # 1911 (decimal) == 0o3567; presumably 0o777 was meant
sourcefile = os.path.join(dir2, filename)
try:
if os.path.islink(sourcefile):
os.symlink(os.readlink(sourcefile),
os.path.join(dir1, filename))
else:
shutil.copy2(sourcefile, targetfile)
self._numfiles += 1
except (IOError, OSError) as e:
self.log(str(e))
self._numcopyfld += 1
except Exception as e:
self.log('Error copying file %s' % filename)
self.log(str(e)) |
<SYSTEM_TASK:>
Private function for updating a file based on
<END_TASK>
<USER_TASK:>
Description:
def _update(self, filename, dir1, dir2):
""" Private function for updating a file based on
the last modification time stamp """
# NOTE: dir1 is source & dir2 is target
if self._updatefiles:
file1 = os.path.join(dir1, filename)
file2 = os.path.join(dir2, filename)
try:
st1 = os.stat(file1)
st2 = os.stat(file2)
except os.error:
return -1
# Update will update in both directions depending
# on the timestamp of the file & copy-direction.
if self._copydirection == 0 or self._copydirection == 2:
# Update file if file's modification time is older than
# source file's modification time, or creation time. Sometimes
# it so happens that a file's creation time is newer than it's
# modification time! (Seen this on windows)
if self._cmptimestamps(st1, st2):
if self._verbose:
# source to target
self.log('Updating file %s' % file2)
try:
if self._forcecopy:
os.chmod(file2, 1638)  # 1638 (decimal) == 0o3146; presumably 0o666 was meant
try:
if os.path.islink(file1):
os.symlink(os.readlink(file1), file2)
else:
shutil.copy2(file1, file2)
self._changed.append(file2)
self._numupdates += 1
return 0
except (IOError, OSError) as e:
self.log(str(e))
self._numupdsfld += 1
return -1
except Exception as e:
self.log(str(e))
return -1
if self._copydirection == 1 or self._copydirection == 2:
# Update file if file's modification time is older than
# source file's modification time, or creation time. Sometimes
# it so happens that a file's creation time is newer than it's
# modification time! (Seen this on windows)
if self._cmptimestamps(st2, st1):
if self._verbose:
# target to source
self.log('Updating file %s' % file1)
try:
if self._forcecopy:
os.chmod(file1, 1638)  # 1638 (decimal) == 0o3146; presumably 0o666 was meant
try:
if os.path.islink(file2):
os.symlink(os.readlink(file2), file1)
else:
shutil.copy2(file2, file1)
self._changed.append(file1)
self._numupdates += 1
return 0
except (IOError, OSError) as e:
self.log(str(e))
self._numupdsfld += 1
return -1
except Exception as e:
self.log(str(e))
return -1
return -1 |
<SYSTEM_TASK:>
Private function which does directory diff & copy
<END_TASK>
<USER_TASK:>
Description:
def _dirdiffandcopy(self, dir1, dir2):
"""
Private function which does directory diff & copy
""" |
self._dowork(dir1, dir2, self._copy) |
<SYSTEM_TASK:>
Private function which does directory diff & update
<END_TASK>
<USER_TASK:>
Description:
def _dirdiffandupdate(self, dir1, dir2):
"""
Private function which does directory diff & update
""" |
self._dowork(dir1, dir2, None, self._update) |
<SYSTEM_TASK:>
Private function which only does directory diff
<END_TASK>
<USER_TASK:>
Description:
def _diff(self, dir1, dir2):
"""
Private function which only does directory diff
""" |
self._dcmp = self._compare(dir1, dir2)
if self._dcmp.left_only:
self.log('Only in %s' % dir1)
for x in sorted(self._dcmp.left_only):
self.log('>> %s' % x)
self.log('')
if self._dcmp.right_only:
self.log('Only in %s' % dir2)
for x in sorted(self._dcmp.right_only):
self.log('<< %s' % x)
self.log('')
if self._dcmp.common:
self.log('Common to %s and %s' % (self._dir1, self._dir2))
for x in sorted(self._dcmp.common):
self.log('-- %s' % x)
else:
self.log('No common files or sub-directories!') |
<SYSTEM_TASK:>
Update will try to update the target directory
<END_TASK>
<USER_TASK:>
Description:
def update(self):
""" Update will try to update the target directory
w.r.t. the source directory. Only files that are common
to both directories will be updated; no new files
or directories are created """ |
self._copyfiles = False
self._updatefiles = True
self._purge = False
self._creatdirs = False
if self._verbose:
self.log('Updating directory %s with %s\n' %
(self._dir2, self._dir1))
self._dirdiffandupdate(self._dir1, self._dir2) |
<SYSTEM_TASK:>
Only report difference in content between two directories
<END_TASK>
<USER_TASK:>
Description:
def diff(self):
"""
Only report difference in content between two directories
""" |
self._copyfiles = False
self._updatefiles = False
self._purge = False
self._creatdirs = False
self.log('Difference of directory %s from %s\n' %
(self._dir2, self._dir1))
self._diff(self._dir1, self._dir2) |
<SYSTEM_TASK:>
Print report of work at the end
<END_TASK>
<USER_TASK:>
Description:
def report(self):
""" Print report of work at the end """ |
# keep only the first 4 characters of the elapsed seconds
tt = (str(self._endtime - self._starttime))[:4]
self.log('\n%s finished in %s seconds.' % (__pkg_name__, tt))
self.log('%d directories parsed, %d files copied' %
(self._numdirs, self._numfiles))
if self._numdelfiles:
self.log('%d files were purged.' % self._numdelfiles)
if self._numdeldirs:
self.log('%d directories were purged.' % self._numdeldirs)
if self._numnewdirs:
self.log('%d directories were created.' % self._numnewdirs)
if self._numupdates:
self.log('%d files were updated by timestamp.' % self._numupdates)
# Failure stats
self.log('')
if self._numcopyfld:
self.log('there were errors in copying %d files.'
% self._numcopyfld)
if self._numdirsfld:
self.log('there were errors in creating %d directories.'
% self._numdirsfld)
if self._numupdsfld:
self.log('there were errors in updating %d files.'
% self._numupdsfld)
if self._numdeldfld:
self.log('there were errors in purging %d directories.'
% self._numdeldfld)
if self._numdelffld:
self.log('there were errors in purging %d files.'
% self._numdelffld) |
<SYSTEM_TASK:>
Yields successive n-sized chunks from l.
<END_TASK>
<USER_TASK:>
Description:
def chunks(l, n):
"""
Yields successive n-sized chunks from l.
""" |
for i in _range(0, len(l), n):
yield l[i:i + n] |
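A quick doctest-style example (assuming _range is the usual range alias):
>>> list(chunks([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]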
<SYSTEM_TASK:>
Registers a new recipe class.
<END_TASK>
<USER_TASK:>
Description:
def register(self, recipe):
"""
Registers a new recipe class.
""" |
if not isinstance(recipe, (list, tuple)):
recipe = [recipe, ]
for item in recipe:
recipe = self.get_recipe_instance_from_class(item)
self._registry[recipe.slug] = recipe |
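For context, a hedged registration sketch (MyRecipe, RecipeA and RecipeB are hypothetical recipe classes, and the package-level register alias is an assumption):
>>> import badgify
>>> badgify.register(MyRecipe)            # a single recipe class
>>> badgify.register([RecipeA, RecipeB])  # or a list of classes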
<SYSTEM_TASK:>
Unregisters a given recipe class.
<END_TASK>
<USER_TASK:>
Description:
def unregister(self, recipe):
"""
Unregisters a given recipe class.
""" |
recipe = self.get_recipe_instance_from_class(recipe)
if recipe.slug in self._registry:
del self._registry[recipe.slug] |
<SYSTEM_TASK:>
Returns the recipe instance for the given badge slug.
<END_TASK>
<USER_TASK:>
Description:
def get_recipe_instance(self, badge):
"""
Returns the recipe instance for the given badge slug.
If badge has not been registered, raises ``exceptions.BadgeNotFound``.
""" |
from .exceptions import BadgeNotFound
if badge in self._registry:
return self.recipes[badge]
raise BadgeNotFound() |
<SYSTEM_TASK:>
Returns all recipe instances or just those for the given badges.
<END_TASK>
<USER_TASK:>
Description:
def get_recipe_instances(self, badges=None, excluded=None):
"""
Returns all recipe instances or just those for the given badges.
""" |
if badges:
if not isinstance(badges, (list, tuple)):
badges = [badges]
if excluded:
if not isinstance(excluded, (list, tuple)):
excluded = [excluded]
badges = list(set(self.registered) - set(excluded))
if badges:
valid, invalid = self.get_recipe_instances_for_badges(badges=badges)
return valid
return self.recipes.values() |
<SYSTEM_TASK:>
Initializes the modules stub based on your current YAML files
<END_TASK>
<USER_TASK:>
Description:
def _init_modules_stub(self, **_):
"""Initializes the modules stub based off of your current yaml files
Implements solution from
http://stackoverflow.com/questions/28166558/invalidmoduleerror-when-using-testbed-to-unit-test-google-app-engine
""" |
from google.appengine.api import request_info
# edit all_versions per modules & versions thereof needing tests
all_versions = {} # {'default': [1], 'andsome': [2], 'others': [1]}
def_versions = {} # {m: all_versions[m][0] for m in all_versions}
m2h = {} # {m: {def_versions[m]: 'localhost:8080'} for m in def_versions}
for module in self.configuration.modules:
module_name = module._module_name or 'default'
module_version = module._version or '1'
all_versions[module_name] = [module_version]
def_versions[module_name] = module_version
m2h[module_name] = {module_version: 'localhost:8080'}
request_info._local_dispatcher = request_info._LocalFakeDispatcher(
module_names=list(all_versions),
module_name_to_versions=all_versions,
module_name_to_default_versions=def_versions,
module_name_to_version_to_hostname=m2h)
self.testbed.init_modules_stub() |
<SYSTEM_TASK:>
Initializes all other stubs for consistency's sake
<END_TASK>
<USER_TASK:>
Description:
def _init_stub(self, stub_init, **stub_kwargs):
"""Initializes all other stubs for consistency's sake""" |
getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs) |
<SYSTEM_TASK:>
Returns an HTML snippet for an environment variable.
<END_TASK>
<USER_TASK:>
Description:
def html_for_env_var(key):
"""Returns an HTML snippet for an environment variable.
Args:
key: A string representing an environment variable name.
Returns:
String HTML representing the value and variable.
""" |
value = os.getenv(key)
return KEY_VALUE_TEMPLATE.format(key, value) |
<SYSTEM_TASK:>
Returns an HTML snippet for a CGI argument.
<END_TASK>
<USER_TASK:>
Description:
def html_for_cgi_argument(argument, form):
"""Returns an HTML snippet for a CGI argument.
Args:
argument: A string representing a CGI argument name in a form.
form: A CGI FieldStorage object.
Returns:
String HTML representing the CGI value and variable.
""" |
value = form[argument].value if argument in form else None
return KEY_VALUE_TEMPLATE.format(argument, value) |
<SYSTEM_TASK:>
Returns an HTML snippet for a Modules API method.
<END_TASK>
<USER_TASK:>
Description:
def html_for_modules_method(method_name, *args, **kwargs):
"""Returns an HTML snippet for a Modules API method.
Args:
method_name: A string containing a Modules API method.
args: Positional arguments to be passed to the method.
kwargs: Keyword arguments to be passed to the method.
Returns:
String HTML representing the Modules API method and value.
""" |
method = getattr(modules, method_name)
value = method(*args, **kwargs)
return KEY_VALUE_TEMPLATE.format(method_name, value) |
<SYSTEM_TASK:>
GET handler that serves environment data.
<END_TASK>
<USER_TASK:>
Description:
def get(self):
"""GET handler that serves environment data.""" |
environment_variables_output = [html_for_env_var(key)
for key in sorted(os.environ)]
cgi_arguments_output = []
if os.getenv('CONTENT_TYPE') == 'application/x-www-form-urlencoded':
# Note: a blank Content-type header will still sometimes
# (in dev_appserver) show up as 'application/x-www-form-urlencoded'
form = cgi.FieldStorage()
if not form:
cgi_arguments_output.append('No CGI arguments given...')
else:
for cgi_argument in form:
cgi_arguments_output.append(
html_for_cgi_argument(cgi_argument, form))
else:
data = ''
cgi_arguments_output.append(STDIN_TEMPLATE.format(len(data)))
cgi_arguments_output.append(cgi.escape(data))
modules_api_output = [
html_for_modules_method('get_current_module_name'),
html_for_modules_method('get_current_version_name'),
html_for_modules_method('get_current_instance_id'),
html_for_modules_method('get_modules'),
html_for_modules_method('get_versions'),
html_for_modules_method('get_default_version'),
html_for_modules_method('get_hostname'),
]
result = PAGE_TEMPLATE.format(
users.CreateLoginURL(self.request.url),
users.CreateLogoutURL(self.request.url),
'<br>\n'.join(environment_variables_output),
'<br>\n'.join(cgi_arguments_output),
'<br>\n'.join(modules_api_output),
)
self.response.write(result) |
<SYSTEM_TASK:>
Iterates over registered recipes and creates missing badges.
<END_TASK>
<USER_TASK:>
Description:
def sync_badges(**kwargs):
"""
Iterates over registered recipes and creates missing badges.
""" |
update = kwargs.get('update', False)
created_badges = []
instances = registry.get_recipe_instances()
for instance in instances:
reset_queries()
badge, created = instance.create_badge(update=update)
if created:
created_badges.append(badge)
log_queries(instance)
return created_badges |
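A hedged usage sketch (the badgify.commands module path is an assumption; sync_awards below follows the same calling pattern):
>>> from badgify.commands import sync_badges
>>> created = sync_badges(update=False)  # list of newly created Badge objects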
<SYSTEM_TASK:>
Iterates over registered recipes and possibly creates awards.
<END_TASK>
<USER_TASK:>
Description:
def sync_awards(**kwargs):
"""
Iterates over registered recipes and possibly creates awards.
""" |
badges = kwargs.get('badges')
excluded = kwargs.get('exclude_badges')
disable_signals = kwargs.get('disable_signals')
batch_size = kwargs.get('batch_size', None)
db_read = kwargs.get('db_read', None)
award_post_save = True
if disable_signals:
settings.AUTO_DENORMALIZE = False
award_post_save = False
instances = registry.get_recipe_instances(badges=badges, excluded=excluded)
for instance in instances:
reset_queries()
instance.create_awards(
batch_size=batch_size,
db_read=db_read,
post_save_signal=award_post_save)
log_queries(instance) |
<SYSTEM_TASK:>
Returns the queryset of current user ids.
<END_TASK>
<USER_TASK:>
Description:
def get_current_user_ids(self, db_read=None):
"""
Returns the queryset of current user ids.
""" |
db_read = db_read or self.db_read
return self.user_ids.using(db_read) |
<SYSTEM_TASK:>
Accepts two OrganizationName objects and returns an arbitrary,
<END_TASK>
<USER_TASK:>
Description:
def compare(cls, match, subject):
"""
Accepts two OrganizationName objects and returns an arbitrary,
numerical score based upon how well the names match.
""" |
if match.expand().lower() == subject.expand().lower():
return 4
elif match.kernel().lower() == subject.kernel().lower():
return 3
# law and lobbying firms in CRP data typically list only the first two partners
# before 'et al'
elif ',' in subject.expand(): # we may have a list of partners
if subject.crp_style_firm_name() == str(match).lower():
return 3
else:
return 2 |
<SYSTEM_TASK:>
Returns all badges or only awarded badges for the given user.
<END_TASK>
<USER_TASK:>
Description:
def badgify_badges(**kwargs):
"""
Returns all badges or only awarded badges for the given user.
""" |
User = get_user_model()
user = kwargs.get('user', None)
username = kwargs.get('username', None)
if username:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
pass
if user:
awards = Award.objects.filter(user=user).select_related('badge')
badges = [award.badge for award in awards]
return badges
return Badge.objects.all() |