<SYSTEM_TASK:>
Returns a figure with both FGA and basketball court lines drawn onto it.
<END_TASK>
<USER_TASK:>
Description:
def bokeh_shot_chart(data, x="LOC_X", y="LOC_Y", fill_color="#1f77b4",
scatter_size=10, fill_alpha=0.4, line_alpha=0.4,
court_line_color='gray', court_line_width=1,
hover_tool=False, tooltips=None, **kwargs):
# TODO: Settings for hover tooltip
"""
Returns a figure with both FGA and basketball court lines drawn onto it.
This function expects the shot data to have x and y values named
"LOC_X" and "LOC_Y". Otherwise, specify the column names via x and y.
Parameters
----------
data : DataFrame
The DataFrame that contains the shot chart data.
x, y : str, optional
The x and y coordinates of the shots taken.
fill_color : str, optional
The fill color of the shots. Can be a hex value.
scatter_size : int, optional
The size of the dots for the scatter plot.
fill_alpha : float, optional
Alpha value for the shots. Must be a floating point value between 0
(transparent) and 1 (opaque).
line_alpha : float, optional
Alpha value for the outer lines of the plotted shots. Must be a
floating point value between 0 (transparent) and 1 (opaque).
court_line_color : str, optional
The color of the court lines. Can be a hex value.
court_line_width : float, optional
The line width of the court lines, in pixels.
hover_tool : boolean, optional
If ``True``, creates hover tooltip for the plot.
tooltips : list of tuples, optional
Provides the information for the hover tooltip.
Returns
-------
fig : Figure
The Figure object with the shot chart plotted on it.
""" |
source = ColumnDataSource(data)
fig = figure(width=700, height=658, x_range=[-250, 250],
y_range=[422.5, -47.5], min_border=0, x_axis_type=None,
y_axis_type=None, outline_line_color="black", **kwargs)
fig.scatter(x, y, source=source, size=scatter_size, color=fill_color,
alpha=fill_alpha, line_alpha=line_alpha)
bokeh_draw_court(fig, line_color=court_line_color,
line_width=court_line_width)
if hover_tool:
hover = HoverTool(renderers=[fig.renderers[0]], tooltips=tooltips)
fig.add_tools(hover)
return fig |
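A minimal usage sketch, assuming ``bokeh_shot_chart`` and its court-drawing helper are importable and that the shot log lives in a DataFrame with the default column names (the values below are made up):

# Hypothetical usage: a tiny shot-log DataFrame rendered with the
# function above; column names follow the function's defaults.
import pandas as pd
from bokeh.plotting import show

shots = pd.DataFrame({"LOC_X": [-50, 0, 120], "LOC_Y": [30, 150, 240]})
fig = bokeh_shot_chart(shots, hover_tool=True,
                       tooltips=[("x", "@LOC_X"), ("y", "@LOC_Y")])
show(fig)  # opens the chart in a browser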
<SYSTEM_TASK:>
Run a single trial of k-medoids clustering
<END_TASK>
<USER_TASK:>
Description:
def _kmedoids_run(X, n_clusters, distance, max_iter, tol, rng):
""" Run a single trial of k-medoids clustering
on dataset X, and given number of clusters
""" |
membs = np.empty(shape=X.shape[0], dtype=int)
centers = kmeans._kmeans_init(X, n_clusters, method='', rng=rng)
sse_last = 9999.9
n_iter = 0
for it in range(1,max_iter):
membs = kmeans._assign_clusters(X, centers)
centers,sse_arr = _update_centers(X, membs, n_clusters, distance)
sse_total = np.sum(sse_arr)
if np.abs(sse_total - sse_last) < tol:
n_iter = it
break
sse_last = sse_total
return(centers, membs, sse_total, sse_arr, n_iter) |
<SYSTEM_TASK:>
Initialize mixture density parameters with
<END_TASK>
<USER_TASK:>
Description:
def _init_mixture_params(X, n_mixtures, init_method):
"""
Initialize mixture density parameters with:
- equal priors
- random means
- identity covariance matrices
""" |
init_priors = np.ones(shape=n_mixtures, dtype=float) / n_mixtures
if init_method == 'kmeans':
km = _kmeans.KMeans(n_clusters = n_mixtures, n_trials=20)
km.fit(X)
init_means = km.centers_
else:
inx_rand = np.random.choice(X.shape[0], size=n_mixtures)
init_means = X[inx_rand,:]
if np.any(np.isnan(init_means)):
raise ValueError("Init means are NaN! ")
n_features = X.shape[1]
init_covars = np.empty(shape=(n_mixtures, n_features, n_features), dtype=float)
for i in range(n_mixtures):
init_covars[i] = np.eye(n_features)
return(init_priors, init_means, init_covars) |
<SYSTEM_TASK:>
This is just a test function to calculate
<END_TASK>
<USER_TASK:>
Description:
def __log_density_single(x, mean, covar):
""" This is just a test function to calculate
the normal density at x given mean and covariance matrix.
Note: this function is not efficient, so
_log_multivariate_density is recommended for use.
""" |
n_dim = mean.shape[0]
dx = x - mean
covar_inv = scipy.linalg.inv(covar)
covar_det = scipy.linalg.det(covar)
den = np.dot(np.dot(dx.T, covar_inv), dx) + n_dim*np.log(2*np.pi) + np.log(covar_det)
return(-1/2 * den) |
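For reference, the value returned above is the log-density of a multivariate normal with mean :math:`\mu` and covariance :math:`\Sigma`, where :math:`d` is the dimensionality of :math:`x`:

.. math::
    \log \mathcal{N}(x \mid \mu, \Sigma) = -\frac{1}{2}\left[ (x-\mu)^\top \Sigma^{-1} (x-\mu) + d\,\log(2\pi) + \log\lvert\Sigma\rvert \right]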
<SYSTEM_TASK:>
Calculate the SSE to the cluster center
<END_TASK>
<USER_TASK:>
Description:
def _cal_dist2center(X, center):
""" Calculate the SSE to the cluster center
""" |
dmemb2cen = scipy.spatial.distance.cdist(X, center.reshape(1,X.shape[1]), metric='seuclidean')
return(np.sum(dmemb2cen)) |
<SYSTEM_TASK:>
Run a single trial of k-means clustering
<END_TASK>
<USER_TASK:>
Description:
def _kmeans_run(X, n_clusters, max_iter, tol):
""" Run a single trial of k-means clustering
on dataset X, and given number of clusters
""" |
membs = np.empty(shape=X.shape[0], dtype=int)
centers = _kmeans_init(X, n_clusters)
sse_last = 9999.9
n_iter = 0
for it in range(1,max_iter):
membs = _assign_clusters(X, centers)
centers,sse_arr = _update_centers(X, membs, n_clusters)
sse_total = np.sum(sse_arr)
if np.abs(sse_total - sse_last) < tol:
n_iter = it
break
sse_last = sse_total
return(centers, membs, sse_total, sse_arr, n_iter) |
<SYSTEM_TASK:>
Run multiple trials of k-means clustering,
<END_TASK>
<USER_TASK:>
Description:
def _kmeans(X, n_clusters, max_iter, n_trials, tol):
""" Run multiple trials of k-means clustering,
and output the best centers and cluster labels
""" |
n_samples, n_features = X.shape[0], X.shape[1]
centers_best = np.empty(shape=(n_clusters,n_features), dtype=float)
labels_best = np.empty(shape=n_samples, dtype=int)
for i in range(n_trials):
centers, labels, sse_tot, sse_arr, n_iter = _kmeans_run(X, n_clusters, max_iter, tol)
if i==0:
sse_tot_best = sse_tot
sse_arr_best = sse_arr
n_iter_best = n_iter
centers_best = centers.copy()
labels_best = labels.copy()
if sse_tot < sse_tot_best:
sse_tot_best = sse_tot
sse_arr_best = sse_arr
n_iter_best = n_iter
centers_best = centers.copy()
labels_best = labels.copy()
return(centers_best, labels_best, sse_arr_best, n_iter_best) |
<SYSTEM_TASK:>
Cut the tree to get desired number of clusters as n_clusters
<END_TASK>
<USER_TASK:>
Description:
def _cut_tree(tree, n_clusters, membs):
""" Cut the tree to get desired number of clusters as n_clusters
2 <= n_desired <= n_clusters
""" |
## starting from root,
## a node is added to the cut_set or
## its children are added to node_set
assert(n_clusters >= 2)
assert(n_clusters <= len(tree.leaves()))
cut_centers = dict() #np.empty(shape=(n_clusters, ndim), dtype=float)
for i in range(n_clusters-1):
if i==0:
search_set = set(tree.children(0))
node_set,cut_set = set(), set()
else:
search_set = node_set.union(cut_set)
node_set,cut_set = set(), set()
if i+2 == n_clusters:
cut_set = search_set
else:
for _ in range(len(search_set)):
n = search_set.pop()
if n.data['ilev'] is None or n.data['ilev']>i+2:
cut_set.add(n)
else:
nid = n.identifier
if n.data['ilev']-2==i:
node_set = node_set.union(set(tree.children(nid)))
conv_membs = membs.copy()
for node in cut_set:
nid = node.identifier
label = node.data['label']
cut_centers[label] = node.data['center']
sub_leaves = tree.leaves(nid)
for leaf in sub_leaves:
indx = np.where(conv_membs == leaf)[0]
conv_membs[indx] = nid
return(conv_membs, cut_centers) |
<SYSTEM_TASK:>
Add a node to the tree
<END_TASK>
<USER_TASK:>
Description:
def _add_tree_node(tree, label, ilev, X=None, size=None, center=None, sse=None, parent=None):
""" Add a node to the tree
if parent is not known, the node is a root
The nodes of this tree keep properties of each cluster/subcluster:
size --> cluster size as the number of points in the cluster
center --> mean of the cluster
label --> cluster label
sse --> sum-squared-error for that single cluster
ilev --> the level at which this node is split into 2 children
""" |
if size is None:
size = X.shape[0]
if (center is None):
center = np.mean(X, axis=0)
if (sse is None):
sse = _kmeans._cal_dist2center(X, center)
center = list(center)
datadict = {
'size' : size,
'center': center,
'label' : label,
'sse' : sse,
'ilev' : None
}
if (parent is None):
tree.create_node(label, label, data=datadict)
else:
tree.create_node(label, label, parent=parent, data=datadict)
tree.get_node(parent).data['ilev'] = ilev
return(tree) |
<SYSTEM_TASK:>
Apply Bisecting Kmeans clustering
<END_TASK>
<USER_TASK:>
Description:
def _bisect_kmeans(X, n_clusters, n_trials, max_iter, tol):
""" Apply Bisecting Kmeans clustering
to reach n_clusters number of clusters
""" |
membs = np.empty(shape=X.shape[0], dtype=int)
centers = dict() #np.empty(shape=(n_clusters,X.shape[1]), dtype=float)
sse_arr = dict() #-1.0*np.ones(shape=n_clusters, dtype=float)
## data structure to store cluster hierarchies
tree = treelib.Tree()
tree = _add_tree_node(tree, 0, ilev=0, X=X)
km = _kmeans.KMeans(n_clusters=2, n_trials=n_trials, max_iter=max_iter, tol=tol)
for i in range(1,n_clusters):
sel_clust_id,sel_memb_ids = _select_cluster_2_split(membs, tree)
X_sub = X[sel_memb_ids,:]
km.fit(X_sub)
#print("Bisecting Step %d :"%i, sel_clust_id, km.sse_arr_, km.centers_)
## Updating the clusters & properties
#sse_arr[[sel_clust_id,i]] = km.sse_arr_
#centers[[sel_clust_id,i]] = km.centers_
tree = _add_tree_node(tree, 2*i-1, i, \
size=np.sum(km.labels_ == 0), center=km.centers_[0], \
sse=km.sse_arr_[0], parent= sel_clust_id)
tree = _add_tree_node(tree, 2*i, i, \
size=np.sum(km.labels_ == 1), center=km.centers_[1], \
sse=km.sse_arr_[1], parent= sel_clust_id)
pred_labels = km.labels_
pred_labels[np.where(pred_labels == 1)[0]] = 2*i
pred_labels[np.where(pred_labels == 0)[0]] = 2*i - 1
#if sel_clust_id == 1:
# pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id
# pred_labels[np.where(pred_labels == 1)[0]] = i
#else:
# pred_labels[np.where(pred_labels == 1)[0]] = i
# pred_labels[np.where(pred_labels == 0)[0]] = sel_clust_id
membs[sel_memb_ids] = pred_labels
for n in tree.leaves():
label = n.data['label']
centers[label] = n.data['center']
sse_arr[label] = n.data['sse']
return(centers, membs, sse_arr, tree) |
<SYSTEM_TASK:>
Return a LaTeX ready table of model comparisons.
<END_TASK>
<USER_TASK:>
Description:
def comparison_table(self, caption=None, label="tab:model_comp", hlines=True,
aic=True, bic=True, dic=True, sort="bic", descending=True): # pragma: no cover
"""
Return a LaTeX ready table of model comparisons.
Parameters
----------
caption : str, optional
The table caption to insert.
label : str, optional
The table label to insert.
hlines : bool, optional
Whether to insert hlines in the table or not.
aic : bool, optional
Whether to include a column for AICc or not.
bic : bool, optional
Whether to include a column for BIC or not.
dic : bool, optional
Whether to include a column for DIC or not.
sort : str, optional
How to sort the models. Should be one of "bic", "aic" or "dic".
descending : bool, optional
The sort order.
Returns
-------
str
A LaTeX table to be copied into your document.
""" |
if sort == "bic":
assert bic, "You cannot sort by BIC if you turn it off"
if sort == "aic":
assert aic, "You cannot sort by AIC if you turn it off"
if sort == "dic":
assert dic, "You cannot sort by DIC if you turn it off"
if caption is None:
caption = ""
if label is None:
label = ""
base_string = get_latex_table_frame(caption, label)
end_text = " \\\\ \n"
num_cols = 1 + (1 if aic else 0) + (1 if bic else 0) + (1 if dic else 0)
column_text = "c" * num_cols
center_text = ""
hline_text = "\\hline\n"
if hlines:
center_text += hline_text
center_text += "\tModel" + (" & AIC" if aic else "") + (" & BIC " if bic else "") \
+ (" & DIC " if dic else "") + end_text
if hlines:
center_text += "\t" + hline_text
if aic:
aics = self.aic()
else:
aics = np.zeros(len(self.parent.chains))
if bic:
bics = self.bic()
else:
bics = np.zeros(len(self.parent.chains))
if dic:
dics = self.dic()
else:
dics = np.zeros(len(self.parent.chains))
if sort == "bic":
to_sort = bics
elif sort == "aic":
to_sort = aics
elif sort == "dic":
to_sort = dics
else:
raise ValueError("sort %s not recognised, must be dic, aic or dic" % sort)
good = [i for i, t in enumerate(to_sort) if t is not None]
names = [self.parent.chains[g].name for g in good]
aics = [aics[g] for g in good]
bics = [bics[g] for g in good]
dics = [dics[g] for g in good]
to_sort = {"bic": bics, "aic": aics, "dic": dics}[sort]
indexes = np.argsort(to_sort)
if descending:
indexes = indexes[::-1]
for i in indexes:
line = "\t" + names[i]
if aic:
line += " & %5.1f " % aics[i]
if bic:
line += " & %5.1f " % bics[i]
if dic:
line += " & %5.1f " % dics[i]
line += end_text
center_text += line
if hlines:
center_text += "\t" + hline_text
return base_string % (column_text, center_text) |
<SYSTEM_TASK:>
Plots the chain walk; the parameter values as a function of step index.
<END_TASK>
<USER_TASK:>
Description:
def plot_walks(self, parameters=None, truth=None, extents=None, display=False,
filename=None, chains=None, convolve=None, figsize=None,
plot_weights=True, plot_posterior=True, log_weight=None): # pragma: no cover
""" Plots the chain walk; the parameter values as a function of step index.
This plot is more for a sanity or consistency check than for use with final results.
Plotting this before plotting with :func:`plot` allows you to quickly see if the
chains are well behaved, or if certain parameters are suspect
or require a greater burn in period.
The desired outcome is to see an unchanging distribution along the x-axis of the plot.
If there are obvious tails or features in the parameters, you probably want
to investigate.
Parameters
----------
parameters : list[str]|int, optional
Specify a subset of parameters to plot. If not set, all parameters are plotted.
If an integer is given, only the first so many parameters are plotted.
truth : list[float]|dict[str], optional
A list of truth values corresponding to parameters, or a dictionary of
truth values keyed by the parameter.
extents : list[tuple]|dict[str], optional
A list of two-tuples for plot extents per parameter, or a dictionary of
extents keyed by the parameter.
display : bool, optional
If set, shows the plot using ``plt.show()``
filename : str, optional
If set, saves the figure to the filename
chains : int|str, list[str|int], optional
Used to specify which chain to show if more than one chain is loaded in.
Can be an integer, specifying the
chain index, or a str, specifying the chain name.
convolve : int, optional
If set, overplots a smoothed version of the steps using ``convolve`` as
the width of the smoothing filter.
figsize : tuple, optional
If set, sets the created figure size.
plot_weights : bool, optional
If true, plots the weight if they are available
plot_posterior : bool, optional
If true, plots the log posterior if they are available
log_weight : bool, optional
Whether to display weights in log space or not. If None, the value is
inferred by the mean weights of the plotted chains.
Returns
-------
figure
the matplotlib figure created
""" |
chains, parameters, truth, extents, _ = self._sanitise(chains, parameters, truth, extents)
n = len(parameters)
extra = 0
if plot_weights:
plot_weights = plot_weights and np.any([np.any(c.weights != 1.0) for c in chains])
plot_posterior = plot_posterior and np.any([c.posterior is not None for c in chains])
if plot_weights:
extra += 1
if plot_posterior:
extra += 1
if figsize is None:
figsize = (8, 0.75 + (n + extra))
fig, axes = plt.subplots(figsize=figsize, nrows=n + extra, squeeze=False, sharex=True)
for i, axes_row in enumerate(axes):
ax = axes_row[0]
if i >= extra:
p = parameters[i - extra]
for chain in chains:
if p in chain.parameters:
chain_row = chain.get_data(p)
self._plot_walk(ax, p, chain_row, extents=extents.get(p), convolve=convolve, color=chain.config["color"])
if truth.get(p) is not None:
self._plot_walk_truth(ax, truth.get(p))
else:
if i == 0 and plot_posterior:
for chain in chains:
if chain.posterior is not None:
self._plot_walk(ax, "$\log(P)$", chain.posterior - chain.posterior.max(),
convolve=convolve, color=chain.config["color"])
else:
if log_weight is None:
log_weight = np.any([chain.weights.mean() < 0.1 for chain in chains])
if log_weight:
for chain in chains:
self._plot_walk(ax, r"$\log_{10}(w)$", np.log10(chain.weights),
convolve=convolve, color=chain.config["color"])
else:
for chain in chains:
self._plot_walk(ax, "$w$", chain.weights,
convolve=convolve, color=chain.config["color"])
if filename is not None:
if isinstance(filename, str):
filename = [filename]
for f in filename:
self._save_fig(fig, f, 300)
if display:
plt.show()
return fig |
<SYSTEM_TASK:>
r""" Runs the Gelman Rubin diagnostic on the supplied chains.
<END_TASK>
<USER_TASK:>
Description:
def gelman_rubin(self, chain=None, threshold=0.05):
r""" Runs the Gelman Rubin diagnostic on the supplied chains.
Parameters
----------
chain : int|str, optional
Which chain to run the diagnostic on. By default, this is `None`,
which will run the diagnostic on all chains. You can also
supply an integer (the chain index) or a string (the chain
name, if you set one).
threshold : float, optional
The maximum deviation permitted from 1 for the final value
:math:`\hat{R}`
Returns
-------
float
whether or not the chains pass the test
Notes
-----
I follow PyMC in calculating the Gelman-Rubin statistic, where,
having :math:`m` chains of length :math:`n`, we compute
.. math::
B = \frac{n}{m-1} \sum_{j=1}^{m} \left(\bar{\theta}_{.j} - \bar{\theta}_{..}\right)^2
W = \frac{1}{m} \sum_{j=1}^{m} \left[ \frac{1}{n-1} \sum_{i=1}^{n} \left( \theta_{ij} - \bar{\theta_{.j}}\right)^2 \right]
where :math:`\theta` represents each model parameter. We then compute
:math:`\hat{V} = \frac{n-1}{n}W + \frac{1}{n}B`, and have our convergence ratio
:math:`\hat{R} = \sqrt{\frac{\hat{V}}{W}}`. We check that for all parameters,
this ratio deviates from unity by less than the supplied threshold.
""" |
if chain is None:
return np.all([self.gelman_rubin(k, threshold=threshold) for k in range(len(self.parent.chains))])
index = self.parent._get_chain(chain)
assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
chain = self.parent.chains[index[0]]
num_walkers = chain.walkers
parameters = chain.parameters
name = chain.name
data = chain.chain
chains = np.split(data, num_walkers)
assert num_walkers > 1, "Cannot run Gelman-Rubin statistic with only one walker"
m = 1.0 * len(chains)
n = 1.0 * chains[0].shape[0]
all_mean = np.mean(data, axis=0)
chain_means = np.array([np.mean(c, axis=0) for c in chains])
chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains])
b = n / (m - 1) * ((chain_means - all_mean)**2).sum(axis=0)
w = (1 / m) * chain_var.sum(axis=0)
var = (n - 1) * w / n + b / n
v = var + b / (n * m)
R = np.sqrt(v / w)
passed = np.abs(R - 1) < threshold
print("Gelman-Rubin Statistic values for chain %s" % name)
for p, v, pas in zip(parameters, R, passed):
param = "Param %d" % p if isinstance(p, int) else p
print("%s: %7.5f (%s)" % (param, v, "Passed" if pas else "Failed"))
return np.all(passed) |
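A short usage sketch, assuming the usual ChainConsumer entry point where diagnostics hang off the ``diagnostic`` attribute; the synthetic chain below is made up:

# Hypothetical sketch: the diagnostic requires a chain added with a
# walker count, so the samples can be split into per-walker sub-chains.
import numpy as np
from chainconsumer import ChainConsumer

c = ChainConsumer()
c.add_chain(np.random.normal(size=(8000, 2)), parameters=["$a$", "$b$"], walkers=4)
passed = c.diagnostic.gelman_rubin(threshold=0.05)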
<SYSTEM_TASK:>
Runs the Geweke diagnostic on the supplied chains.
<END_TASK>
<USER_TASK:>
Description:
def geweke(self, chain=None, first=0.1, last=0.5, threshold=0.05):
""" Runs the Geweke diagnostic on the supplied chains.
Parameters
----------
chain : int|str, optional
Which chain to run the diagnostic on. By default, this is `None`,
which will run the diagnostic on all chains. You can also
supply an integer (the chain index) or a string (the chain
name, if you set one).
first : float, optional
The amount of the start of the chain to use
last : float, optional
The end amount of the chain to use
threshold : float, optional
The p-value to use when testing for normality.
Returns
-------
float
whether or not the chains pass the test
""" |
if chain is None:
return np.all([self.geweke(k, threshold=threshold) for k in range(len(self.parent.chains))])
index = self.parent._get_chain(chain)
assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
chain = self.parent.chains[index[0]]
num_walkers = chain.walkers
assert num_walkers is not None and num_walkers > 0, \
"You need to specify the number of walkers to use the Geweke diagnostic."
name = chain.name
data = chain.chain
chains = np.split(data, num_walkers)
n = 1.0 * chains[0].shape[0]
n_start = int(np.floor(first * n))
n_end = int(np.floor((1 - last) * n))
mean_start = np.array([np.mean(c[:n_start, i])
for c in chains for i in range(c.shape[1])])
var_start = np.array([self._spec(c[:n_start, i]) / c[:n_start, i].size
for c in chains for i in range(c.shape[1])])
mean_end = np.array([np.mean(c[n_end:, i])
for c in chains for i in range(c.shape[1])])
var_end = np.array([self._spec(c[n_end:, i]) / c[n_end:, i].size
for c in chains for i in range(c.shape[1])])
zs = (mean_start - mean_end) / (np.sqrt(var_start + var_end))
_, pvalue = normaltest(zs)
print("Gweke Statistic for chain %s has p-value %e" % (name, pvalue))
return pvalue > threshold |
<SYSTEM_TASK:>
Generates a LaTeX table from parameter summaries.
<END_TASK>
<USER_TASK:>
Description:
def get_latex_table(self, parameters=None, transpose=False, caption=None,
label="tab:model_params", hlines=True, blank_fill="--"): # pragma: no cover
""" Generates a LaTeX table from parameter summaries.
Parameters
----------
parameters : list[str], optional
A list of what parameters to include in the table. By default, includes all parameters
transpose : bool, optional
Defaults to False, which gives each column as a parameter, each chain (framework)
as a row. You can swap it so that you have a parameter each row and a framework
each column by setting this to True
caption : str, optional
If you want to generate a caption for the table through Python, use this.
Defaults to an empty string
label : str, optional
If you want to generate a label for the table through Python, use this.
Defaults to ``tab:model_params``.
hlines : bool, optional
Inserts ``\\hline`` before and after the header, and at the end of table.
blank_fill : str, optional
If a framework does not have a particular parameter, will fill that cell of
the table with this string.
Returns
-------
str
the LaTeX table.
""" |
if parameters is None:
parameters = self.parent._all_parameters
for p in parameters:
assert isinstance(p, str), \
"Generating a LaTeX table requires all parameters have labels"
num_parameters = len(parameters)
num_chains = len(self.parent.chains)
fit_values = self.get_summary(squeeze=False)
if label is None:
label = ""
if caption is None:
caption = ""
end_text = " \\\\ \n"
if transpose:
column_text = "c" * (num_chains + 1)
else:
column_text = "c" * (num_parameters + 1)
center_text = ""
hline_text = "\\hline\n"
if hlines:
center_text += hline_text + "\t\t"
if transpose:
center_text += " & ".join(["Parameter"] + [c.name for c in self.parent.chains]) + end_text
if hlines:
center_text += "\t\t" + hline_text
for p in parameters:
arr = ["\t\t" + p]
for chain_res in fit_values:
if p in chain_res:
arr.append(self.get_parameter_text(*chain_res[p], wrap=True))
else:
arr.append(blank_fill)
center_text += " & ".join(arr) + end_text
else:
center_text += " & ".join(["Model"] + parameters) + end_text
if hlines:
center_text += "\t\t" + hline_text
for name, chain_res in zip([c.name for c in self.parent.chains], fit_values):
arr = ["\t\t" + name]
for p in parameters:
if p in chain_res:
arr.append(self.get_parameter_text(*chain_res[p], wrap=True))
else:
arr.append(blank_fill)
center_text += " & ".join(arr) + end_text
if hlines:
center_text += "\t\t" + hline_text
final_text = get_latex_table_frame(caption, label) % (column_text, center_text)
return final_text |
<SYSTEM_TASK:>
Gets a summary of the marginalised parameter distributions.
<END_TASK>
<USER_TASK:>
Description:
def get_summary(self, squeeze=True, parameters=None, chains=None):
""" Gets a summary of the marginalised parameter distributions.
Parameters
----------
squeeze : bool, optional
Squeeze the summaries. If you only have one chain, squeeze will not return
a length one list, just the single summary. If this is false, you will
get a length one list.
parameters : list[str], optional
A list of parameters which to generate summaries for.
chains : list[int|str], optional
A list of the chains to get a summary of.
Returns
-------
list of dictionaries
One entry per chain, parameter bounds stored in dictionary with parameter as key
""" |
results = []
if chains is None:
chains = self.parent.chains
else:
if isinstance(chains, (int, str)):
chains = [chains]
chains = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)]
for chain in chains:
res = {}
params_to_find = parameters if parameters is not None else chain.parameters
for p in params_to_find:
if p not in chain.parameters:
continue
summary = self.get_parameter_summary(chain, p)
res[p] = summary
results.append(res)
if squeeze and len(results) == 1:
return results[0]
return results |
<SYSTEM_TASK:>
Gets the maximum posterior point in parameter space from the passed parameters.
<END_TASK>
<USER_TASK:>
Description:
def get_max_posteriors(self, parameters=None, squeeze=True, chains=None):
""" Gets the maximum posterior point in parameter space from the passed parameters.
Requires the chains to have set `posterior` values.
Parameters
----------
parameters : str|list[str]
The parameters to find
squeeze : bool, optional
Squeeze the summaries. If you only have one chain, squeeze will not return
a length one list, just the single summary. If this is false, you will
get a length one list.
chains : list[int|str], optional
A list of the chains to get a summary of.
Returns
-------
list of two-tuples
One entry per chain, two-tuple represents the max-likelihood coordinate
""" |
results = []
if chains is None:
chains = self.parent.chains
else:
if isinstance(chains, (int, str)):
chains = [chains]
chains = [self.parent.chains[i] for c in chains for i in self.parent._get_chain(c)]
if isinstance(parameters, str):
parameters = [parameters]
for chain in chains:
if chain.posterior_max_index is None:
results.append(None)
continue
res = {}
params_to_find = parameters if parameters is not None else chain.parameters
for p in params_to_find:
if p in chain.parameters:
res[p] = chain.posterior_max_params[p]
results.append(res)
if squeeze and len(results) == 1:
return results[0]
return results |
<SYSTEM_TASK:>
Takes a chain and returns the correlation between chain parameters.
<END_TASK>
<USER_TASK:>
Description:
def get_correlations(self, chain=0, parameters=None):
"""
Takes a chain and returns the correlation between chain parameters.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
Returns
-------
tuple
The first index giving a list of parameter names, the second index being the
2D correlation matrix.
""" |
parameters, cov = self.get_covariance(chain=chain, parameters=parameters)
diag = np.sqrt(np.diag(cov))
divisor = diag[None, :] * diag[:, None]
correlations = cov / divisor
return parameters, correlations |
<SYSTEM_TASK:>
Takes a chain and returns the covariance between chain parameters.
<END_TASK>
<USER_TASK:>
Description:
def get_covariance(self, chain=0, parameters=None):
"""
Takes a chain and returns the covariance between chain parameters.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
Returns
-------
tuple
The first index giving a list of parameter names, the second index being the
2D covariance matrix.
""" |
index = self.parent._get_chain(chain)
assert len(index) == 1, "Please specify only one chain, have %d chains" % len(index)
chain = self.parent.chains[index[0]]
if parameters is None:
parameters = chain.parameters
data = chain.get_data(parameters)
cov = np.atleast_2d(np.cov(data, aweights=chain.weights, rowvar=False))
return parameters, cov |
<SYSTEM_TASK:>
Gets a LaTeX table of parameter correlations.
<END_TASK>
<USER_TASK:>
Description:
def get_correlation_table(self, chain=0, parameters=None, caption="Parameter Correlations",
label="tab:parameter_correlations"):
"""
Gets a LaTeX table of parameter correlations.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
caption : str, optional
The LaTeX table caption.
label : str, optional
The LaTeX table label.
Returns
-------
str
The LaTeX table ready to go!
""" |
parameters, cor = self.get_correlations(chain=chain, parameters=parameters)
return self._get_2d_latex_table(parameters, cor, caption, label) |
<SYSTEM_TASK:>
Gets a LaTeX table of parameter covariance.
<END_TASK>
<USER_TASK:>
Description:
def get_covariance_table(self, chain=0, parameters=None, caption="Parameter Covariance",
label="tab:parameter_covariance"):
"""
Gets a LaTeX table of parameter covariance.
Parameters
----------
chain : int|str, optional
The chain index or name. Defaults to first chain.
parameters : list[str], optional
The list of parameters to compute correlations. Defaults to all parameters
for the given chain.
caption : str, optional
The LaTeX table caption.
label : str, optional
The LaTeX table label.
Returns
-------
str
The LaTeX table ready to go!
""" |
parameters, cov = self.get_covariance(chain=chain, parameters=parameters)
return self._get_2d_latex_table(parameters, cov, caption, label) |
<SYSTEM_TASK:>
Generates LaTeX appropriate text from marginalised parameter bounds.
<END_TASK>
<USER_TASK:>
Description:
def get_parameter_text(self, lower, maximum, upper, wrap=False):
""" Generates LaTeX appropriate text from marginalised parameter bounds.
Parameters
----------
lower : float
The lower bound on the parameter
maximum : float
The value of the parameter with maximum probability
upper : float
The upper bound on the parameter
wrap : bool
Wrap output text in dollar signs for LaTeX
Returns
-------
str
The formatted text given the parameter bounds
""" |
if lower is None or upper is None:
return ""
upper_error = upper - maximum
lower_error = maximum - lower
if upper_error != 0 and lower_error != 0:
resolution = min(np.floor(np.log10(np.abs(upper_error))),
np.floor(np.log10(np.abs(lower_error))))
elif upper_error == 0 and lower_error != 0:
resolution = np.floor(np.log10(np.abs(lower_error)))
elif upper_error != 0 and lower_error == 0:
resolution = np.floor(np.log10(np.abs(upper_error)))
else:
resolution = np.floor(np.log10(np.abs(maximum)))
factor = 0
fmt = "%0.1f"
r = 1
if np.abs(resolution) > 2:
factor = -resolution
if resolution == 2:
fmt = "%0.0f"
factor = -1
r = 0
if resolution == 1:
fmt = "%0.0f"
if resolution == -1:
fmt = "%0.2f"
r = 2
elif resolution == -2:
fmt = "%0.3f"
r = 3
upper_error *= 10 ** factor
lower_error *= 10 ** factor
maximum *= 10 ** factor
upper_error = round(upper_error, r)
lower_error = round(lower_error, r)
maximum = round(maximum, r)
if maximum == -0.0:
maximum = 0.0
if resolution == 2:
upper_error *= 10 ** -factor
lower_error *= 10 ** -factor
maximum *= 10 ** -factor
factor = 0
fmt = "%0.0f"
upper_error_text = fmt % upper_error
lower_error_text = fmt % lower_error
if upper_error_text == lower_error_text:
text = r"%s\pm %s" % (fmt, "%s") % (maximum, lower_error_text)
else:
text = r"%s^{+%s}_{-%s}" % (fmt, "%s", "%s") % \
(maximum, upper_error_text, lower_error_text)
if factor != 0:
text = r"\left( %s \right) \times 10^{%d}" % (text, -factor)
if wrap:
text = "$%s$" % text
return text |
<SYSTEM_TASK:>
Removes a chain from ChainConsumer. Calling this will require any configurations set to be redone!
<END_TASK>
<USER_TASK:>
Description:
def remove_chain(self, chain=-1):
"""
Removes a chain from ChainConsumer. Calling this will require any configurations set to be redone!
Parameters
----------
chain : int|str, list[str|int]
The chain(s) to remove. You can pass in either the chain index, or the chain name, to remove it.
By default removes the last chain added.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
""" |
if isinstance(chain, str) or isinstance(chain, int):
chain = [chain]
chain = sorted([i for c in chain for i in self._get_chain(c)])[::-1]
assert len(chain) == len(list(set(chain))), "Error, you are trying to remove a chain more than once."
for index in chain:
del self.chains[index]
seen = set()
self._all_parameters = [p for c in self.chains for p in c.parameters if not (p in seen or seen.add(p))]
# Need to reconfigure
self._init_params()
return self |
<SYSTEM_TASK:>
Configure the arguments passed to the ``axvline`` and ``axhline``
<END_TASK>
<USER_TASK:>
Description:
def configure_truth(self, **kwargs): # pragma: no cover
""" Configure the arguments passed to the ``axvline`` and ``axhline``
methods when plotting truth values.
If you do not call this explicitly, the :func:`plot` method will
invoke this method automatically.
Recommended to set the parameters ``linestyle``, ``color`` and/or ``alpha``
if you want some basic control.
Default is to use an opaque black dashed line.
Parameters
----------
kwargs : dict
The keyword arguments to unwrap when calling ``axvline`` and ``axhline``.
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
""" |
if kwargs.get("ls") is None and kwargs.get("linestyle") is None:
kwargs["ls"] = "--"
kwargs["dashes"] = (3, 3)
if kwargs.get("color") is None:
kwargs["color"] = "#000000"
self.config_truth = kwargs
self._configured_truth = True
return self |
<SYSTEM_TASK:>
Returns a ChainConsumer instance containing all the walks of a given chain
<END_TASK>
<USER_TASK:>
Description:
def divide_chain(self, chain=0):
"""
Returns a ChainConsumer instance containing all the walks of a given chain
as individual chains themselves.
This method might be useful if, for example, your chain was made using
MCMC with 4 walkers. To check the sampling of all 4 walkers agree, you could
call this to get a ChainConsumer instance with one chain for each of the
four walkers. If you then plot them, hopefully all four contours
will agree.
Parameters
----------
chain : int|str, optional
The index or name of the chain you want divided
Returns
-------
ChainConsumer
A new ChainConsumer instance with the same settings as the parent instance, containing
``num_walker`` chains.
""" |
indexes = self._get_chain(chain)
con = ChainConsumer()
for index in indexes:
chain = self.chains[index]
assert chain.walkers is not None, "The chain you have selected was not added with any walkers!"
num_walkers = chain.walkers
data = np.split(chain.chain, num_walkers)
ws = np.split(chain.weights, num_walkers)
for j, (c, w) in enumerate(zip(data, ws)):
con.add_chain(c, weights=w, name="Chain %d" % j, parameters=chain.parameters)
return con |
<SYSTEM_TASK:>
Calculate motif score threshold for a given FPR.
<END_TASK>
<USER_TASK:>
Description:
def threshold(args):
"""Calculate motif score threshold for a given FPR.""" |
if args.fpr < 0 or args.fpr > 1:
print("Please specify a FPR between 0 and 1")
sys.exit(1)
motifs = read_motifs(args.pwmfile)
s = Scanner()
s.set_motifs(args.pwmfile)
s.set_threshold(args.fpr, filename=args.inputfile)
print("Motif\tScore\tCutoff")
for motif in motifs:
min_score = motif.pwm_min_score()
max_score = motif.pwm_max_score()
opt_score = s.threshold[motif.id]
if opt_score is None:
opt_score = motif.pwm_max_score()
threshold = (opt_score - min_score) / (max_score - min_score)
print("{0}\t{1}\t{2}".format(
motif.id, opt_score, threshold)) |
<SYSTEM_TASK:>
Convert two arrays of values to an array of labels and an array of scores.
<END_TASK>
<USER_TASK:>
Description:
def values_to_labels(fg_vals, bg_vals):
"""
Convert two arrays of values to an array of labels and an array of scores.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
y_true : array
Labels.
y_score : array
Values.
""" |
y_true = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals))))
y_score = np.hstack((fg_vals, bg_vals))
return y_true, y_score |
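For example (scores made up), two positives and two negatives become sklearn-style label and score arrays:

# The output plugs directly into sklearn.metrics functions such as
# roc_auc_score(y_true, y_score).
y_true, y_score = values_to_labels([0.9, 0.8], [0.3, 0.1])
# y_true  -> array([1., 1., 0., 0.])
# y_score -> array([0.9, 0.8, 0.3, 0.1])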
<SYSTEM_TASK:>
Computes the maximum enrichment.
<END_TASK>
<USER_TASK:>
Description:
def max_enrichment(fg_vals, bg_vals, minbg=2):
"""
Computes the maximum enrichment.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
minbg : int, optional
Minimum number of matches in background. The default is 2.
Returns
-------
enrichment : float
Maximum enrichment.
""" |
scores = np.hstack((fg_vals, bg_vals))
idx = np.argsort(scores)
x = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals))))
xsort = x[idx]
l_fg = len(fg_vals)
l_bg = len(bg_vals)
m = 0
s = 0
for i in range(len(scores), 0, -1):
bgcount = float(len(xsort[i:][xsort[i:] == 0]))
if bgcount >= minbg:
enr = (len(xsort[i:][xsort[i:] == 1]) / l_fg) / (bgcount / l_bg)
if enr > m:
m = enr
s = scores[idx[i]]
return m |
<SYSTEM_TASK:>
Computes the ROC Area Under Curve until a certain FPR value.
<END_TASK>
<USER_TASK:>
Description:
def roc_auc_xlim(x_bla, y_bla, xlim=0.1):
"""
Computes the ROC Area Under Curve until a certain FPR value.
Parameters
----------
x_bla : array_like
The list of values for the positive set.
y_bla : array_like
The list of values for the negative set.
xlim : float, optional
FPR value
Returns
-------
score : float
ROC AUC score
""" |
x = x_bla[:]
y = y_bla[:]
x.sort()
y.sort()
u = {}
for i in x + y:
u[i] = 1
vals = sorted(u.keys())
len_x = float(len(x))
len_y = float(len(y))
new_x = []
new_y = []
x_p = 0
y_p = 0
for val in vals[::-1]:
while len(x) > 0 and x[-1] >= val:
x.pop()
x_p += 1
while len(y) > 0 and y[-1] >= val:
y.pop()
y_p += 1
new_y.append((len_x - x_p) / len_x)
new_x.append((len_y - y_p) / len_y)
#print new_x
#print new_y
new_x = 1 - np.array(new_x)
new_y = 1 - np.array(new_y)
#plot(new_x, new_y)
#show()
x = new_x
y = new_y
if len(x) != len(y):
raise ValueError("Unequal!")
if not xlim:
xlim = 1.0
auc = 0.0
bla = zip(stats.rankdata(x), range(len(x)))
bla = sorted(bla, key=lambda x: x[1])
prev_x = x[bla[0][1]]
prev_y = y[bla[0][1]]
index = 1
while index < len(bla) and x[bla[index][1]] <= xlim:
_, i = bla[index]
auc += y[i] * (x[i] - prev_x) - ((x[i] - prev_x) * (y[i] - prev_y) / 2.0)
prev_x = x[i]
prev_y = y[i]
index += 1
if index < len(bla):
(rank, i) = bla[index]
auc += prev_y * (xlim - prev_x) + ((y[i] - prev_y)/(x[i] - prev_x) * (xlim -prev_x) * (xlim - prev_x)/2)
return auc |
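A hedged call sketch; because the implementation sorts and pops its inputs and concatenates them with ``+``, plain lists of scores (not NumPy arrays) are expected:

# Hypothetical call: partial ROC AUC up to FPR = 0.1. First argument is
# the positive-set scores, second the negative-set scores.
pauc = roc_auc_xlim([0.9, 0.7, 0.6, 0.4], [0.5, 0.3, 0.2, 0.1], xlim=0.1)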
<SYSTEM_TASK:>
Computes the maximum F-measure.
<END_TASK>
<USER_TASK:>
Description:
def max_fmeasure(fg_vals, bg_vals):
"""
Computes the maximum F-measure.
Parameters
----------
fg_vals : array_like
The list of values for the positive set.
bg_vals : array_like
The list of values for the negative set.
Returns
-------
f : float
Maximum f-measure.
""" |
x, y = roc_values(fg_vals, bg_vals)
x, y = x[1:], y[1:] # don't include origin
p = y / (y + x)
filt = np.logical_and((p * y) > 0, (p + y) > 0)
p = p[filt]
y = y[filt]
f = (2 * p * y) / (p + y)
if len(f) > 0:
#return np.nanmax(f), np.nanmax(y[f == np.nanmax(f)])
return np.nanmax(f)
else:
return None |
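For reference, the array ``f`` above is the harmonic mean of precision ``p`` and recall ``y``:

.. math::
    F = \frac{2\,p\,y}{p + y}

Note that ``p = y / (y + x)`` is derived from the ROC coordinates (``y`` the true positive rate, ``x`` the false positive rate); it equals the true precision only when the positive and negative sets are the same size, so for unequal set sizes it should be read as an approximation.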
<SYSTEM_TASK:>
Computes the Kolmogorov-Smirnov p-value of position distribution.
<END_TASK>
<USER_TASK:>
Description:
def ks_pvalue(fg_pos, bg_pos=None):
"""
Computes the Kolmogorov-Smirnov p-value of position distribution.
Parameters
----------
fg_pos : array_like
The list of values for the positive set.
bg_pos : array_like, optional
The list of values for the negative set.
Returns
-------
p : float
KS p-value.
""" |
if len(fg_pos) == 0:
return 1.0
a = np.array(fg_pos, dtype="float") / max(fg_pos)
p = kstest(a, "uniform")[1]
return p |
<SYSTEM_TASK:>
Computes the -log10 of Kolmogorov-Smirnov p-value of position distribution.
<END_TASK>
<USER_TASK:>
Description:
def ks_significance(fg_pos, bg_pos=None):
"""
Computes the -log10 of Kolmogorov-Smirnov p-value of position distribution.
Parameters
----------
fg_pos : array_like
The list of values for the positive set.
bg_pos : array_like, optional
The list of values for the negative set.
Returns
-------
p : float
-log10(KS p-value).
""" |
p = ks_pvalue(fg_pos, bg_pos)
if p > 0:
return -np.log10(p)
else:
return np.inf |
<SYSTEM_TASK:>
Load and shape data for training with Keras + Pescador.
<END_TASK>
<USER_TASK:>
Description:
def setup_data():
"""Load and shape data for training with Keras + Pescador.
Returns
-------
input_shape : tuple, len=3
Shape of each sample; adapts to channel configuration of Keras.
X_train, y_train : np.ndarrays
Images and labels for training.
X_test, y_test : np.ndarrays
Images and labels for test.
""" |
# The data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return input_shape, (x_train, y_train), (x_test, y_test) |
<SYSTEM_TASK:>
Create a compiled Keras model.
<END_TASK>
<USER_TASK:>
Description:
def build_model(input_shape):
"""Create a compiled Keras model.
Parameters
----------
input_shape : tuple, len=3
Shape of each image sample.
Returns
-------
model : keras.Model
Constructed model.
""" |
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, kernel_size=(3, 3),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
return model |
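A minimal way to tie the two helpers together is a plain Keras ``fit`` call; the Pescador streaming wiring from the original example is omitted here, and ``num_classes``, ``img_rows`` and ``img_cols`` are assumed to be module-level globals:

# Hypothetical training sketch under the assumptions above.
input_shape, (x_train, y_train), (x_test, y_test) = setup_data()
model = build_model(input_shape)
model.fit(x_train, y_train, batch_size=128, epochs=1,
          validation_data=(x_test, y_test))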
<SYSTEM_TASK:>
Return default GimmeMotifs parameters.
<END_TASK>
<USER_TASK:>
Description:
def parse_denovo_params(user_params=None):
"""Return default GimmeMotifs parameters.
Defaults will be replaced with parameters defined in user_params.
Parameters
----------
user_params : dict, optional
User-defined parameters.
Returns
-------
params : dict
""" |
config = MotifConfig()
if user_params is None:
user_params = {}
params = config.get_default_params()
params.update(user_params)
if params.get("torque"):
logger.debug("Using torque")
else:
logger.debug("Using multiprocessing")
params["background"] = [x.strip() for x in params["background"].split(",")]
logger.debug("Parameters:")
for param, value in params.items():
logger.debug(" %s: %s", param, value)
# Maximum time?
if params["max_time"]:
try:
max_time = params["max_time"] = float(params["max_time"])
except Exception:
logger.debug("Could not parse max_time value, setting to no limit")
params["max_time"] = -1
if params["max_time"] > 0:
logger.debug("Time limit for motif prediction: %0.2f hours", max_time)
params["max_time"] = 3600 * params["max_time"]
logger.debug("Max_time in seconds %0.0f", max_time)
else:
logger.debug("No time limit for motif prediction")
return params |
<SYSTEM_TASK:>
Return aggregated ranks.
<END_TASK>
<USER_TASK:>
Description:
def rankagg(df, method="stuart"):
"""Return aggregated ranks.
Implementation is ported from the RobustRankAggreg R package
References:
Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709
Stuart et al., 2003, DOI: 10.1126/science.1087447
Parameters
----------
df : pandas.DataFrame
DataFrame with values to be ranked and aggregated
Returns
-------
pandas.Series with aggregated rank p-values, sorted from most to least significant
""" |
rmat = pd.DataFrame(index=df.iloc[:,0])
step = 1 / rmat.shape[0]
for col in df.columns:
rmat[col] = pd.DataFrame({col:np.arange(step, 1 + step, step)}, index=df[col]).loc[rmat.index]
rmat = rmat.apply(sorted, 1, result_type="expand")
p = rmat.apply(qStuart, 1)
df = pd.DataFrame(
{"p.adjust":multipletests(p, method="h")[1]},
index=rmat.index).sort_values('p.adjust')
return df["p.adjust"] |
<SYSTEM_TASK:>
Yield data, while optionally burning compute cycles.
<END_TASK>
<USER_TASK:>
Description:
def data_gen(n_ops=100):
"""Yield data, while optionally burning compute cycles.
Parameters
----------
n_ops : int, default=100
Number of operations to run between yielding data.
Returns
-------
data : dict
A object which looks like it might come from some
machine learning problem, with X as features, and y as targets.
""" |
while True:
X = np.random.uniform(size=(64, 64))
yield dict(X=costly_function(X, n_ops),
y=np.random.randint(10, size=(1,))) |
<SYSTEM_TASK:>
Predict motifs, input is a FASTA-file
<END_TASK>
<USER_TASK:>
Description:
def predict_motifs(infile, bgfile, outfile, params=None, stats_fg=None, stats_bg=None):
""" Predict motifs, input is a FASTA-file""" |
# Parse parameters
required_params = ["tools", "available_tools", "analysis",
"genome", "use_strand", "max_time"]
if params is None:
params = parse_denovo_params()
else:
for p in required_params:
if p not in params:
params = parse_denovo_params()
break
# Define all tools
tools = dict(
[
(x.strip(), x.strip() in [y.strip() for y in params["tools"].split(",")])
for x in params["available_tools"].split(",")
]
)
# Predict the motifs
analysis = params["analysis"]
logger.info("starting motif prediction (%s)", analysis)
logger.info("tools: %s",
", ".join([x for x in tools.keys() if tools[x]]))
result = pp_predict_motifs(
infile,
outfile,
analysis,
params.get("genome", None),
params["use_strand"],
bgfile,
tools,
None,
#logger=logger,
max_time=params["max_time"],
stats_fg=stats_fg,
stats_bg=stats_bg
)
motifs = result.motifs
logger.info("predicted %s motifs", len(motifs))
logger.debug("written to %s", outfile)
if len(motifs) == 0:
logger.info("no motifs found")
result.motifs = []
return result |
<SYSTEM_TASK:>
Add motifs to the result object.
<END_TASK>
<USER_TASK:>
Description:
def add_motifs(self, args):
"""Add motifs to the result object.""" |
self.lock.acquire()
# Callback function for motif programs
if args is None or len(args) != 2 or len(args[1]) != 3:
try:
job = args[0]
logger.warn("job %s failed", job)
self.finished.append(job)
except Exception:
logger.warn("job failed")
self.lock.release()
return
job, (motifs, stdout, stderr) = args
logger.info("%s finished, found %s motifs", job, len(motifs))
for motif in motifs:
if self.do_counter:
self.counter += 1
motif.id = "gimme_{}_".format(self.counter) + motif.id
f = open(self.outfile, "a")
f.write("%s\n" % motif.to_pfm())
f.close()
self.motifs.append(motif)
if self.do_stats and len(motifs) > 0:
#job_id = "%s_%s" % (motif.id, motif.to_consensus())
logger.debug("Starting stats job of %s motifs", len(motifs))
for bg_name, bg_fa in self.background.items():
stat_job = self.job_server.apply_async(
mp_calc_stats,
(motifs, self.fg_fa, bg_fa, bg_name),
callback=self.add_stats
)
self.stat_jobs.append(stat_job)
logger.debug("stdout %s: %s", job, stdout)
logger.debug("stdout %s: %s", job, stderr)
self.finished.append(job)
self.lock.release() |
<SYSTEM_TASK:>
Make sure all jobs are finished.
<END_TASK>
<USER_TASK:>
Description:
def wait_for_stats(self):
"""Make sure all jobs are finished.""" |
logging.debug("waiting for statistics to finish")
for job in self.stat_jobs:
job.get()
sleep(2) |
<SYSTEM_TASK:>
Prepare a narrowPeak file for de novo motif prediction.
<END_TASK>
<USER_TASK:>
Description:
def prepare_denovo_input_narrowpeak(inputfile, params, outdir):
"""Prepare a narrowPeak file for de novo motif prediction.
All regions to same size; split in test and validation set;
converted to FASTA.
Parameters
----------
inputfile : str
BED file with input regions.
params : dict
Dictionary with parameters.
outdir : str
Output directory to save files.
""" |
bedfile = os.path.join(outdir, "input.from.narrowpeak.bed")
p = re.compile(r'^(#|track|browser)')
width = int(params["width"])
logger.info("preparing input (narrowPeak to BED, width %s)", width)
warn_no_summit = True
with open(bedfile, "w") as f_out:
with open(inputfile) as f_in:
for line in f_in:
if p.search(line):
continue
vals = line.strip().split("\t")
start, end = int(vals[1]), int(vals[2])
summit = int(vals[9])
if summit == -1:
if warn_no_summit:
logger.warn("No summit present in narrowPeak file, using the peak center.")
warn_no_summit = False
summit = (end - start) // 2
start = start + summit - (width // 2)
end = start + width
f_out.write("{}\t{}\t{}\t{}\n".format(
vals[0],
start,
end,
vals[6]
))
prepare_denovo_input_bed(bedfile, params, outdir) |
<SYSTEM_TASK:>
Prepare a BED file for de novo motif prediction.
<END_TASK>
<USER_TASK:>
Description:
def prepare_denovo_input_bed(inputfile, params, outdir):
"""Prepare a BED file for de novo motif prediction.
All regions to same size; split in test and validation set;
converted to FASTA.
Parameters
----------
inputfile : str
BED file with input regions.
params : dict
Dictionary with parameters.
outdir : str
Output directory to save files.
""" |
logger.info("preparing input (BED)")
# Create BED file with regions of equal size
width = int(params["width"])
bedfile = os.path.join(outdir, "input.bed")
write_equalwidth_bedfile(inputfile, width, bedfile)
abs_max = int(params["abs_max"])
fraction = float(params["fraction"])
pred_bedfile = os.path.join(outdir, "prediction.bed")
val_bedfile = os.path.join(outdir, "validation.bed")
# Split input into prediction and validation set
logger.debug(
"Splitting %s into prediction set (%s) and validation set (%s)",
bedfile, pred_bedfile, val_bedfile)
divide_file(bedfile, pred_bedfile, val_bedfile, fraction, abs_max)
config = MotifConfig()
genome = Genome(params["genome"])
for infile in [pred_bedfile, val_bedfile]:
genome.track2fasta(
infile,
infile.replace(".bed", ".fa"),
)
# Create file for location plots
lwidth = int(params["lwidth"])
extend = (lwidth - width) // 2
genome.track2fasta(
val_bedfile,
os.path.join(outdir, "localization.fa"),
extend_up=extend,
extend_down=extend,
stranded=params["use_strand"],
) |
<SYSTEM_TASK:>
Create background of a specific type.
<END_TASK>
<USER_TASK:>
Description:
def create_background(bg_type, fafile, outfile, genome="hg18", width=200, nr_times=10, custom_background=None):
"""Create background of a specific type.
Parameters
----------
bg_type : str
Name of background type.
fafile : str
Name of input FASTA file.
outfile : str
Name of output FASTA file.
genome : str, optional
Genome name.
width : int, optional
Size of regions.
nr_times : int, optional
Generate this times as many background sequences as compared to
input file.
Returns
-------
nr_seqs : int
Number of sequences created.
""" |
width = int(width)
config = MotifConfig()
fg = Fasta(fafile)
if bg_type in ["genomic", "gc"]:
if not genome:
logger.error("Need a genome to create background")
sys.exit(1)
if bg_type == "random":
f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
logger.debug("Random background: %s", outfile)
elif bg_type == "genomic":
logger.debug("Creating genomic background")
f = RandomGenomicFasta(genome, width, nr_times * len(fg))
elif bg_type == "gc":
logger.debug("Creating GC matched background")
f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
logger.debug("GC matched background: %s", outfile)
elif bg_type == "promoter":
fname = Genome(genome).filename
gene_file = fname.replace(".fa", ".annotation.bed.gz")
if not os.path.exists(gene_file):
gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
if not os.path.exists(gene_file):
print("Could not find a gene file for genome {}")
print("Did you use the --annotation flag for genomepy?")
print("Alternatively make sure there is a file called {}.bed in {}".format(genome, config.get_gene_dir()))
raise ValueError()
logger.info(
"Creating random promoter background (%s, using genes in %s)",
genome, gene_file)
f = PromoterFasta(gene_file, genome, width, nr_times * len(fg))
logger.debug("Random promoter background: %s", outfile)
elif bg_type == "custom":
bg_file = custom_background
if not bg_file:
raise IOError(
"Background file not specified!")
if not os.path.exists(bg_file):
raise IOError(
"Custom background file %s does not exist!" % bg_file)
else:
logger.info("Copying custom background file %s to %s.",
bg_file, outfile)
f = Fasta(bg_file)
median_len = np.median([len(seq) for seq in f.seqs])
if median_len < (width * 0.95) or median_len > (width * 1.05):
logger.warn(
"The custom background file %s contains sequences with a "
"median length of %s, while GimmeMotifs predicts motifs in sequences "
"of length %s. This will influence the statistics! It is recommended "
"to use background sequences of the same length.",
bg_file, median_len, width)
f.writefasta(outfile)
return len(f) |
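A hedged call sketch (the file names below are hypothetical):

# Create a GC%-matched background ten times the size of the input
# peaks, for computing motif statistics against hg38.
n_seqs = create_background("gc", "peaks.fa", "peaks.gcbg.fa",
                           genome="hg38", width=200, nr_times=10)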
<SYSTEM_TASK:>
Create different backgrounds for motif prediction and validation.
<END_TASK>
<USER_TASK:>
Description:
def create_backgrounds(outdir, background=None, genome="hg38", width=200, custom_background=None):
"""Create different backgrounds for motif prediction and validation.
Parameters
----------
outdir : str
Directory to save results.
background : list, optional
Background types to create, default is 'random'.
genome : str, optional
Genome name (for genomic and gc backgrounds).
width : int, optional
Size of background regions
Returns
-------
bg_info : dict
Keys: background name, values: file name.
""" |
if background is None:
background = ["random"]
nr_sequences = {}
# Create background for motif prediction
if "gc" in background:
pred_bg = "gc"
else:
pred_bg = background[0]
create_background(
pred_bg,
os.path.join(outdir, "prediction.fa"),
os.path.join(outdir, "prediction.bg.fa"),
genome=genome,
width=width,
custom_background=custom_background)
# Get background fasta files for statistics
bg_info = {}
nr_sequences = {}
for bg in background:
fname = os.path.join(outdir, "bg.{}.fa".format(bg))
nr_sequences[bg] = create_background(
bg,
os.path.join(outdir, "validation.fa"),
fname,
genome=genome,
width=width,
custom_background=custom_background)
bg_info[bg] = fname
return bg_info |
<SYSTEM_TASK:>
Filter significant motifs based on several statistics.
<END_TASK>
<USER_TASK:>
Description:
def filter_significant_motifs(fname, result, bg, metrics=None):
"""Filter significant motifs based on several statistics.
Parameters
----------
fname : str
Filename of output file were significant motifs will be saved.
result : PredictionResult instance
Contains motifs and associated statistics.
bg : str
Name of background type to use.
metrics : sequence
Metric with associated minimum values. The default is
(("max_enrichment", 3), ("roc_auc", 0.55), ("enr_at_f[r", 0.55))
Returns
-------
motifs : list
List of Motif instances.
""" |
sig_motifs = []
with open(fname, "w") as f:
for motif in result.motifs:
stats = result.stats.get(
"%s_%s" % (motif.id, motif.to_consensus()), {}).get(bg, {}
)
if _is_significant(stats, metrics):
f.write("%s\n" % motif.to_pfm())
sig_motifs.append(motif)
logger.info("%s motifs are significant", len(sig_motifs))
logger.debug("written to %s", fname)
return sig_motifs |
<SYSTEM_TASK:>
Return the best motif per cluster for a clustering results.
<END_TASK>
<USER_TASK:>
Description:
def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background, stats=None, metrics=("roc_auc", "recall_at_fdr")):
"""Return the best motif per cluster for a clustering results.
The motif can be either the average motif or one of the clustered motifs.
Parameters
----------
single_pwm : str
Filename of motifs.
clus_pwm : str
Filename of motifs.
clusters :
Motif clustering result.
fg_fa : str
Filename of FASTA file.
background : dict
Dictionary for background file names.
stats : dict, optional
If statistics are not supplied they will be computed.
metrics : sequence, optional
Metrics to use for motif evaluation. Default are "roc_auc" and
"recall_at_fdr".
Returns
-------
motifs : list
List of Motif instances.
""" |
# combine original and clustered motifs
motifs = read_motifs(single_pwm) + read_motifs(clus_pwm)
motifs = dict([(str(m), m) for m in motifs])
# get the statistics for those motifs that were not yet checked
if stats is None:
stats = {}
clustered_motifs = []
for clus,singles in clusters:
for motif in set([clus] + singles):
if str(motif) not in stats:
clustered_motifs.append(motifs[str(motif)])
new_stats = {}
for bg, bg_fa in background.items():
for m,s in calc_stats(clustered_motifs, fg_fa, bg_fa).items():
if m not in new_stats:
new_stats[m] = {}
new_stats[m][bg] = s
stats.update(new_stats)
rank = rank_motifs(stats, metrics)
# rank the motifs
best_motifs = []
for clus, singles in clusters:
if len(singles) > 1:
eval_motifs = singles
if clus not in motifs:
eval_motifs.append(clus)
eval_motifs = [motifs[str(e)] for e in eval_motifs]
best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1]
best_motifs.append(best_motif)
else:
best_motifs.append(clus)
for bg in background:
stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles)
best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True)
return best_motifs |
<SYSTEM_TASK:>
Register method to keep list of dbs.
<END_TASK>
<USER_TASK:>
Description:
def register_db(cls, dbname):
"""Register method to keep list of dbs.""" |
def decorator(subclass):
"""Register as decorator function."""
cls._dbs[dbname] = subclass
subclass.name = dbname
return subclass
return decorator |
<SYSTEM_TASK:>
Create a Moap instance based on the predictor name.
<END_TASK>
<USER_TASK:>
Description:
def create(cls, name, ncpus=None):
"""Create a Moap instance based on the predictor name.
Parameters
----------
name : str
Name of the predictor (e.g. Xgboost, BayesianRidge, ...)
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Returns
-------
moap : Moap instance
moap instance.
""" |
try:
return cls._predictors[name.lower()](ncpus=ncpus)
except KeyError:
raise Exception("Unknown class") |
<SYSTEM_TASK:>
Register method to keep list of predictors.
<END_TASK>
<USER_TASK:>
Description:
def register_predictor(cls, name):
"""Register method to keep list of predictors.""" |
def decorator(subclass):
"""Register as decorator function."""
cls._predictors[name.lower()] = subclass
subclass.name = name.lower()
return subclass
return decorator |
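As an aside, the registry pattern used by register_db and register_predictor above is easy to exercise in isolation. Below is a minimal, self-contained sketch; the Registry and BayesianRidgeMoap names are hypothetical stand-ins, not part of the library:

class Registry(object):
    # Class-level mapping of lowercase name -> registered subclass
    _predictors = {}

    @classmethod
    def register_predictor(cls, name):
        def decorator(subclass):
            cls._predictors[name.lower()] = subclass
            subclass.name = name.lower()
            return subclass
        return decorator

    @classmethod
    def create(cls, name, ncpus=None):
        try:
            return cls._predictors[name.lower()](ncpus=ncpus)
        except KeyError:
            raise ValueError("Unknown predictor: {}".format(name))

@Registry.register_predictor("BayesianRidge")
class BayesianRidgeMoap(object):
    def __init__(self, ncpus=None):
        self.ncpus = ncpus

moap = Registry.create("bayesianridge")  # lookup is case-insensitive
print(moap.name)                         # 'bayesianridge'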
<SYSTEM_TASK:>
Determine mean rank of motifs based on metrics.
<END_TASK>
<USER_TASK:>
Description:
def rank_motifs(stats, metrics=("roc_auc", "recall_at_fdr")):
"""Determine mean rank of motifs based on metrics.""" |
rank = {}
combined_metrics = []
motif_ids = stats.keys()
background = list(stats.values())[0].keys()
for metric in metrics:
mean_metric_stats = [np.mean(
[stats[m][bg][metric] for bg in background]) for m in motif_ids]
ranked_metric_stats = rankdata(mean_metric_stats)
combined_metrics.append(ranked_metric_stats)
for motif, val in zip(motif_ids, np.mean(combined_metrics, 0)):
rank[motif] = val
return rank |
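To make the ranking concrete, here is a small worked example; it is a sketch assuming numpy, scipy.stats.rankdata, and the rank_motifs function above are all in scope:

stats = {
    "motif_A": {"gc": {"roc_auc": 0.90, "recall_at_fdr": 0.60}},
    "motif_B": {"gc": {"roc_auc": 0.70, "recall_at_fdr": 0.80}},
}
# Per metric, motifs are ranked by their mean value across backgrounds
# (rankdata assigns rank 1 to the lowest value). The final value per
# motif is the mean of its per-metric ranks:
#   motif_A: roc_auc rank 2, recall_at_fdr rank 1 -> mean 1.5
#   motif_B: roc_auc rank 1, recall_at_fdr rank 2 -> mean 1.5
print(rank_motifs(stats))  # {'motif_A': 1.5, 'motif_B': 1.5}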
<SYSTEM_TASK:>
write motif statistics to text file.
<END_TASK>
<USER_TASK:>
Description:
def write_stats(stats, fname, header=None):
"""write motif statistics to text file.""" |
# Write stats output to file
for bg in list(stats.values())[0].keys():
f = open(fname.format(bg), "w")
if header:
f.write(header)
stat_keys = sorted(list(list(stats.values())[0].values())[0].keys())
f.write("{}\t{}\n".format("Motif", "\t".join(stat_keys)))
for motif in stats:
m_stats = stats.get(str(motif), {}).get(bg)
if m_stats:
f.write("{}\t{}\n".format(
"_".join(motif.split("_")[:-1]),
"\t".join([str(m_stats[k]) for k in stat_keys])
))
else:
logger.warn("No stats for motif {0}, skipping this motif!".format(motif.id))
#motifs.remove(motif)
f.close()
return |
<SYSTEM_TASK:>
Create text report of motifs with statistics and database match.
<END_TASK>
<USER_TASK:>
Description:
def _create_text_report(inputfile, motifs, closest_match, stats, outdir):
"""Create text report of motifs with statistics and database match.""" |
my_stats = {}
for motif in motifs:
match = closest_match[motif.id]
my_stats[str(motif)] = {}
for bg in list(stats.values())[0].keys():
if str(motif) not in stats:
logger.error("####")
logger.error("{} not found".format(str(motif)))
for s in sorted(stats.keys()):
logger.error(s)
logger.error("####")
else:
my_stats[str(motif)][bg] = stats[str(motif)][bg].copy()
my_stats[str(motif)][bg]["best_match"] = "_".join(match[0].split("_")[:-1])
my_stats[str(motif)][bg]["best_match_pvalue"] = match[1][-1]
header = ("# GimmeMotifs version {}\n"
"# Inputfile: {}\n"
).format(__version__, inputfile)
write_stats(my_stats, os.path.join(outdir, "stats.{}.txt"), header=header) |
<SYSTEM_TASK:>
Get rid of all axis ticks, lines, etc.
<END_TASK>
<USER_TASK:>
Description:
def axes_off(ax):
"""Get rid of all axis ticks, lines, etc.
""" |
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False) |
<SYSTEM_TASK:>
Check if the inputfile is a valid bed-file
<END_TASK>
<USER_TASK:>
Description:
def check_bed_file(fname):
""" Check if the inputfile is a valid bed-file """ |
if not os.path.exists(fname):
logger.error("Inputfile %s does not exist!", fname)
sys.exit(1)
for i, line in enumerate(open(fname)):
if line.startswith("#") or line.startswith("track") or line.startswith("browser"):
# comment or BED specific stuff
pass
else:
vals = line.strip().split("\t")
if len(vals) < 3:
logger.error("Expecting tab-seperated values (chromosome<tab>start<tab>end) on line %s of file %s", i + 1, fname)
sys.exit(1)
try:
start, end = int(vals[1]), int(vals[2])
except ValueError:
logger.error("No valid integer coordinates on line %s of file %s", i + 1, fname)
sys.exit(1)
if len(vals) > 3:
try:
float(vals[3])
except ValueError:
pass |
<SYSTEM_TASK:>
Check if an input file is valid, which means BED, narrowPeak or FASTA
<END_TASK>
<USER_TASK:>
Description:
def check_denovo_input(inputfile, params):
"""
Check if an input file is valid, which means BED, narrowPeak or FASTA
""" |
background = params["background"]
input_type = determine_file_type(inputfile)
if input_type == "fasta":
valid_bg = FA_VALID_BGS
elif input_type in ["bed", "narrowpeak"]:
genome = params["genome"]
valid_bg = BED_VALID_BGS
if "genomic" in background or "gc" in background:
Genome(genome)
# is it a valid bed-file etc.
check_bed_file(inputfile) # bed-specific, will also work for narrowPeak
else:
sys.stderr.write("Format of inputfile {} not recognized.\n".format(inputfile))
sys.stderr.write("Input should be FASTA, BED or narrowPeak.\n")
sys.stderr.write("See https://genome.ucsc.edu/FAQ/FAQformat.html for specifications.\n")
sys.exit(1)
for bg in background:
if bg not in valid_bg:
logger.info("Input type is %s, ignoring background type '%s'",
input_type, bg)
background = [bg for bg in background if bg in valid_bg]
if len(background) == 0:
logger.error("No valid backgrounds specified!")
sys.exit(1)
return input_type, background |
<SYSTEM_TASK:>
Scan a FASTA file with motifs.
<END_TASK>
<USER_TASK:>
Description:
def scan_to_best_match(fname, motifs, ncpus=None, genome=None, score=False):
"""Scan a FASTA file with motifs.
Scan a FASTA file and return a dictionary with the best match per motif.
Parameters
----------
fname : str
Filename of a sequence file in FASTA format.
motifs : list or str
List of motif instances or the filename of a motif file.
ncpus : int, optional
Number of threads to use.
genome : str, optional
Genome name; if given, the scanner genome is set.
score : bool, optional
If True, return the best score per sequence instead of the best match.
Returns
-------
result : dict
Dictionary with motif scanning results.
""" |
# Initialize scanner
s = Scanner(ncpus=ncpus)
s.set_motifs(motifs)
s.set_threshold(threshold=0.0)
if genome:
s.set_genome(genome)
if isinstance(motifs, six.string_types):
motifs = read_motifs(motifs)
logger.debug("scanning %s...", fname)
result = dict([(m.id, []) for m in motifs])
if score:
it = s.best_score(fname)
else:
it = s.best_match(fname)
for scores in it:
for motif,score in zip(motifs, scores):
result[motif.id].append(score)
# Close the pool and reclaim memory
del s
return result |
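A hypothetical usage sketch; the file names below are placeholders, and it assumes GimmeMotifs is installed with this function importable from gimmemotifs.scanner:

from gimmemotifs.scanner import scan_to_best_match

# 'peaks.fa' and 'motifs.pwm' are placeholder file names.
result = scan_to_best_match("peaks.fa", "motifs.pwm", score=True)
for motif_id, scores in result.items():
    # one best score per input sequence
    print(motif_id, max(scores))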
<SYSTEM_TASK:>
Set the background to use for FPR and z-score calculations.
<END_TASK>
<USER_TASK:>
Description:
def set_background(self, fname=None, genome=None, length=200, nseq=10000):
"""Set the background to use for FPR and z-score calculations.
Background can be specified either as a genome name or as the
name of a FASTA file.
Parameters
----------
fname : str, optional
Name of FASTA file to use as background.
genome : str, optional
Name of genome to use to retrieve random sequences.
length : int, optional
Length of genomic sequences to retrieve. The default
is 200.
nseq : int, optional
Number of genomic sequences to retrieve. The default is 10000.
""" |
length = int(length)
if genome and fname:
raise ValueError("Need either genome or filename for background.")
if fname:
if not os.path.exists(fname):
raise IOError("Background file {} does not exist!".format(fname))
self.background = Fasta(fname)
self.background_hash = file_checksum(fname)
return
if not genome:
if self.genome:
genome = self.genome
logger.info("Using default background: genome {} with length {}".format(
genome, length))
else:
raise ValueError("Need either genome or filename for background.")
logger.info("Using background: genome {} with length {}".format(genome, length))
with Cache(CACHE_DIR) as cache:
self.background_hash = "{}\{}".format(genome, int(length))
fa = cache.get(self.background_hash)
if not fa:
fa = RandomGenomicFasta(genome, length, nseq)
cache.set(self.background_hash, fa)
self.background = fa |
<SYSTEM_TASK:>
Set motif scanning threshold based on background sequences.
<END_TASK>
<USER_TASK:>
Description:
def set_threshold(self, fpr=None, threshold=None):
"""Set motif scanning threshold based on background sequences.
Parameters
----------
fpr : float, optional
Desired FPR, between 0.0 and 1.0.
threshold : float or str, optional
Desired motif threshold, expressed as the fraction of the
difference between minimum and maximum score of the PWM.
Should either be a float between 0.0 and 1.0 or a filename
with thresholds as created by 'gimme threshold'.
""" |
if threshold and fpr:
raise ValueError("Need either fpr or threshold.")
if fpr:
fpr = float(fpr)
if not (0.0 < fpr < 1.0):
raise ValueError("Parameter fpr should be between 0 and 1")
if not self.motifs:
raise ValueError("please run set_motifs() first")
thresholds = {}
motifs = read_motifs(self.motifs)
if threshold is not None:
self.threshold = parse_threshold_values(self.motifs, threshold)
return
if not self.background:
try:
self.set_background()
except Exception:
raise ValueError("please run set_background() first")
seqs = self.background.seqs
with Cache(CACHE_DIR) as cache:
scan_motifs = []
for motif in motifs:
k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr)
threshold = cache.get(k)
if threshold is None:
scan_motifs.append(motif)
else:
if np.isclose(threshold, motif.pwm_max_score()):
thresholds[motif.id] = None
elif np.isclose(threshold, motif.pwm_min_score()):
thresholds[motif.id] = 0.0
else:
thresholds[motif.id] = threshold
if len(scan_motifs) > 0:
logger.info("Determining FPR-based threshold")
for motif, threshold in self._threshold_from_seqs(scan_motifs, seqs, fpr):
k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr)
cache.set(k, threshold)
if np.isclose(threshold, motif.pwm_max_score()):
thresholds[motif.id] = None
elif np.isclose(threshold, motif.pwm_min_score()):
thresholds[motif.id] = 0.0
else:
thresholds[motif.id] = threshold
self.threshold_str = "{}_{}_{}".format(fpr, threshold, self.background_hash)
self.threshold = thresholds |
<SYSTEM_TASK:>
give the score of the best match of each motif in each sequence
<END_TASK>
<USER_TASK:>
Description:
def best_score(self, seqs, scan_rc=True, normalize=False):
"""
give the score of the best match of each motif in each sequence
returns an iterator of lists containing floats
""" |
self.set_threshold(threshold=0.0)
if normalize and len(self.meanstd) == 0:
self.set_meanstd()
means = np.array([self.meanstd[m][0] for m in self.motif_ids])
stds = np.array([self.meanstd[m][1] for m in self.motif_ids])
for matches in self.scan(seqs, 1, scan_rc):
scores = np.array([sorted(m, key=lambda x: x[0])[0][0] for m in matches if len(m) > 0])
if normalize:
scores = (scores - means) / stds
yield scores |
<SYSTEM_TASK:>
Calculate ROC_AUC and other metrics and optionally plot ROC curve.
<END_TASK>
<USER_TASK:>
Description:
def roc(args):
""" Calculate ROC_AUC and other metrics and optionally plot ROC curve.""" |
outputfile = args.outfile
# Default extension for image
if outputfile and not outputfile.endswith(".png"):
outputfile += ".png"
motifs = read_motifs(args.pwmfile, fmt="pwm")
ids = []
if args.ids:
ids = args.ids.split(",")
else:
ids = [m.id for m in motifs]
motifs = [m for m in motifs if (m.id in ids)]
stats = [
"phyper_at_fpr",
"roc_auc",
"pr_auc",
"enr_at_fpr",
"recall_at_fdr",
"roc_values",
"matches_at_fpr",
]
plot_x = []
plot_y = []
legend = []
f_out = sys.stdout
if args.outdir:
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
f_out = open(args.outdir + "/gimme.roc.report.txt", "w")
# Print the metrics
f_out.write("Motif\t# matches\t# matches background\tP-value\tlog10 P-value\tROC AUC\tPR AUC\tEnr. at 1% FPR\tRecall at 10% FDR\n")
for motif_stats in calc_stats_iterator(motifs, args.sample, args.background,
genome=args.genome, stats=stats, ncpus=args.ncpus):
for motif in motifs:
if str(motif) in motif_stats:
if outputfile:
x, y = motif_stats[str(motif)]["roc_values"]
plot_x.append(x)
plot_y.append(y)
legend.append(motif.id)
log_pvalue = np.inf
if motif_stats[str(motif)]["phyper_at_fpr"] > 0:
log_pvalue = -np.log10(motif_stats[str(motif)]["phyper_at_fpr"])
f_out.write("{}\t{:d}\t{:d}\t{:.2e}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.2f}\t{:0.4f}\n".format(
motif.id,
motif_stats[str(motif)]["matches_at_fpr"][0],
motif_stats[str(motif)]["matches_at_fpr"][1],
motif_stats[str(motif)]["phyper_at_fpr"],
log_pvalue,
motif_stats[str(motif)]["roc_auc"],
motif_stats[str(motif)]["pr_auc"],
motif_stats[str(motif)]["enr_at_fpr"],
motif_stats[str(motif)]["recall_at_fdr"],
))
f_out.close()
if args.outdir:
html_report(
args.outdir,
args.outdir + "/gimme.roc.report.txt",
args.pwmfile,
0.01,
)
# Plot the ROC curve
if outputfile:
roc_plot(outputfile, plot_x, plot_y, ids=legend) |
<SYSTEM_TASK:>
Calculates motif similarity based on Pearson correlation of scores.
<END_TASK>
<USER_TASK:>
Description:
def seqcor(m1, m2, seq=None):
"""Calculates motif similarity based on Pearson correlation of scores.
Based on Kielbasa (2015) and Grau (2015).
Scores are calculated based on scanning a de Bruijn sequence of 7-mers.
This sequence is taken from ShortCAKE (Orenstein & Shamir, 2015).
Optionally another sequence can be given as an argument.
Parameters
----------
m1 : Motif instance
Motif 1 to compare.
m2 : Motif instance
Motif 2 to compare.
seq : str, optional
Sequence to use for scanning instead of k=7 de Bruijn sequence.
Returns
-------
score, position, strand
""" |
l1 = len(m1)
l2 = len(m2)
l = max(l1, l2)
if seq is None:
seq = RCDB
L = len(seq)
# Scan RC de Bruijn sequence
result1 = pfmscan(seq, m1.pwm, m1.pwm_min_score(), len(seq), False, True)
result2 = pfmscan(seq, m2.pwm, m2.pwm_min_score(), len(seq), False, True)
# Reverse complement of motif 2
result3 = pfmscan(seq, m2.rc().pwm, m2.rc().pwm_min_score(), len(seq), False, True)
result1 = np.array(result1)
result2 = np.array(result2)
result3 = np.array(result3)
# Return maximum correlation
c = []
for i in range(l1 - l1 // 3):
c.append([1 - distance.correlation(result1[:L-l-i],result2[i:L-l]), i, 1])
c.append([1 - distance.correlation(result1[:L-l-i],result3[i:L-l]), i, -1])
for i in range(l2 - l2 // 3):
c.append([1 - distance.correlation(result1[i:L-l],result2[:L-l-i]), -i, 1])
c.append([1 - distance.correlation(result1[i:L-l],result3[:L-l-i]), -i, -1])
return sorted(c, key=lambda x: x[0])[-1] |
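A hedged usage sketch; it assumes GimmeMotifs is installed and that seqcor and motif_from_consensus are importable from gimmemotifs.comparison and gimmemotifs.motif, respectively:

from gimmemotifs.comparison import seqcor
from gimmemotifs.motif import motif_from_consensus

m1 = motif_from_consensus("TGASTCA")  # AP-1-like consensus
m2 = motif_from_consensus("TGACTCA")
score, pos, strand = seqcor(m1, m2)
# score close to 1 means highly similar score profiles; pos is the
# relative offset of m2 and strand is 1 or -1.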
<SYSTEM_TASK:>
Compare two motifs.
<END_TASK>
<USER_TASK:>
Description:
def compare_motifs(self, m1, m2, match="total", metric="wic", combine="mean", pval=False):
"""Compare two motifs.
The similarity metric can be any of seqcor, pcc, ed, distance, wic,
chisq, akl or ssd. If match is 'total' the similarity score is
calculated for the whole match, including positions that are not
present in both motifs. If match is 'partial' or 'subtotal', only the
matching positions are used to calculate the score. The scores of
individual positions are combined using either the mean or the sum.
Note that the match and combine parameters have no effect on the seqcor
similarity metric.
Parameters
----------
m1 : Motif instance
Motif instance 1.
m2 : Motif instance
Motif instance 2.
match : str, optional
Match can be "partial", "subtotal" or "total". Not all metrics use
this.
metric : str, optional
Distance metric.
combine : str, optional
Combine positional scores using "mean" or "sum". Not all metrics
use this.
pval : bool, optional
Calculate p-value of the match.
Returns
-------
score, position, strand
""" |
if metric == "seqcor":
return seqcor(m1, m2)
elif match == "partial":
if pval:
return self.pvalue(m1, m2, "total", metric, combine, self.max_partial(m1.pwm, m2.pwm, metric, combine))
elif metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]:
return self.max_partial(m1.pwm, m2.pwm, metric, combine)
else:
return self.max_partial(m1.pfm, m2.pfm, metric, combine)
elif match == "total":
if pval:
return self.pvalue(m1, m2, match, metric, combine, self.max_total(m1.pwm, m2.pwm, metric, combine))
elif metric in ["pcc", 'akl']:
# Slightly randomize the weight matrix
return self.max_total(m1.wiggle_pwm(), m2.wiggle_pwm(), metric, combine)
elif metric in ["ed", "distance", "wic", "chisq", "pcc", "ssd"]:
return self.max_total(m1.pwm, m2.pwm, metric, combine)
else:
return self.max_total(m1.pfm, m2.pfm, metric, combine)
elif match == "subtotal":
if metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]:
return self.max_subtotal(m1.pwm, m2.pwm, metric, combine)
else:
return self.max_subtotal(m1.pfm, m2.pfm, metric, combine) |
<SYSTEM_TASK:>
Pairwise comparison of a set of motifs compared to reference motifs.
<END_TASK>
<USER_TASK:>
Description:
def get_all_scores(self, motifs, dbmotifs, match, metric, combine,
pval=False, parallel=True, trim=None, ncpus=None):
"""Pairwise comparison of a set of motifs compared to reference motifs.
Parameters
----------
motifs : list
List of Motif instances.
dbmotifs : list
List of Motif instances.
match : str
Match can be "partial", "subtotal" or "total". Not all metrics use
this.
metric : str
Distance metric.
combine : str
Combine positional scores using "mean" or "sum". Not all metrics
use this.
pval : bool , optional
Calculate p-value of the match.
parallel : bool , optional
Use multiprocessing for parallel execution. True by default.
trim : float or None
If a float value is specified, motifs are trimmed using this IC
cutoff before comparison.
ncpus : int or None
Specifies the number of cores to use for parallel execution.
Returns
-------
scores : dict
Dictionary with scores.
""" |
# trim motifs first, if specified
if trim:
for m in motifs:
m.trim(trim)
for m in dbmotifs:
m.trim(trim)
# hash of result scores
scores = {}
if parallel:
# Divide the job into big chunks, to keep parallel overhead to minimum
# Number of chunks = number of processors available
if ncpus is None:
ncpus = int(MotifConfig().get_default_params()["ncpus"])
pool = Pool(processes=ncpus, maxtasksperchild=1000)
batch_len = len(dbmotifs) // ncpus
if batch_len <= 0:
batch_len = 1
jobs = []
for i in range(0, len(dbmotifs), batch_len):
# submit jobs to the job server
p = pool.apply_async(_get_all_scores,
args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval))
jobs.append(p)
pool.close()
for job in jobs:
# Get the job result
result = job.get()
# and update the result score
for m1,v in result.items():
for m2, s in v.items():
if m1 not in scores:
scores[m1] = {}
scores[m1][m2] = s
pool.join()
else:
# Do the whole thing at once if we don't want parallel
scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval)
return scores |
<SYSTEM_TASK:>
Return best match in database for motifs.
<END_TASK>
<USER_TASK:>
Description:
def get_closest_match(self, motifs, dbmotifs=None, match="partial", metric="wic",combine="mean", parallel=True, ncpus=None):
"""Return best match in database for motifs.
Parameters
----------
motifs : list or str
Filename of motifs or list of motifs.
dbmotifs : list or str, optional
Database motifs, default will be used if not specified.
match : str, optional
metric : str, optional
combine : str, optional
ncpus : int, optional
Number of threads to use.
Returns
-------
closest_match : dict
""" |
if dbmotifs is None:
pwm = self.config.get_default_params()["motif_db"]
pwmdir = self.config.get_motif_dir()
dbmotifs = os.path.join(pwmdir, pwm)
motifs = parse_motifs(motifs)
dbmotifs = parse_motifs(dbmotifs)
dbmotif_lookup = dict([(m.id, m) for m in dbmotifs])
scores = self.get_all_scores(motifs, dbmotifs, match, metric, combine, parallel=parallel, ncpus=ncpus)
for motif in scores:
scores[motif] = sorted(
scores[motif].items(),
key=lambda x:x[1][0]
)[-1]
for motif in motifs:
dbmotif, score = scores[motif.id]
pval, pos, orient = self.compare_motifs(
motif, dbmotif_lookup[dbmotif], match, metric, combine, True)
scores[motif.id] = [dbmotif, (list(score) + [pval])]
return scores |
<SYSTEM_TASK:>
List regions for the service
<END_TASK>
<USER_TASK:>
Description:
def list_regions(service):
"""
List regions for the service
""" |
for region in service.regions():
print('%(name)s: %(endpoint)s' % {
'name': region.name,
'endpoint': region.endpoint,
})
<SYSTEM_TASK:>
Print nice looking table of information from list of load balancers
<END_TASK>
<USER_TASK:>
Description:
def elb_table(balancers):
"""
Print nice looking table of information from list of load balancers
""" |
t = prettytable.PrettyTable(['Name', 'DNS', 'Ports', 'Zones', 'Created'])
t.align = 'l'
for b in balancers:
ports = ['%s: %s -> %s' % (l[2], l[0], l[1]) for l in b.listeners]
ports = '\n'.join(ports)
zones = '\n'.join(b.availability_zones)
t.add_row([b.name, b.dns_name, ports, zones, b.created_time])
return t |
<SYSTEM_TASK:>
Print nice looking table of information from list of instances
<END_TASK>
<USER_TASK:>
Description:
def ec2_table(instances):
"""
Print nice looking table of information from list of instances
""" |
t = prettytable.PrettyTable(['ID', 'State', 'Monitored', 'Image', 'Name', 'Type', 'SSH key', 'DNS'])
t.align = 'l'
for i in instances:
name = i.tags.get('Name', '')
t.add_row([i.id, i.state, i.monitored, i.image_id, name, i.instance_type, i.key_name, i.dns_name])
return t |
<SYSTEM_TASK:>
Print nice looking table of information from images
<END_TASK>
<USER_TASK:>
Description:
def ec2_image_table(images):
"""
Print nice looking table of information from images
""" |
t = prettytable.PrettyTable(['ID', 'State', 'Name', 'Owner', 'Root device', 'Is public', 'Description'])
t.align = 'l'
for i in images:
t.add_row([i.id, i.state, i.name, i.ownerId, i.root_device_type, i.is_public, i.description])
return t |
<SYSTEM_TASK:>
Run Fabric commands against EC2 instances
<END_TASK>
<USER_TASK:>
Description:
def ec2_fab(service, args):
"""
Run Fabric commands against EC2 instances
""" |
instance_ids = args.instances
instances = service.list(elb=args.elb, instance_ids=instance_ids)
hosts = service.resolve_hosts(instances)
fab.env.hosts = hosts
fab.env.key_filename = settings.get('SSH', 'KEY_FILE')
fab.env.user = settings.get('SSH', 'USER', getpass.getuser())
fab.env.parallel = True
fabfile = find_fabfile(args.file)
if not fabfile:
print("Couldn't find any fabfiles!")
return
fab.env.real_fabfile = fabfile
docstring, callables, default = load_fabfile(fabfile)
fab_state.commands.update(callables)
commands_to_run = parse_arguments(args.methods)
for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run:
fab.execute(name,
hosts=arg_hosts,
roles=arg_roles,
exclude_hosts=arg_exclude_hosts,
*args, **kwargs) |
<SYSTEM_TASK:>
Reformat data as tuples.
<END_TASK>
<USER_TASK:>
Description:
def tuples(stream, *keys):
"""Reformat data as tuples.
Parameters
----------
stream : iterable
Stream of data objects.
*keys : strings
Keys to use for ordering data.
Yields
------
items : tuple of np.ndarrays
Data object reformated as a tuple.
Raises
------
DataError
If the stream contains items that are not data-like.
KeyError
If a data object does not contain the requested key.
""" |
if not keys:
raise PescadorError('Unable to generate tuples from '
'an empty item set')
for data in stream:
try:
yield tuple(data[key] for key in keys)
except TypeError:
raise DataError("Malformed data stream: {}".format(data)) |
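A short usage sketch; it assumes pescador is installed, and depending on the version this function lives at pescador.tuples or pescador.maps.tuples:

import numpy as np
from pescador.maps import tuples  # import path may vary by version

def noise_stream(n):
    for _ in range(n):
        yield {"X": np.random.randn(4), "Y": np.random.randn(1)}

# Each yielded item is reordered into an (X, Y) tuple.
for X, Y in tuples(noise_stream(3), "X", "Y"):
    print(X.shape, Y.shape)  # (4,) (1,)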
<SYSTEM_TASK:>
Reformat data objects as keras-compatible tuples.
<END_TASK>
<USER_TASK:>
Description:
def keras_tuples(stream, inputs=None, outputs=None):
"""Reformat data objects as keras-compatible tuples.
For more detail: https://keras.io/models/model/#fit
Parameters
----------
stream : iterable
Stream of data objects.
inputs : string or iterable of strings, None
Keys to use for ordered input data.
If not specified, returns `None` in its place.
outputs : string or iterable of strings, default=None
Keys to use for ordered output data.
If not specified, returns `None` in its place.
Yields
------
x : np.ndarray, list of np.ndarray, or None
If `inputs` is a string, `x` is a single np.ndarray.
If `inputs` is an iterable of strings, `x` is a list of np.ndarrays.
If `inputs` is a null type, `x` is None.
y : np.ndarray, list of np.ndarray, or None
If `outputs` is a string, `y` is a single np.ndarray.
If `outputs` is an iterable of strings, `y` is a list of np.ndarrays.
If `outputs` is a null type, `y` is None.
Raises
------
DataError
If the stream contains items that are not data-like.
""" |
flatten_inputs, flatten_outputs = False, False
if inputs and isinstance(inputs, six.string_types):
inputs = [inputs]
flatten_inputs = True
if outputs and isinstance(outputs, six.string_types):
outputs = [outputs]
flatten_outputs = True
inputs, outputs = (inputs or []), (outputs or [])
if not inputs + outputs:
raise PescadorError('At least one key must be given for '
'`inputs` or `outputs`')
for data in stream:
try:
x = list(data[key] for key in inputs) or None
if len(inputs) == 1 and flatten_inputs:
x = x[0]
y = list(data[key] for key in outputs) or None
if len(outputs) == 1 and flatten_outputs:
y = y[0]
yield (x, y)
except TypeError:
raise DataError("Malformed data stream: {}".format(data)) |
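A usage sketch along the same lines, again assuming pescador is installed (the import path may vary by version):

import numpy as np
from pescador.maps import keras_tuples  # import path may vary by version

def stream(n):
    for _ in range(n):
        yield {"X": np.random.randn(32, 32, 3), "Y": np.array([1])}

# A single string key yields a bare ndarray; an iterable of keys would
# yield a list of ndarrays instead.
for x, y in keras_tuples(stream(2), inputs="X", outputs="Y"):
    print(x.shape, y)  # (32, 32, 3) [1]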
<SYSTEM_TASK:>
Creates histogram of motif location.
<END_TASK>
<USER_TASK:>
Description:
def location(args):
"""
Creates histogram of motif location.
Parameters
----------
args : argparse object
Command line arguments.
""" |
fastafile = args.fastafile
pwmfile = args.pwmfile
lwidth = args.width
if not lwidth:
f = Fasta(fastafile)
lwidth = len(f.items()[0][1])
f = None
jobs = []
motifs = pwmfile_to_motifs(pwmfile)
ids = [motif.id for motif in motifs]
if args.ids:
ids = args.ids.split(",")
n_cpus = int(MotifConfig().get_default_params()["ncpus"])
pool = Pool(processes=n_cpus, maxtasksperchild=1000)
for motif in motifs:
if motif.id in ids:
outfile = os.path.join("%s_histogram" % motif.id)
jobs.append(
pool.apply_async(
motif_localization,
(fastafile,motif,lwidth,outfile, args.cutoff)
))
for job in jobs:
job.get() |
<SYSTEM_TASK:>
Find location of executable.
<END_TASK>
<USER_TASK:>
Description:
def which(fname):
"""Find location of executable.""" |
if "PATH" not in os.environ or not os.environ["PATH"]:
path = os.defpath
else:
path = os.environ["PATH"]
for p in [fname] + [os.path.join(x, fname) for x in path.split(os.pathsep)]:
p = os.path.abspath(p)
if os.access(p, os.X_OK) and not os.path.isdir(p):
return p
p = sp.Popen("locate %s" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
(stdout, stderr) = p.communicate()
if not stderr:
for p in stdout.decode().split("\n"):
if (os.path.basename(p) == fname) and (
os.access(p, os.X_OK)) and (
not os.path.isdir(p)):
return p |
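Note that the function falls through and implicitly returns None when no executable is found. A quick usage sketch of the function above (output paths will differ per system):

print(which("ls"))        # e.g. '/bin/ls' on most Linux systems
print(which("no-such"))   # None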
<SYSTEM_TASK:>
Find all files in a directory by extension.
<END_TASK>
<USER_TASK:>
Description:
def find_by_ext(dirname, ext):
"""Find all files in a directory by extension.""" |
# Get all fasta-files
try:
files = os.listdir(dirname)
except OSError:
if os.path.exists(dirname):
cmd = "find {0} -maxdepth 1 -name \"*\"".format(dirname)
p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, _stderr = p.communicate()
files = [os.path.basename(fname) for fname in stdout.decode().splitlines()]
else:
raise
retfiles = [os.path.join(dirname, fname) for fname in files if
os.path.splitext(fname)[-1] in ext]
return retfiles |
<SYSTEM_TASK:>
Return list of Motif instances from default motif database.
<END_TASK>
<USER_TASK:>
Description:
def default_motifs():
"""Return list of Motif instances from default motif database.""" |
config = MotifConfig()
d = config.get_motif_dir()
m = config.get_default_params()['motif_db']
if not d or not m:
raise ValueError("default motif database not configured")
fname = os.path.join(d, m)
with open(fname) as f:
motifs = read_motifs(f)
return motifs |
<SYSTEM_TASK:>
Convert alignment to motif.
<END_TASK>
<USER_TASK:>
Description:
def motif_from_align(align):
"""Convert alignment to motif.
Converts a list with sequences to a motif. Sequences should be the same
length.
Parameters
----------
align : list
List with sequences (A,C,G,T).
Returns
-------
m : Motif instance
Motif created from the aligned sequences.
""" |
width = len(align[0])
nucs = {"A":0,"C":1,"G":2,"T":3}
pfm = [[0 for _ in range(4)] for _ in range(width)]
for row in align:
for i in range(len(row)):
pfm[i][nucs[row[i]]] += 1
m = Motif(pfm)
m.align = align[:]
return m |
<SYSTEM_TASK:>
Convert consensus sequence to motif.
<END_TASK>
<USER_TASK:>
Description:
def motif_from_consensus(cons, n=12):
"""Convert consensus sequence to motif.
Converts a consensus sequences using the nucleotide IUPAC alphabet to a
motif.
Parameters
----------
cons : str
Consensus sequence using the IUPAC alphabet.
n : int , optional
Count used to convert the sequence to a PFM.
Returns
-------
m : Motif instance
Motif created from the consensus.
""" |
width = len(cons)
nucs = {"A":0,"C":1,"G":2,"T":3}
pfm = [[0 for _ in range(4)] for _ in range(width)]
m = Motif()
for i,char in enumerate(cons):
for nuc in m.iupac[char.upper()]:
pfm[i][nucs[nuc]] = n / len(m.iupac[char.upper()])
m = Motif(pfm)
m.id = cons
return m |
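For example, with the default n=12 the two-position consensus "AR" (where R is A or G in the IUPAC alphabet) yields a PFM in which the count is split evenly over the degenerate bases. A sketch assuming the Motif class is in scope; note that the counts become floats in Python 3 due to true division:

m = motif_from_consensus("AR")
# PFM columns are A, C, G, T:
#   position 0 ('A'): [12.0, 0, 0, 0]
#   position 1 ('R'): [6.0, 0, 6.0, 0]   # count split over A and G
print(m.id)  # 'AR'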
<SYSTEM_TASK:>
Parse motifs in a variety of formats to return a list of motifs.
<END_TASK>
<USER_TASK:>
Description:
def parse_motifs(motifs):
"""Parse motifs in a variety of formats to return a list of motifs.
Parameters
----------
motifs : list or str
Filename of motif, list of motifs or single Motif instance.
Returns
-------
motifs : list
List of Motif instances.
""" |
if isinstance(motifs, six.string_types):
with open(motifs) as f:
if motifs.endswith("pwm") or motifs.endswith("pfm"):
motifs = read_motifs(f, fmt="pwm")
elif motifs.endswith("transfac"):
motifs = read_motifs(f, fmt="transfac")
else:
motifs = read_motifs(f)
elif isinstance(motifs, Motif):
motifs = [motifs]
else:
if not isinstance(list(motifs)[0], Motif):
raise ValueError("Not a list of motifs")
return list(motifs) |
<SYSTEM_TASK:>
Read motifs from a file or stream or file-like object.
<END_TASK>
<USER_TASK:>
Description:
def read_motifs(infile=None, fmt="pwm", as_dict=False):
"""
Read motifs from a file or stream or file-like object.
Parameters
----------
infile : string or file-like object, optional
Motif database, filename of motif file or file-like object. If infile
is not specified the default motifs as specified in the config file
will be returned.
fmt : string, optional
Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'.
as_dict : boolean, optional
Return motifs as a dictionary with motif_id, motif pairs.
Returns
-------
motifs : list
List of Motif instances. If as_dict is set to True, motifs is a
dictionary.
""" |
if infile is None or isinstance(infile, six.string_types):
infile = pwmfile_location(infile)
with open(infile) as f:
motifs = _read_motifs_from_filehandle(f, fmt)
else:
motifs = _read_motifs_from_filehandle(infile, fmt)
if as_dict:
motifs = {m.id:m for m in motifs}
return motifs |
<SYSTEM_TASK:>
Calculate the log-odds score for a specific k-mer.
<END_TASK>
<USER_TASK:>
Description:
def score_kmer(self, kmer):
"""Calculate the log-odds score for a specific k-mer.
Parameters
----------
kmer : str
String representing a kmer. Should be the same length as the motif.
Returns
-------
score : float
Log-odd score.
""" |
if len(kmer) != len(self.pwm):
raise Exception("incorrect k-mer length")
score = 0.0
d = {"A":0, "C":1, "G":2, "T":3}
for nuc, row in zip(kmer.upper(), self.pwm):
score += log(row[d[nuc]] / 0.25 + 0.01)
return score |
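The log-odds arithmetic is easy to verify by hand; here is a standalone version of the same computation (the example PWM is made up for illustration):

from math import log

pwm = [[0.97, 0.01, 0.01, 0.01],   # strongly prefers A
       [0.25, 0.25, 0.25, 0.25]]   # uninformative position
d = {"A": 0, "C": 1, "G": 2, "T": 3}

def score(kmer, pwm):
    # log-odds vs. a uniform 0.25 background, with a 0.01 pseudo term
    return sum(log(row[d[nuc]] / 0.25 + 0.01)
               for nuc, row in zip(kmer.upper(), pwm))

print(round(score("AC", pwm), 3))  # ~1.368 = log(3.89) + log(1.01)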
<SYSTEM_TASK:>
Convert PFM with counts to a PFM with fractions.
<END_TASK>
<USER_TASK:>
Description:
def pfm_to_pwm(self, pfm, pseudo=0.001):
"""Convert PFM with counts to a PFM with fractions.
Parameters
----------
pfm : list
2-dimensional list with counts.
pseudo : float
Pseudocount used in conversion.
Returns
-------
pwm : list
2-dimensional list with fractions.
""" |
return [[(x + pseudo)/(float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm] |
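A quick worked example of the count-to-frequency conversion; this is a standalone version of the same formula, assuming numpy is available:

import numpy as np

def pfm_to_pwm(pfm, pseudo=0.001):
    # add the pseudocount to each cell and normalize by the row total
    return [[(x + pseudo) / (float(np.sum(row)) + pseudo * 4) for x in row]
            for row in pfm]

print(pfm_to_pwm([[8, 0, 2, 0]]))
# [[0.79988..., 9.996e-05, 0.20002..., 9.996e-05]] -- each row sums to 1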
<SYSTEM_TASK:>
Calculate the information content of one position.
<END_TASK>
<USER_TASK:>
Description:
def ic_pos(self, row1, row2=None):
"""Calculate the information content of one position.
Returns
-------
score : float
Information content.
""" |
if row2 is None:
row2 = [0.25,0.25,0.25,0.25]
score = 0
for a,b in zip(row1, row2):
if a > 0:
score += a * log(a / b) / log(2)
return score |
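ic_pos is the Kullback-Leibler divergence to the background in bits, so a fully conserved position scores 2 bits and a uniform one scores 0. A standalone check of the formula:

from math import log

def ic_pos(row, bg=(0.25, 0.25, 0.25, 0.25)):
    return sum(a * log(a / b) / log(2) for a, b in zip(row, bg) if a > 0)

print(ic_pos([1.0, 0.0, 0.0, 0.0]))      # 2.0 (fully conserved)
print(ic_pos([0.25, 0.25, 0.25, 0.25]))  # 0.0 (uninformative)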
<SYSTEM_TASK:>
Calculate the Pearson correlation coefficient of one position
<END_TASK>
<USER_TASK:>
Description:
def pcc_pos(self, row1, row2):
"""Calculate the Pearson correlation coefficient of one position
compared to another position.
Returns
-------
score : float
Pearson correlation coefficient.
""" |
mean1 = np.mean(row1)
mean2 = np.mean(row2)
a = 0
x = 0
y = 0
for n1, n2 in zip(row1, row2):
a += (n1 - mean1) * (n2 - mean2)
x += (n1 - mean1) ** 2
y += (n2 - mean2) ** 2
if a == 0:
return 0
else:
return a / sqrt(x * y) |
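The result can be cross-checked against numpy; the hand-rolled version above differs only in returning 0 when the covariance term is exactly zero, which also sidesteps division by zero for constant rows:

import numpy as np

row1 = [0.7, 0.1, 0.1, 0.1]
row2 = [0.6, 0.2, 0.1, 0.1]
print(np.corrcoef(row1, row2)[0, 1])  # same value as pcc_pos(row1, row2)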
<SYSTEM_TASK:>
Return the reverse complemented motif.
<END_TASK>
<USER_TASK:>
Description:
def rc(self):
"""Return the reverse complemented motif.
Returns
-------
m : Motif instance
New Motif instance with the reverse complement of the input motif.
""" |
m = Motif()
m.pfm = [row[::-1] for row in self.pfm[::-1]]
m.pwm = [row[::-1] for row in self.pwm[::-1]]
m.id = self.id + "_revcomp"
return m |
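Reverse-complementing a matrix reverses both the position order and, within each position, the A/C/G/T column order (A pairs with T, C with G). A standalone check on a motif for 'AT', which is its own reverse complement:

pfm = [[10, 0, 0, 0],   # A
       [0, 0, 0, 10]]   # T
rc_pfm = [row[::-1] for row in pfm[::-1]]
print(rc_pfm == pfm)  # True -- 'AT' reverse-complements to itself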
<SYSTEM_TASK:>
Trim positions with an information content lower than the threshold.
<END_TASK>
<USER_TASK:>
Description:
def trim(self, edge_ic_cutoff=0.4):
"""Trim positions with an information content lower than the threshold.
The default threshold is set to 0.4. The Motif will be changed in-place.
Parameters
----------
edge_ic_cutoff : float, optional
Information content threshold. All motif positions at the flanks
with an information content lower than this will be removed.
Returns
-------
m : Motif instance
""" |
pwm = self.pwm[:]
while len(pwm) > 0 and self.ic_pos(pwm[0]) < edge_ic_cutoff:
pwm = pwm[1:]
self.pwm = self.pwm[1:]
self.pfm = self.pfm[1:]
while len(pwm) > 0 and self.ic_pos(pwm[-1]) < edge_ic_cutoff:
pwm = pwm[:-1]
self.pwm = self.pwm[:-1]
self.pfm = self.pfm[:-1]
self.consensus = None
self.min_score = None
self.max_score = None
self.wiggled_pwm = None
return self |
<SYSTEM_TASK:>
Scan FASTA with the motif as a consensus sequence.
<END_TASK>
<USER_TASK:>
Description:
def consensus_scan(self, fa):
"""Scan FASTA with the motif as a consensus sequence.
Parameters
----------
fa : Fasta object
Fasta object to scan.
Returns
-------
matches : dict
Dictionary with matches.
""" |
regexp = "".join(["[" + "".join(self.iupac[x.upper()]) + "]" for x in self.to_consensusv2()])
p = re.compile(regexp)
matches = {}
for name,seq in fa.items():
matches[name] = []
for match in p.finditer(seq):
middle = (match.span()[1] + match.span()[0]) // 2
matches[name].append(middle)
return matches |
<SYSTEM_TASK:>
Scan sequences with this motif and save to a GFF file.
<END_TASK>
<USER_TASK:>
Description:
def pwm_scan_to_gff(self, fa, gfffile, cutoff=0.9, nreport=50, scan_rc=True, append=False):
"""Scan sequences with this motif and save to a GFF file.
Scan sequences from a FASTA object with this motif. Less efficient
than using a Scanner object. By setting the cutoff to 0.0 and
nreport to 1, the best match for every sequence will be returned.
The output is saved to a file in GFF format.
Parameters
----------
fa : Fasta object
Fasta object to scan.
gfffile : str
Filename of GFF output file.
cutoff : float , optional
Cutoff to use for motif scanning. This cutoff is not specifically
optimized and the strictness will vary a lot with motif length.
nreport : int , optional
Maximum number of matches to report.
scan_rc : bool , optional
Scan the reverse complement. True by default.
append : bool , optional
Append to GFF file instead of overwriting it. False by default.
""" |
if append:
out = open(gfffile, "a")
else:
out = open(gfffile, "w")
c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff
pwm = self.pwm
strandmap = {-1:"-","-1":"-","-":"-","1":"+",1:"+","+":"+"}
gff_line = ("{}\tpfmscan\tmisc_feature\t{}\t{}\t{:.3f}\t{}\t.\t"
"motif_name \"{}\" ; motif_instance \"{}\"\n")
for name, seq in fa.items():
result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc)
for score, pos, strand in result:
out.write(gff_line.format(
name,
pos,
pos + len(pwm),
score,
strandmap[strand],
self.id,
seq[pos:pos + len(pwm)]
))
out.close() |
<SYSTEM_TASK:>
Return the average of two motifs.
<END_TASK>
<USER_TASK:>
Description:
def average_motifs(self, other, pos, orientation, include_bg=False):
"""Return the average of two motifs.
Combine this motif with another motif and return the average as a new
Motif object. The position and orientation need to be supplied. The pos
parameter is the position of the second motif relative to this motif.
For example, take the following two motifs:
Motif 1: CATGYT
Motif 2: GGCTTGY
With position -2, the motifs are averaged as follows:
xxCATGYT
GGCTTGYx
Parameters
----------
other : Motif object
Other Motif object.
pos : int
Position of the second motif relative to this motif.
orientation : int
Orientation, should be 1 or -1. If the orientation is -1 then the
reverse complement of the other motif is used for averaging.
include_bg : bool , optional
Extend both motifs with background frequencies (0.25) before
averaging. False by default.
Returns
-------
motif : motif object
New Motif object containing average motif.
""" |
# xxCATGYT
# GGCTTGYx
# pos = -2
pfm1 = self.pfm[:]
pfm2 = other.pfm[:]
if orientation < 0:
pfm2 = [row[::-1] for row in pfm2[::-1]]
pfm1_count = float(np.sum(pfm1[0]))
pfm2_count = float(np.sum(pfm2[0]))
if include_bg:
if len(pfm1) > len(pfm2) + pos:
pfm2 += [[pfm2_count / 4.0 for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)]
elif len(pfm2) + pos > len(pfm1):
pfm1 += [[pfm1_count / 4.0 for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)]
if pos < 0:
pfm1 = [[pfm1_count / 4.0 for x in range(4)] for i in range(-pos)] + pfm1
elif pos > 0:
pfm2 = [[pfm2_count / 4.0 for x in range(4)] for i in range(pos)] + pfm2
else:
if len(pfm1) > len(pfm2) + pos:
pfm2 += [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)]
elif len(pfm2) + pos > len(pfm1):
pfm1 += [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)]
if pos < 0:
pfm1 = [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in range(-pos)] + pfm1
elif pos > 0:
pfm2 = [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(pos)] + pfm2
pfm = [[a + b for a,b in zip(x,y)] for x,y in zip(pfm1, pfm2)]
m = Motif(pfm)
m.id = m.to_consensus()
return m |
<SYSTEM_TASK:>
Return pwm as string.
<END_TASK>
<USER_TASK:>
Description:
def to_pwm(self, precision=4, extra_str=""):
"""Return pwm as string.
Parameters
----------
precision : int, optional, default 4
Floating-point precision.
extra_str : str, optional
Extra text to include with motif id line.
Returns
-------
motif_str : str
Motif formatted in PWM format.
""" |
motif_id = self.id
if extra_str:
motif_id += "_%s" % extra_str
if not self.pwm:
self.pwm = [self.iupac_pwm[char] for char in self.consensus.upper()]
return ">%s\n%s" % (
motif_id,
self._pwm_to_str(precision)
) |
<SYSTEM_TASK:>
Create a sequence logo using seqlogo.
<END_TASK>
<USER_TASK:>
Description:
def to_img(self, fname, fmt="PNG", add_left=0, seqlogo=None, height=6):
"""Create a sequence logo using seqlogo.
Create a sequence logo and save it to a file. Valid formats are: PNG,
EPS, GIF and PDF.
Parameters
----------
fname : str
Output filename.
fmt : str , optional
Output format (case-insensitive). Valid formats are PNG, EPS, GIF
and PDF.
add_left : int , optional
Pad motif with empty positions on the left side.
seqlogo : str
Location of the seqlogo executable. By default the seqlogo version
that is included with GimmeMotifs is used.
height : float
Height of the image
""" |
if not seqlogo:
seqlogo = self.seqlogo
if not seqlogo:
raise ValueError("seqlogo not specified or configured")
#TODO: split to_align function
VALID_FORMATS = ["EPS", "GIF", "PDF", "PNG"]
N = 1000
fmt = fmt.upper()
if fmt not in VALID_FORMATS:
sys.stderr.write("Invalid motif format\n")
return
if fname[-4:].upper() == (".%s" % fmt):
fname = fname[:-4]
seqs = []
if add_left == 0:
seqs = ["" for i in range(N)]
else:
for nuc in ["A", "C", "T", "G"]:
seqs += [nuc * add_left for i in range(N // 4)]
for pos in range(len(self.pwm)):
vals = [self.pwm[pos][0] * N]
for i in range(1,4):
vals.append(vals[i-1] + self.pwm[pos][i] * N)
if vals[3] - N != 0:
#print "Motif weights don't add up to 1! Error of %s%%" % ((vals[3] - n)/ n * 100)
vals[3] = N
for i in range(N):
if i <= vals[0]:
seqs[i] += "A"
elif i <= vals[1]:
seqs[i] += "C"
elif i <= vals[2]:
seqs[i] += "G"
elif i <= vals[3]:
seqs[i] += "T"
f = NamedTemporaryFile(mode="w", dir=mytmpdir())
for seq in seqs:
f.write("%s\n" % seq)
f.flush()
makelogo = "{0} -f {1} -F {2} -c -a -h {3} -w {4} -o {5} -b -n -Y"
cmd = makelogo.format(
seqlogo,
f.name,
fmt,
height,
len(self) + add_left,
fname)
sp.call(cmd, shell=True) |
<SYSTEM_TASK:>
Create a new motif with shuffled positions.
<END_TASK>
<USER_TASK:>
Description:
def randomize(self):
"""Create a new motif with shuffled positions.
Shuffle the positions of this motif and return a new Motif instance.
Returns
-------
m : Motif instance
Motif instance with shuffled positions.
""" |
random_pfm = [[c for c in row] for row in self.pfm]
random.shuffle(random_pfm)
m = Motif(pfm=random_pfm)
m.id = "random"
return m |
<SYSTEM_TASK:>
Receive data over a socket.
<END_TASK>
<USER_TASK:>
Description:
def zmq_recv_data(socket, flags=0, copy=True, track=False):
"""Receive data over a socket.""" |
data = dict()
msg = socket.recv_multipart(flags=flags, copy=copy, track=track)
headers = json.loads(msg[0].decode('ascii'))
if len(headers) == 0:
raise StopIteration
for header, payload in zip(headers, msg[1:]):
# memoryview replaces the Python 2-only buffer() builtin
data[header['key']] = np.frombuffer(memoryview(payload),
dtype=header['dtype'])
data[header['key']].shape = header['shape']
if six.PY2:
# Legacy python won't let us preserve alignment, skip this step
continue
data[header['key']].flags['ALIGNED'] = header['aligned']
return data |
<SYSTEM_TASK:>
Mask all lowercase nucleotides with N's
<END_TASK>
<USER_TASK:>
Description:
def hardmask(self):
""" Mask all lowercase nucleotides with N's """ |
p = re.compile("a|c|g|t|n")
for seq_id in self.fasta_dict.keys():
self.fasta_dict[seq_id] = p.sub("N", self.fasta_dict[seq_id])
return self |
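The masking is a plain regular-expression substitution applied to each sequence, e.g.:

import re

p = re.compile("a|c|g|t|n")
print(p.sub("N", "ACGTacgtNn"))  # 'ACGTNNNNNN' -- lowercase masked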
<SYSTEM_TASK:>
Write sequences to FASTA formatted file
<END_TASK>
<USER_TASK:>
Description:
def writefasta(self, fname):
""" Write sequences to FASTA formatted file""" |
f = open(fname, "w")
fa_str = "\n".join([">%s\n%s" % (id, self._format_seq(seq)) for id, seq in self.items()])
f.write(fa_str)
f.close() |
<SYSTEM_TASK:>
Activates a number of streams
<END_TASK>
<USER_TASK:>
Description:
def _activate(self):
"""Activates a number of streams""" |
self.distribution_ = 1. / self.n_streams * np.ones(self.n_streams)
self.valid_streams_ = np.ones(self.n_streams, dtype=bool)
self.streams_ = [None] * self.k
self.stream_weights_ = np.zeros(self.k)
self.stream_counts_ = np.zeros(self.k, dtype=int)
# Array of pointers into `self.streamers`
self.stream_idxs_ = np.zeros(self.k, dtype=int)
for idx in range(self.k):
if not (self.distribution_ > 0).any():
break
self.stream_idxs_[idx] = self.rng.choice(
self.n_streams, p=self.distribution_)
self.streams_[idx], self.stream_weights_[idx] = (
self._new_stream(self.stream_idxs_[idx]))
self.weight_norm_ = np.sum(self.stream_weights_) |
<SYSTEM_TASK:>
Yields items from the mux, and handles stream exhaustion and
<END_TASK>
<USER_TASK:>
Description:
def iterate(self, max_iter=None):
"""Yields items from the mux, and handles stream exhaustion and
replacement.
""" |
if max_iter is None:
max_iter = np.inf
# Calls Streamer's __enter__, which calls activate()
with self as active_mux:
# Main sampling loop
n = 0
while n < max_iter and active_mux._streamers_available():
# Pick a stream from the active set
idx = active_mux._next_sample_index()
# Can we sample from it?
try:
# Then yield the sample
yield six.advance_iterator(active_mux.streams_[idx])
# Increment the sample counter
n += 1
active_mux.stream_counts_[idx] += 1
except StopIteration:
# Oops, this stream is exhausted.
# Call child-class exhausted-stream behavior
active_mux._on_stream_exhausted(idx)
# Setup a new stream for this index
active_mux._replace_stream(idx) |
<SYSTEM_TASK:>
ShuffledMux's activate is similar to StochasticMux,
<END_TASK>
<USER_TASK:>
Description:
def _activate(self):
"""ShuffledMux's activate is similar to StochasticMux,
but there is no 'n_active', since all the streams are always available.
""" |
self.streams_ = [None] * self.n_streams
# Weights of the active streams.
# Once a stream is exhausted, it is set to 0.
# Upon activation, this is just a copy of self.weights.
self.stream_weights_ = np.array(self.weights, dtype=float)
# How many samples have been drawn from each (active) stream.
self.stream_counts_ = np.zeros(self.n_streams, dtype=int)
# Initialize each active stream.
for idx in range(self.n_streams):
# Setup a new streamer at this index.
self._new_stream(idx)
self.weight_norm_ = np.sum(self.stream_weights_) |
<SYSTEM_TASK:>
ShuffledMux chooses its next sample stream randomly,
<END_TASK>
<USER_TASK:>
Description:
def _next_sample_index(self):
"""ShuffledMux chooses its next sample stream randomly,
conditioned on the stream weights.
""" |
return self.rng.choice(self.n_streams,
p=(self.stream_weights_ /
self.weight_norm_)) |
<SYSTEM_TASK:>
Rotates through each active sampler by incrementing the index
<END_TASK>
<USER_TASK:>
Description:
def _next_sample_index(self):
"""Rotates through each active sampler by incrementing the index""" |
# Return the next streamer index where the streamer is not None,
# wrapping around.
idx = self.active_index_
self.active_index_ += 1
if self.active_index_ >= len(self.streams_):
self.active_index_ = 0
# Continue to increment if this streamer is exhausted (None)
# This should never be infinite looping;
# the `_streamers_available` check happens immediately
# before this, so there should always be at least one not-None
# streamer.
while self.streams_[idx] is None:
idx = self.active_index_
self.active_index_ += 1
if self.active_index_ >= len(self.streams_):
self.active_index_ = 0
return idx |
<SYSTEM_TASK:>
Activate a new stream, given the index into the stream pool.
<END_TASK>
<USER_TASK:>
Description:
def _new_stream(self, idx):
"""Activate a new stream, given the index into the stream pool.
BaseMux's _new_stream simply chooses a new stream and activates it.
For special behavior (ie Weighted streams), you must override this
in a child class.
Parameters
----------
idx : int, [0:n_streams - 1]
The stream index to replace
""" |
# Get the stream index from the candidate pool
stream_index = self.stream_idxs_[idx]
# Activate the Streamer, and get the weights
self.streams_[idx] = self.streamers[stream_index].iterate()
# Reset the sample count to zero
self.stream_counts_[idx] = 0 |