text_prompt (string, 100 to 17.7k chars, nullable) | code_prompt (string, 7 to 9.86k chars, nullable) |
---|---|
<SYSTEM_TASK:>
Add nested relationship count conditions to the query.
<END_TASK>
<USER_TASK:>
Description:
def _has_nested(self, relations, operator='>=', count=1, boolean='and', extra=None):
"""
Add nested relationship count conditions to the query.
:param relations: nested relations
:type relations: str
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:param boolean: The boolean value
:type boolean: str
:param extra: The extra query
:type extra: Builder or callable
:rtype: Builder
""" |
relations = relations.split('.')
def closure(q):
if len(relations) > 1:
q.where_has(relations.pop(0), closure)
else:
q.has(relations.pop(0), operator, count, boolean, extra)
return self.where_has(relations.pop(0), closure) |
<SYSTEM_TASK:>
Add a relationship count to the query.
<END_TASK>
<USER_TASK:>
Description:
def doesnt_have(self, relation, boolean='and', extra=None):
"""
Add a relationship count to the query.
:param relation: The relation to count
:type relation: str
:param boolean: The boolean value
:type boolean: str
:param extra: The extra query
:type extra: Builder or callable
:rtype: Builder
""" |
return self.has(relation, '<', 1, boolean, extra) |
<SYSTEM_TASK:>
Add a relationship count condition to the query with where clauses.
<END_TASK>
<USER_TASK:>
Description:
def where_has(self, relation, extra, operator='>=', count=1):
"""
Add a relationship count condition to the query with where clauses.
:param relation: The relation to count
:type relation: str
:param extra: The extra query
:type extra: Builder or callable
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:rtype: Builder
""" |
return self.has(relation, operator, count, 'and', extra) |
<SYSTEM_TASK:>
Add a relationship count condition to the query with an "or".
<END_TASK>
<USER_TASK:>
Description:
def or_has(self, relation, operator='>=', count=1):
"""
Add a relationship count condition to the query with an "or".
:param relation: The relation to count
:type relation: str
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:rtype: Builder
""" |
return self.has(relation, operator, count, 'or') |
<SYSTEM_TASK:>
Add a relationship count condition to the query with where clauses and an "or".
<END_TASK>
<USER_TASK:>
Description:
def or_where_has(self, relation, extra, operator='>=', count=1):
"""
Add a relationship count condition to the query with where clauses and an "or".
:param relation: The relation to count
:type relation: str
:param extra: The extra query
:type extra: Builder or callable
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:rtype: Builder
""" |
return self.has(relation, operator, count, 'or', extra) |
<SYSTEM_TASK:>
Merge the "wheres" from the relation query to a has query.
<END_TASK>
<USER_TASK:>
Description:
def _merge_wheres_to_has(self, has_query, relation):
"""
Merge the "wheres" from the relation query to a has query.
:param has_query: The has query
:type has_query: Builder
:param relation: The relation to count
:type relation: eloquent.orm.relations.Relation
""" |
relation_query = relation.get_base_query()
has_query.merge_wheres(relation_query.wheres, relation_query.get_bindings())
self._query.merge_bindings(has_query.get_query()) |
<SYSTEM_TASK:>
Parse the nested relationship in a relation.
<END_TASK>
<USER_TASK:>
Description:
def _parse_nested(self, name, results):
"""
Parse the nested relationship in a relation.
:param name: The name of the relationship
:type name: str
:type results: dict
:rtype: dict
""" |
progress = []
for segment in name.split('.'):
progress.append(segment)
last = '.'.join(progress)
if last not in results:
results[last] = self.__class__(self.get_query().new_query())
return results |
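A minimal, standalone sketch (independent of the ORM classes above; the helper name is made up) of how this progressive splitting turns a dotted relation name into one result key per nesting level:

# Hypothetical illustration of _parse_nested's key-building logic.
def nested_keys(name):
    progress = []
    keys = []
    for segment in name.split('.'):
        progress.append(segment)
        keys.append('.'.join(progress))
    return keys

print(nested_keys('comments.author.profile'))
# ['comments', 'comments.author', 'comments.author.profile']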
<SYSTEM_TASK:>
Rollback the last migration operation.
<END_TASK>
<USER_TASK:>
Description:
def rollback(self, path, pretend=False):
"""
Rollback the last migration operation.
:param path: The path
:type path: str
:param pretend: Whether we execute the migrations as dry-run
:type pretend: bool
:rtype: int
""" |
self._notes = []
migrations = self._repository.get_last()
if not migrations:
self._note('<info>Nothing to rollback.</info>')
return len(migrations)
for migration in migrations:
self._run_down(path, migration, pretend)
return len(migrations) |
<SYSTEM_TASK:>
Try to find columns that only changed their names.
<END_TASK>
<USER_TASK:>
Description:
def detect_column_renamings(self, table_differences):
"""
Try to find columns that only changed their names.
:type table_differences: TableDiff
""" |
rename_candidates = {}
for added_column_name, added_column in table_differences.added_columns.items():
for removed_column in table_differences.removed_columns.values():
if len(self.diff_column(added_column, removed_column)) == 0:
if added_column.get_name() not in rename_candidates:
rename_candidates[added_column.get_name()] = []
rename_candidates[added_column.get_name()].append((removed_column, added_column, added_column_name))
for candidate_columns in rename_candidates.values():
if len(candidate_columns) == 1:
removed_column, added_column, _ = candidate_columns[0]
removed_column_name = removed_column.get_name().lower()
added_column_name = added_column.get_name().lower()
if removed_column_name not in table_differences.renamed_columns:
table_differences.renamed_columns[removed_column_name] = added_column
del table_differences.added_columns[added_column_name]
del table_differences.removed_columns[removed_column_name] |
<SYSTEM_TASK:>
Returns the difference between column1 and column2
<END_TASK>
<USER_TASK:>
Description:
def diff_column(self, column1, column2):
"""
Returns the difference between column1 and column2
:type column1: eloquent.dbal.column.Column
:type column2: eloquent.dbal.column.Column
:rtype: list
""" |
properties1 = column1.to_dict()
properties2 = column2.to_dict()
changed_properties = []
for prop in ['type', 'notnull', 'unsigned', 'autoincrement']:
if properties1[prop] != properties2[prop]:
changed_properties.append(prop)
if properties1['default'] != properties2['default']\
or (properties1['default'] is None and properties2['default'] is not None)\
or (properties2['default'] is None and properties1['default'] is not None):
changed_properties.append('default')
if properties1['type'] == 'string' and properties1['type'] != 'guid'\
or properties1['type'] in ['binary', 'blob']:
length1 = properties1['length'] or 255
length2 = properties2['length'] or 255
if length1 != length2:
changed_properties.append('length')
if properties1['fixed'] != properties2['fixed']:
changed_properties.append('fixed')
elif properties1['type'] in ['decimal', 'float', 'double precision']:
precision1 = properties1['precision'] or 10
precision2 = properties2['precision'] or 10
if precision1 != precision2:
changed_properties.append('precision')
if properties1['scale'] != properties2['scale']:
changed_properties.append('scale')
return list(set(changed_properties)) |
<SYSTEM_TASK:>
Create a new model instance by type.
<END_TASK>
<USER_TASK:>
Description:
def _create_model_by_type(self, type):
"""
Create a new model instance by type.
:rtype: Model
""" |
klass = None
for cls in eloquent.orm.model.Model.__subclasses__():
morph_class = cls.__morph_class__ or cls.__name__
if morph_class == type:
klass = cls
break
return klass() |
<SYSTEM_TASK:>
Populate the placeholders in the migration stub.
<END_TASK>
<USER_TASK:>
Description:
def _populate_stub(self, name, stub, table):
"""
Populate the placeholders in the migration stub.
:param name: The name of the migration
:type name: str
:param stub: The stub
:type stub: str
:param table: The table name
:type table: str
:rtype: str
""" |
stub = stub.replace('DummyClass', self._get_class_name(name))
if table is not None:
stub = stub.replace('dummy_table', table)
return stub |
<SYSTEM_TASK:>
Disconnect from the given database and remove from local cache
<END_TASK>
<USER_TASK:>
Description:
def purge(self, name=None):
"""
Disconnect from the given database and remove from local cache
:param name: The name of the connection
:type name: str
:rtype: None
""" |
self.disconnect(name)
if name in self._connections:
del self._connections[name] |
<SYSTEM_TASK:>
Get all the primary keys for an array of models.
<END_TASK>
<USER_TASK:>
Description:
def get_keys(self, models, key=None):
"""
Get all the primary keys for an array of models.
:type models: list
:type key: str
:rtype: list
""" |
return list(set(map(lambda value: value.get_attribute(key) if key else value.get_key(), models))) |
<SYSTEM_TASK:>
Set the join clause for the query.
<END_TASK>
<USER_TASK:>
Description:
def _set_join(self, query=None):
"""
Set the join clause for the query.
""" |
if not query:
query = self._query
foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)
query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key) |
<SYSTEM_TASK:>
Violinplots of the highest and lowest scoring of each modality
<END_TASK>
<USER_TASK:>
Description:
def plot_best_worst_fits(assignments_df, data, modality_col='Modality',
score='$\log_2 K$'):
"""Violinplots of the highest and lowest scoring of each modality""" |
ncols = 2
nrows = len(assignments_df.groupby(modality_col).groups.keys())
fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(nrows*4, ncols*6))
axes_iter = axes.flat
fits = 'Highest', 'Lowest'
for modality, df in assignments_df.groupby(modality_col):
df = df.sort_values(score)
color = MODALITY_TO_COLOR[modality]
for fit in fits:
if fit == 'Highest':
ids = df['Feature ID'][-10:]
else:
ids = df['Feature ID'][:10]
fit_psi = data[ids]
tidy_fit_psi = fit_psi.stack().reset_index()
tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
'level_1':
'Feature ID',
0: '$\Psi$'})
if tidy_fit_psi.empty:
continue
ax = six.next(axes_iter)
violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
color=color, ax=ax)
ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
sns.despine()
fig.tight_layout() |
<SYSTEM_TASK:>
Draw barplots grouped by modality of modality percentage per group
<END_TASK>
<USER_TASK:>
Description:
def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
"""Draw barplots grouped by modality of modality percentage per group
Parameters
----------
Returns
-------
Raises
------
""" |
if percentages:
counts = 100 * (counts.T / counts.T.sum()).T
# with sns.set(style='whitegrid'):
if ax is None:
ax = plt.gca()
full_width = 0.8
width = full_width / counts.shape[0]
for i, (group, series) in enumerate(counts.iterrows()):
left = np.arange(len(self.modality_order)) + i * width
height = [series[i] if i in series else 0
for i in self.modality_order]
color = phenotype_to_color[group]
ax.bar(left, height, width=width, color=color, label=group,
linewidth=.5, edgecolor='k')
ylabel = 'Percentage of events' if percentages else 'Number of events'
ax.set_ylabel(ylabel)
ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
ax.set_xticklabels(self.modality_order)
ax.set_xlabel('Splicing modality')
ax.set_xlim(0, len(self.modality_order))
ax.legend(loc='best')
ax.grid(axis='y', linestyle='-', linewidth=0.5)
sns.despine() |
<SYSTEM_TASK:>
Show the values underlying bayesian modality estimations of an event
<END_TASK>
<USER_TASK:>
Description:
def event_estimation(self, event, logliks, logsumexps, renamed=''):
"""Show the values underlying bayesian modality estimations of an event
Parameters
----------
Returns
-------
Raises
------
""" |
plotter = _ModelLoglikPlotter()
plotter.plot(event, logliks, logsumexps, self.modality_to_color,
renamed=renamed)
return plotter |
<SYSTEM_TASK:>
Assign the most likely modality given the fitted data
<END_TASK>
<USER_TASK:>
Description:
def predict(self, fitted):
"""Assign the most likely modality given the fitted data
Parameters
----------
fitted : pandas.DataFrame or pandas.Series
Either a (n_modalities, features) DataFrame or (n_modalities,)
Series, either of which will return the best modality for each
feature.
""" |
if fitted.shape[0] != len(self.modalities):
raise ValueError("This data doesn't look like it had the distance "
"between it and the five modalities calculated")
return fitted.idxmin() |
<SYSTEM_TASK:>
Calculate log-likelihood of a feature x for each model
<END_TASK>
<USER_TASK:>
Description:
def logliks(self, x):
"""Calculate log-likelihood of a feature x for each model
Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001
because they are out of range of the beta distribution.
Parameters
----------
x : numpy.array-like
A single vector to estimate the log-likelihood of the models on
Returns
-------
logliks : numpy.array
Log-likelihood of these data in each member of the model's family
""" |
x = x.copy()
# Replace exactly 0 and exactly 1 values with a very small number
# (machine epsilon, the smallest number that this computer is capable
# of storing) because 0 and 1 are not in the Beta distribution.
x[x == 0] = VERY_SMALL_NUMBER
x[x == 1] = 1 - VERY_SMALL_NUMBER
return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
for prob, rv in
zip(self.prob_parameters, self.rvs)]) |
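Written out, the quantity the list comprehension evaluates for each member $m$ of the family is (a sketch, using $\mathrm{Beta}$ for each frozen distribution in ``self.rvs`` and $p_m$ for the matching entry of ``self.prob_parameters``):

$$\log L_m(x) = \ln p_m + \sum_{i \,:\, x_i\ \text{finite}} \ln \mathrm{Beta}\!\left(x_i;\ \alpha_m, \beta_m\right)$$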
<SYSTEM_TASK:>
Convert floats to either integers or a nice looking fraction
<END_TASK>
<USER_TASK:>
Description:
def nice_number_string(number, decimal_places=2):
"""Convert floats to either integers or a nice looking fraction""" |
if number == np.round(number):
return str(int(number))
elif number < 1 and number > 0:
inverse = 1 / number
if int(inverse) == np.round(inverse):
return r'\frac{{1}}{{{}}}'.format(int(inverse))
# Fall through to plain decimal formatting for everything else.
template = '{{:.{0}}}'.format(decimal_places)
return template.format(number) |
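A few illustrative calls (the inputs are made up; expected results shown as comments):

nice_number_string(2.0)    # '2'
nice_number_string(0.5)    # '\frac{1}{2}'
nice_number_string(1.75)   # '1.8' -- two significant digits by default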
<SYSTEM_TASK:>
Plot violins of each distribution in the model family
<END_TASK>
<USER_TASK:>
Description:
def violinplot(self, n=1000, **kwargs):
"""Plot violins of each distribution in the model family
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
ax : matplotlib.Axes object
Axes object with violins plotted
""" |
kwargs.setdefault('palette', 'Purples')
dfs = []
for rv in self.rvs:
psi = rv.rvs(n)
df = pd.Series(psi, name=self.ylabel).to_frame()
alpha, beta = rv.args
alpha = self.nice_number_string(alpha, decimal_places=2)
beta = self.nice_number_string(beta, decimal_places=2)
df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
alpha, beta)
dfs.append(df)
data = pd.concat(dfs)
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(len(self.alphas)*0.625, 4))
else:
ax = kwargs.pop('ax')
ax = violinplot(x='parameters', y=self.ylabel, data=data,
ax=ax, **kwargs)
sns.despine(ax=ax)
return ax |
<SYSTEM_TASK:>
Get log-likelihood of models at each parameterization for given data
<END_TASK>
<USER_TASK:>
Description:
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
""" |
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True) |
<SYSTEM_TASK:>
Get the modality assignments of each splicing event in the data
<END_TASK>
<USER_TASK:>
Description:
def fit(self, data):
"""Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
""" |
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors |
<SYSTEM_TASK:>
Guess the most likely modality for each event
<END_TASK>
<USER_TASK:>
Description:
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
""" |
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax() |
<SYSTEM_TASK:>
Calculate log-likelihoods of each modality's parameterization
<END_TASK>
<USER_TASK:>
Description:
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
feature : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood of the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
""" |
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks |
<SYSTEM_TASK:>
Get the log2 bayes factor of the fit for each modality
<END_TASK>
<USER_TASK:>
Description:
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality""" |
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series |
<SYSTEM_TASK:>
r"""Visualize all modality family members with parameters
<END_TASK>
<USER_TASK:>
Description:
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
""" |
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout() |
<SYSTEM_TASK:>
Given a list of bins, make a list of strings of those bin ranges
<END_TASK>
<USER_TASK:>
Description:
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
""" |
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])] |
<SYSTEM_TASK:>
Makes a histogram of each column using the provided bins
<END_TASK>
<USER_TASK:>
Description:
def binify(data, bins):
"""Makes a histogram of each column the provided binsize
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
""" |
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned |
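A small usage sketch, assuming ``binify`` from the snippet above is in scope (the column names and values are invented for illustration):

import numpy as np
import pandas as pd

# Five samples, two features, binned into [0, 0.5) and [0.5, 1]:
psi = pd.DataFrame({'event1': [0.1, 0.2, 0.9, 0.8, 0.7],
                    'event2': [0.4, 0.5, 0.6, 0.45, 0.55]})
binned = binify(psi, bins=(0, 0.5, 1))
# binned.index is ['0-0.5', '0.5-1'] and each column sums to 1,
# e.g. binned['event1'] is (0.4, 0.6).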
<SYSTEM_TASK:>
Kullback-Leibler divergence of two probability distributions, as pandas
<END_TASK>
<USER_TASK:>
Description:
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas
dataframes, p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Leibler divergence of the common columns between the
dataframes. E.g. between 1st column in p and 1st column in q, and 2nd
column in p and 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
""" |
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0) |
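Per column, this computes the standard Kullback-Leibler divergence in bits, with terms where either distribution is zero dropped (the code's log 0 := 0 convention):

$$D_{\mathrm{KL}}(p \,\|\, q) = \sum_i p_i \log_2 \frac{p_i}{q_i}$$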
<SYSTEM_TASK:>
Finds the per-column JSD between dataframes p and q
<END_TASK>
<USER_TASK:>
Description:
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distributions, as pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
""" |
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result |
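In formula form, with $m$ the equal-weight mixture of the two distributions (here ``weight`` is 0.5, so the two terms carry equal weight):

$$\mathrm{JSD}(p, q) = \tfrac{1}{2} D_{\mathrm{KL}}(p \,\|\, m) + \tfrac{1}{2} D_{\mathrm{KL}}(q \,\|\, m), \qquad m = \tfrac{1}{2}(p + q)$$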
<SYSTEM_TASK:>
Find the entropy of each column of a dataframe
<END_TASK>
<USER_TASK:>
Description:
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Series
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
""" |
try:
_check_prob_dist(binned)
except ValueError:
return np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0) |
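The quantity computed per column is the Shannon entropy in base $b$:

$$H_b(p) = -\sum_i p_i \log_b p_i$$

For example, a column binned uniformly into two bins, $p = (0.5, 0.5)$, gives $H_2(p) = 1$ bit.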
<SYSTEM_TASK:>
Binify and calculate jensen-shannon divergence between two dataframes
<END_TASK>
<USER_TASK:>
Description:
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
""" |
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series |
<SYSTEM_TASK:>
Jensen-Shannon divergence of features across phenotypes
<END_TASK>
<USER_TASK:>
Description:
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
bins : array-like
Bins to use when binifying the data
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
""" |
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, bins, None))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, bins, pair)
jsds.append(series)
return pd.concat(jsds, axis=1) |
<SYSTEM_TASK:>
Transform a tall JSD dataframe to a square matrix of mean JSDs
<END_TASK>
<USER_TASK:>
Description:
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
""" |
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1) |
<SYSTEM_TASK:>
Return the next value for a given sequence.
<END_TASK>
<USER_TASK:>
Description:
def get_next_value(
sequence_name='default', initial_value=1, reset_value=None,
*, nowait=False, using=None):
"""
Return the next value for a given sequence.
""" |
# Inner import because models cannot be imported before their application.
from .models import Sequence
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
if (getattr(connection, 'pg_version', 0) >= 90500
and reset_value is None and not nowait):
# PostgreSQL ≥ 9.5 supports "upsert".
with connection.cursor() as cursor:
cursor.execute(UPSERT_QUERY, [sequence_name, initial_value])
last, = cursor.fetchone()
return last
else:
# Other databases require making more database queries.
with transaction.atomic(using=using, savepoint=False):
sequence, created = (
Sequence.objects
.select_for_update(nowait=nowait)
.get_or_create(name=sequence_name,
defaults={'last': initial_value})
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
sequence.save()
return sequence.last |
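A hedged usage sketch (the sequence names are placeholders; the call is typically wrapped in a transaction so the consumed value is committed together with the rows that use it):

from django.db import transaction

with transaction.atomic():
    invoice_number = get_next_value('invoice_numbers')           # 1, 2, 3, ... on successive calls
    batch_number = get_next_value('batches', initial_value=100)  # starts at 100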
<SYSTEM_TASK:>
Check the status of all provided data and update the suite.
<END_TASK>
<USER_TASK:>
Description:
def check(self, final_line_count):
"""Check the status of all provided data and update the suite.""" |
if self._lines_seen["version"]:
self._process_version_lines()
self._process_plan_lines(final_line_count) |
<SYSTEM_TASK:>
Check if a plan is on a valid line.
<END_TASK>
<USER_TASK:>
Description:
def _plan_on_valid_line(self, at_line, final_line_count):
"""Check if a plan is on a valid line.""" |
# Put the common cases first.
if at_line == 1 or at_line == final_line_count:
return True
# The plan may only appear on line 2 if the version is at line 1.
after_version = (
self._lines_seen["version"]
and self._lines_seen["version"][0] == 1
and at_line == 2
)
if after_version:
return True
return False |
<SYSTEM_TASK:>
Handle a plan that contains a SKIP directive.
<END_TASK>
<USER_TASK:>
Description:
def handle_skipping_plan(self, skip_plan):
"""Handle a plan that contains a SKIP directive.""" |
skip_line = Result(True, None, skip_plan.directive.text, Directive("SKIP"))
self._suite.addTest(Adapter(self._filename, skip_line)) |
<SYSTEM_TASK:>
Add an error test to the suite.
<END_TASK>
<USER_TASK:>
Description:
def _add_error(self, message):
"""Add an error test to the suite.""" |
error_line = Result(False, None, message, Directive(""))
self._suite.addTest(Adapter(self._filename, error_line)) |
<SYSTEM_TASK:>
Format an exception as diagnostics output.
<END_TASK>
<USER_TASK:>
Description:
def format_exception(exception):
"""Format an exception as diagnostics output.
exception is the tuple as expected from sys.exc_info.
""" |
exception_lines = traceback.format_exception(*exception)
# The lines returned from format_exception do not strictly contain
# one line per element in the list (i.e. some elements have new
# line characters in the middle). Normalize that oddity.
lines = "".join(exception_lines).splitlines(True)
return format_as_diagnostics(lines) |
<SYSTEM_TASK:>
Generate tap.line.Line objects, given a file-like object `fh`.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, fh):
"""Generate tap.line.Line objects, given a file-like object `fh`.
`fh` may be any object that implements both the iterator and
context management protocol (i.e. it can be used in both a
"with" statement and a "for...in" statement.)
Trailing whitespace and newline characters will be automatically
stripped from the input lines.
""" |
with fh:
try:
first_line = next(fh)
except StopIteration:
return
first_parsed = self.parse_line(first_line.rstrip())
fh_new = itertools.chain([first_line], fh)
if first_parsed.category == "version" and first_parsed.version >= 13:
if ENABLE_VERSION_13:
fh_new = peekable(itertools.chain([first_line], fh))
self._try_peeking = True
else: # pragma no cover
print(
"""
WARNING: Optional imports not found, TAP 13 output will be
ignored. To parse yaml, see requirements in docs:
https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13"""
)
for line in fh_new:
yield self.parse_line(line.rstrip(), fh_new) |
<SYSTEM_TASK:>
Parse a line into whatever TAP category it belongs.
<END_TASK>
<USER_TASK:>
Description:
def parse_line(self, text, fh=None):
"""Parse a line into whatever TAP category it belongs.""" |
match = self.ok.match(text)
if match:
return self._parse_result(True, match, fh)
match = self.not_ok.match(text)
if match:
return self._parse_result(False, match, fh)
if self.diagnostic.match(text):
return Diagnostic(text)
match = self.plan.match(text)
if match:
return self._parse_plan(match)
match = self.bail.match(text)
if match:
return Bail(match.group("reason"))
match = self.version.match(text)
if match:
return self._parse_version(match)
return Unknown() |
<SYSTEM_TASK:>
Parse a matching plan line.
<END_TASK>
<USER_TASK:>
Description:
def _parse_plan(self, match):
"""Parse a matching plan line.""" |
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are allowed in the plan.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) |
<SYSTEM_TASK:>
Parse a matching result line into a result instance.
<END_TASK>
<USER_TASK:>
Description:
def _parse_result(self, ok, match, fh=None):
"""Parse a matching result line into a result instance.""" |
peek_match = None
try:
if fh is not None and self._try_peeking:
peek_match = self.yaml_block_start.match(fh.peek())
except StopIteration:
pass
if peek_match is None:
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
)
indent = peek_match.group("indent")
concat_yaml = self._extract_yaml_block(indent, fh)
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
raw_yaml_block=concat_yaml,
) |
<SYSTEM_TASK:>
Lazy load a yaml_block.
<END_TASK>
<USER_TASK:>
Description:
def yaml_block(self):
"""Lazy load a yaml_block.
If yaml support is not available,
there is an error in parsing the yaml block,
or no yaml is associated with this result,
``None`` will be returned.
:rtype: dict
""" |
if LOAD_YAML and self._yaml_block is not None:
try:
yaml_dict = yaml.load(self._yaml_block)
return yaml_dict
except yaml.error.YAMLError:
print("Error parsing yaml block. Check formatting.")
return None |
<SYSTEM_TASK:>
Load any files found into a suite.
<END_TASK>
<USER_TASK:>
Description:
def load(self, files):
"""Load any files found into a suite.
Any directories are walked and their files are added as TAP files.
:returns: A ``unittest.TestSuite`` instance
""" |
suite = unittest.TestSuite()
for filepath in files:
if os.path.isdir(filepath):
self._find_tests_in_directory(filepath, suite)
else:
suite.addTest(self.load_suite_from_file(filepath))
return suite |
<SYSTEM_TASK:>
Load a test suite with test lines from the provided TAP file.
<END_TASK>
<USER_TASK:>
Description:
def load_suite_from_file(self, filename):
"""Load a test suite with test lines from the provided TAP file.
:returns: A ``unittest.TestSuite`` instance
""" |
suite = unittest.TestSuite()
rules = Rules(filename, suite)
if not os.path.exists(filename):
rules.handle_file_does_not_exist()
return suite
line_generator = self._parser.parse_file(filename)
return self._load_lines(filename, line_generator, suite, rules) |
<SYSTEM_TASK:>
Load a test suite with test lines from the TAP stream on STDIN.
<END_TASK>
<USER_TASK:>
Description:
def load_suite_from_stdin(self):
"""Load a test suite with test lines from the TAP stream on STDIN.
:returns: A ``unittest.TestSuite`` instance
""" |
suite = unittest.TestSuite()
rules = Rules("stream", suite)
line_generator = self._parser.parse_stdin()
return self._load_lines("stream", line_generator, suite, rules) |
<SYSTEM_TASK:>
Load a suite with lines produced by the line generator.
<END_TASK>
<USER_TASK:>
Description:
def _load_lines(self, filename, line_generator, suite, rules):
"""Load a suite with lines produced by the line generator.""" |
line_counter = 0
for line in line_generator:
line_counter += 1
if line.category in self.ignored_lines:
continue
if line.category == "test":
suite.addTest(Adapter(filename, line))
rules.saw_test()
elif line.category == "plan":
if line.skip:
rules.handle_skipping_plan(line)
return suite
rules.saw_plan(line, line_counter)
elif line.category == "bail":
rules.handle_bail(line)
return suite
elif line.category == "version":
rules.saw_version_at(line_counter)
rules.check(line_counter)
return suite |
<SYSTEM_TASK:>
Keep track of which test cases have executed.
<END_TASK>
<USER_TASK:>
Description:
def _track(self, class_name):
"""Keep track of which test cases have executed.""" |
if self._test_cases.get(class_name) is None:
if self.streaming and self.header:
self._write_test_case_header(class_name, self.stream)
self._test_cases[class_name] = []
if self.combined:
self.combined_test_cases_seen.append(class_name) |
<SYSTEM_TASK:>
Notify the tracker how many total tests there will be.
<END_TASK>
<USER_TASK:>
Description:
def set_plan(self, total):
"""Notify the tracker how many total tests there will be.""" |
self.plan = total
if self.streaming:
# This will only write the plan if we haven't written it
# already but we want to check if we already wrote a
# test out (in which case we can't just write the plan out
# right here).
if not self.combined_test_cases_seen:
self._write_plan(self.stream)
elif not self.combined:
raise ValueError(
"set_plan can only be used with combined or streaming output"
) |
<SYSTEM_TASK:>
Generate TAP reports.
<END_TASK>
<USER_TASK:>
Description:
def generate_tap_reports(self):
"""Generate TAP reports.
The results are either combined into a single output file or
the output file name is generated from the test case.
""" |
# We're streaming but set_plan wasn't called, so we can only
# know the plan now (at the end).
if self.streaming and not self._plan_written:
print("1..{0}".format(self.combined_line_number), file=self.stream)
self._plan_written = True
return
if self.combined:
combined_file = "testresults.tap"
if self.outdir:
combined_file = os.path.join(self.outdir, combined_file)
with open(combined_file, "w") as out_file:
self._write_tap_version(out_file)
if self.plan is not None:
print("1..{0}".format(self.plan), file=out_file)
for test_case in self.combined_test_cases_seen:
self.generate_tap_report(
test_case, self._test_cases[test_case], out_file
)
if self.plan is None:
print("1..{0}".format(self.combined_line_number), file=out_file)
else:
for test_case, tap_lines in self._test_cases.items():
with open(self._get_tap_file_path(test_case), "w") as out_file:
self._write_tap_version(out_file)
self.generate_tap_report(test_case, tap_lines, out_file) |
<SYSTEM_TASK:>
Write the plan line to the stream.
<END_TASK>
<USER_TASK:>
Description:
def _write_plan(self, stream):
"""Write the plan line to the stream.
If we have a plan and have not yet written it out, write it to
the given stream.
""" |
if self.plan is not None:
if not self._plan_written:
print("1..{0}".format(self.plan), file=stream)
self._plan_written = True |
<SYSTEM_TASK:>
Entry point for ``tappy`` command.
<END_TASK>
<USER_TASK:>
Description:
def main(argv=sys.argv, stream=sys.stderr):
"""Entry point for ``tappy`` command.""" |
args = parse_args(argv)
suite = build_suite(args)
runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream)
result = runner.run(suite)
return get_status(result) |
<SYSTEM_TASK:>
Build a test suite by loading TAP files or a TAP stream.
<END_TASK>
<USER_TASK:>
Description:
def build_suite(args):
"""Build a test suite by loading TAP files or a TAP stream.""" |
loader = Loader()
if len(args.files) == 0 or args.files[0] == "-":
suite = loader.load_suite_from_stdin()
else:
suite = loader.load(args.files)
return suite |
<SYSTEM_TASK:>
Add a failure to the result.
<END_TASK>
<USER_TASK:>
Description:
def addFailure(self, result):
"""Add a failure to the result.""" |
result.addFailure(self, (Exception, Exception(), None))
# Since TAP will not provide assertion data, clean up the assertion
# section so it is not so spaced out.
test, err = result.failures[-1]
result.failures[-1] = (test, "") |
<SYSTEM_TASK:>
Event listener to recursively expire `left` and `right` attributes of the
<END_TASK>
<USER_TASK:>
Description:
def after_flush_postexec(self, session, context):
"""
Event listener to recursively expire `left` and `right` attributes of the
parents of all modified instances that are part of this flush.
""" |
instances = self.instances[session]
while instances:
instance = instances.pop()
if instance not in session:
continue
parent = self.get_parent_value(instance)
while parent != NO_VALUE and parent is not None:
instances.discard(parent)
session.expire(parent, ['left', 'right', 'tree_id', 'level'])
parent = self.get_parent_value(parent)
else:
session.expire(instance, ['left', 'right', 'tree_id', 'level'])
self.expire_session_for_children(session, instance) |
<SYSTEM_TASK:>
Moving one node of tree inside another
<END_TASK>
<USER_TASK:>
Description:
def move_inside(self, parent_id):
""" Moving one node of tree inside another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function`
""" | # noqa
session = Session.object_session(self)
self.parent_id = parent_id
self.mptt_move_inside = parent_id
session.add(self) |
<SYSTEM_TASK:>
Moving one node of tree after another
<END_TASK>
<USER_TASK:>
Description:
def move_after(self, node_id):
""" Moving one node of tree after another
For example see :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_after_function`
""" | # noqa
session = Session.object_session(self)
self.parent_id = self.parent_id
self.mptt_move_after = node_id
session.add(self) |
<SYSTEM_TASK:>
Moving one node of tree before another
<END_TASK>
<USER_TASK:>
Description:
def move_before(self, node_id):
""" Moving one node of tree before another
For example see:
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_function`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_other_tree`
* :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_top_level`
""" | # noqa
session = Session.object_session(self)
table = _get_tree_table(self.__mapper__)
pk = getattr(table.c, self.get_pk_column().name)
node = session.query(table).filter(pk == node_id).one()
self.parent_id = node.parent_id
self.mptt_move_before = node_id
session.add(self) |
<SYSTEM_TASK:>
Node to the left of the current node at the same level
<END_TASK>
<USER_TASK:>
Description:
def leftsibling_in_level(self):
""" Node to the left of the current node at the same level
For example see
:mod:`sqlalchemy_mptt.tests.cases.get_tree.test_leftsibling_in_level`
""" | # noqa
table = _get_tree_table(self.__mapper__)
session = Session.object_session(self)
current_lvl_nodes = session.query(table) \
.filter_by(level=self.level).filter_by(tree_id=self.tree_id) \
.filter(table.c.lft < self.left).order_by(table.c.lft).all()
if current_lvl_nodes:
return current_lvl_nodes[-1]
return None |
<SYSTEM_TASK:>
This method generates a tree of the current node table in dict or json
<END_TASK>
<USER_TASK:>
Description:
def get_tree(cls, session=None, json=False, json_fields=None, query=None):
""" This method generate tree of current node table in dict or json
format. You can make custom query with attribute ``query``. By default
it return all nodes in table.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
Kwargs:
json (bool): if True return JSON jqTree format
json_fields (function): append custom fields in JSON
query (function): it takes :class:`sqlalchemy.orm.query.Query`
object as an argument, and returns in a modified form
::
def query(nodes):
return nodes.filter(node.__class__.tree_id.is_(node.tree_id))
node.get_tree(session=DBSession, json=True, query=query)
Example:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree`
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree_with_custom_field`
""" | # noqa
tree = []
nodes_of_level = {}
# handle custom query
nodes = cls._base_query(session)
if query:
nodes = query(nodes)
nodes = cls._base_order(nodes).all()
# search minimal level of nodes.
min_level = min([node.level for node in nodes] or [None])
def get_node_id(node):
return getattr(node, node.get_pk_name())
for node in nodes:
result = cls._node_to_dict(node, json, json_fields)
parent_id = node.parent_id
if node.level != min_level: # for children
# Find parent in the tree
if parent_id not in nodes_of_level.keys():
continue
if 'children' not in nodes_of_level[parent_id]:
nodes_of_level[parent_id]['children'] = []
# Append node to parent
nl = nodes_of_level[parent_id]['children']
nl.append(result)
nodes_of_level[get_node_id(node)] = nl[-1]
else: # for top level nodes
tree.append(result)
nodes_of_level[get_node_id(node)] = tree[-1]
return tree |
<SYSTEM_TASK:>
This method generates a branch from a tree, beginning with the current
<END_TASK>
<USER_TASK:>
Description:
def drilldown_tree(self, session=None, json=False, json_fields=None):
""" This method generate a branch from a tree, begining with current
node.
For example:
node7.drilldown_tree()
.. code::
level Nested sets example
1 1(1)22 ---------------------
_______________|_________|_________ |
| | | | |
2 2(2)5 6(4)11 | 12(7)21 |
| ^ | ^ |
3 3(3)4 7(5)8 9(6)10 | 13(8)16 17(10)20 |
| | | |
4 | 14(9)15 18(11)19 |
| |
---------------------
Example in tests:
* :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_drilldown_tree`
""" |
if not session:
session = object_session(self)
return self.get_tree(
session,
json=json,
json_fields=json_fields,
query=self._drilldown_query
) |
<SYSTEM_TASK:>
Generate path from a leaf or intermediate node to the root.
<END_TASK>
<USER_TASK:>
Description:
def path_to_root(self, session=None, order=desc):
"""Generate path from a leaf or intermediate node to the root.
For example:
node11.path_to_root()
.. code::
level Nested sets example
-----------------------------------------
1 | 1(1)22 |
________|______|_____________________ |
| | | | |
| ------+--------- | |
2 2(2)5 6(4)11 | -- 12(7)21 |
| ^ | / \ |
3 3(3)4 7(5)8 9(6)10 ---/---- \ |
13(8)16 | 17(10)20 |
| | | |
4 14(9)15 | 18(11)19 |
| |
-------------
""" |
table = self.__class__
query = self._base_query_obj(session=session)
query = query.filter(table.is_ancestor_of(self, inclusive=True))
return self._base_order(query, order=order) |
<SYSTEM_TASK:>
Returns the number dying at the beginning of age x
<END_TASK>
<USER_TASK:>
Description:
def dx(mt, x):
""" Returns the number of dying at begining of age x """ |
end_x_val = mt.lx.index(0)
if x < end_x_val:
return mt.lx[x] - mt.lx[x + 1]
else:
return 0.0 |
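A toy illustration, assuming only that `mt` exposes an `lx` list as used above (``ToyTable`` and its numbers are invented):

class ToyTable(object):
    # Survivors at each integer age; the table ends where lx first hits 0.
    lx = [1000, 950, 600, 200, 0]

mt = ToyTable()
dx(mt, 1)   # 950 - 600 = 350 deaths between ages 1 and 2
dx(mt, 4)   # 0.0, past the end of the table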
<SYSTEM_TASK:>
This function evaluates the APV of a geometrically increasing annual annuity-due
<END_TASK>
<USER_TASK:>
Description:
def qAx(mt, x, q):
""" This function evaluates the APV of a geometrically increasing annual annuity-due """ |
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return Ax(mtj, x) |
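The adjustment used above is the standard change of rate for benefits growing geometrically at rate $q$ under interest $i$ (a sketch of the identity, not an addition to the library):

$$j = \frac{i - q}{1 + q}$$

so ``Ax(mtj, x)`` values the geometrically increasing benefit as a level benefit at the adjusted rate $j$.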
<SYSTEM_TASK:>
find new position of vertex v according to adjacency in prevlayer.
<END_TASK>
<USER_TASK:>
Description:
def _meanvalueattr(self,v):
"""
find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median.
""" |
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars) |
<SYSTEM_TASK:>
compute every node coordinates after converging to optimal ordering by N
<END_TASK>
<USER_TASK:>
Description:
def draw(self,N=1.5):
"""compute every node coordinates after converging to optimal ordering by N
rounds, and finally perform the edge routing.
""" |
while N>0.5:
for (l,mvmt) in self.ordering_step():
pass
N = N-1
if N>0:
for (l,mvmt) in self.ordering_step(oneway=True):
pass
self.setxy()
self.draw_edges() |
<SYSTEM_TASK:>
set rank value for vertex v and add it to the corresponding layer.
<END_TASK>
<USER_TASK:>
Description:
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
""" |
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v])) |
<SYSTEM_TASK:>
creates a DummyVertex at rank r inserted in the ctrl dict
<END_TASK>
<USER_TASK:>
Description:
def dummyctrl(self,r,ctrl):
"""creates a DummyVertex at rank r inserted in the ctrl dict
of the associated edge and layer.
Arguments:
r (int): rank value
ctrl (dict): the edge's control vertices
Returns:
DummyVertex : the created DummyVertex.
""" |
dv = DummyVertex(r)
dv.view.w,dv.view.h=self.dw,self.dh
self.grx[dv] = dv
dv.ctrl = ctrl
ctrl[r] = dv
self.layers[r].append(dv)
return dv |
<SYSTEM_TASK:>
creates and defines all needed dummy vertices for edge e.
<END_TASK>
<USER_TASK:>
Description:
def setdummies(self,e):
"""creates and defines all needed dummy vertices for edge e.
""" |
v0,v1 = e.v
r0,r1 = self.grx[v0].rank,self.grx[v1].rank
if r0>r1:
assert e in self.alt_e
v0,v1 = v1,v0
r0,r1 = r1,r0
if (r1-r0)>1:
# "dummy vertices" are stored in the edge ctrl dict,
# keyed by their rank in layers.
ctrl=self.ctrls[e]={}
ctrl[r0]=v0
ctrl[r1]=v1
for r in xrange(r0+1,r1):
self.dummyctrl(r,ctrl) |
<SYSTEM_TASK:>
performs vertical alignment according to current dirvh internal state.
<END_TASK>
<USER_TASK:>
Description:
def _coord_vertical_alignment(self):
"""performs vertical alignment according to current dirvh internal state.
""" |
dirh,dirv = self.dirh,self.dirv
g = self.grx
for l in self.layers[::-dirv]:
if not l.prevlayer(): continue
r=None
for vk in l[::dirh]:
for m in l._medianindex(vk):
# take the median node in dirv layer:
um = l.prevlayer()[m]
# if vk is "free" align it with um's root
if g[vk].align is vk:
if dirv==1: vpair = (vk,um)
else: vpair = (um,vk)
# if vk<->um link is used for alignment
if (vpair not in self.conflicts) and \
(r==None or dirh*r<dirh*m):
g[um].align = vk
g[vk].root = g[um].root
g[vk].align = g[vk].root
r = m |
<SYSTEM_TASK:>
Load PNGs with stimulus information for pRF model creation.
<END_TASK>
<USER_TASK:>
Description:
def load_png(varNumVol, strPathPng, tplVslSpcSze=(200, 200), varStrtIdx=0,
varZfill=3):
"""
Load PNGs with stimulus information for pRF model creation.
Parameters
----------
varNumVol : int
Number of PNG files.
strPathPng : str
Parent directory of PNG files. PNG files need to be organised in
numerical order (e.g. `file_001.png`, `file_002.png`, etc.).
tplVslSpcSze : tuple
Pixel size (x, y) at which PNGs are sampled. In case of large PNGs it
is useful to sample at a lower than the original resolution.
varStrtIdx : int
Start index of PNG files. For instance, `varStrtIdx = 0` if the name of
the first PNG file is `file_000.png`, or `varStrtIdx = 1` if it is
`file_001.png`.
varZfill : int
Zero padding of PNG file names. For instance, `varZfill = 3` if the
name of PNG files is `file_007.png`, or `varZfill = 4` if it is
`file_0007.png`.
Returns
-------
aryPngData : np.array
3D Numpy array with the following structure:
aryPngData[x-pixel-index, y-pixel-index, PngNumber]
Notes
-----
Part of py_pRF_mapping library.
""" |
# Create list of png files to load:
lstPngPaths = [None] * varNumVol
for idx01 in range(0, varNumVol):
lstPngPaths[idx01] = (strPathPng +
str(idx01 + varStrtIdx).zfill(varZfill) +
'.png')
# The png data will be saved in a numpy array of the following order:
# aryPngData[x-pixel, y-pixel, PngNumber].
aryPngData = np.zeros((tplVslSpcSze[0],
tplVslSpcSze[1],
varNumVol))
# Open first image in order to check dimensions (greyscale or RGB, i.e. 2D
# or 3D).
objIm = Image.open(lstPngPaths[0])
aryTest = np.array(objIm.resize((objIm.size[0], objIm.size[1]),
Image.ANTIALIAS))
varNumDim = aryTest.ndim
del(aryTest)
# Loop trough PNG files:
for idx01 in range(0, varNumVol):
# Old version of reading images with scipy
# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :, 0]
# aryPngData[:, :, idx01] = sp.misc.imread(lstPngPaths[idx01])[:, :]
# Load & resize image:
objIm = Image.open(lstPngPaths[idx01])
objIm = objIm.resize((tplVslSpcSze[0],
tplVslSpcSze[1]),
resample=Image.NEAREST)
# Casting of array depends on dimensionality (greyscale or RGB, i.e. 2D
# or 3D):
if varNumDim == 2:
aryPngData[:, :, idx01] = np.array(objIm.resize(
(objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :]
elif varNumDim == 3:
aryPngData[:, :, idx01] = np.array(objIm.resize(
(objIm.size[0], objIm.size[1]), Image.ANTIALIAS))[:, :, 0]
else:
# Error message:
strErrMsg = ('ERROR: PNG files for model creation need to be RGB '
+ 'or greyscale.')
raise ValueError(strErrMsg)
# Convert RGB values (0 to 255) to integer ones and zeros:
aryPngData = (aryPngData > 200).astype(np.int8)
return aryPngData |
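A hedged usage sketch (the path, frame count, and naming scheme are placeholders):

# Load 300 stimulus frames named frame_001.png ... frame_300.png,
# resampled to 200 x 200 pixels:
aryPngData = load_png(varNumVol=300,
                      strPathPng='/home/user/stimuli/frame_',
                      tplVslSpcSze=(200, 200),
                      varStrtIdx=1,
                      varZfill=3)
# aryPngData.shape == (200, 200, 300); entries are 0 or 1.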
<SYSTEM_TASK:>
Load information from event text file.
<END_TASK>
<USER_TASK:>
Description:
def load_ev_txt(strPthEv):
"""Load information from event text file.
Parameters
----------
strPthEv : str
Path to event text file
Returns
-------
aryEvTxt : 2d numpy array, shape [n_measurements, 3]
Array with info about conditions: type, onset, duration
Notes
-----
Part of py_pRF_mapping library.
""" |
aryEvTxt = np.loadtxt(strPthEv, dtype='float', comments='#', delimiter=' ',
skiprows=0, usecols=(0, 1, 2))
return aryEvTxt |
<SYSTEM_TASK:>
Apply status mapping to a raw API result.
<END_TASK>
<USER_TASK:>
Description:
def adjust_status(info: dict) -> dict:
"""Apply status mapping to a raw API result.""" |
modified_info = deepcopy(info)
modified_info.update({
'level':
get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])),
'level2':
STATUS_MAP[99] if info['level2'] is None else
get_nearest_by_numeric_key(STATUS_MAP, int(info['level2']))
})
return modified_info |
<SYSTEM_TASK:>
Return the CDC status for the specified state.
<END_TASK>
<USER_TASK:>
Description:
async def status_by_state(self, state: str) -> dict:
"""Return the CDC status for the specified state.""" |
data = await self.raw_cdc_data()
try:
info = next((v for k, v in data.items() if state in k))
except StopIteration:
return {}
return adjust_status(info) |
<SYSTEM_TASK:>
Returns the Exception class and the message of the exception as string.
<END_TASK>
<USER_TASK:>
Description:
def brief_exception_text(exception, secret_values):
"""
Returns the Exception class and the message of the exception as string.
:param exception: The exception to format
:param secret_values: Values to hide in output
""" |
exception_text = _hide_secret_values(str(exception), secret_values)
return '[{}]\n{}'.format(type(exception).__name__, exception_text) |
<SYSTEM_TASK:>
Prints the exception message and the name of the exception class to stderr.
<END_TASK>
<USER_TASK:>
Description:
def print_exception(exception, secret_values=None):
"""
Prints the exception message and the name of the exception class to stderr.
:param exception: The exception to print
:param secret_values: Values to hide in output
""" |
print(brief_exception_text(exception, secret_values), file=sys.stderr) |
<SYSTEM_TASK:>
Saves the Document to the database if it is valid.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, **kwargs):
"""
Saves the Document to the database if it is valid.
Returns errors otherwise.
""" |
if self.is_valid:
before = self.before_insert()
if before:
return before
try:
self._document['_id'] = self.insert_one(self._document)
self.after_insert()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get('err', 'PyMongoError.')
),
operation='insert', collection=type(self).__name__,
document=self._document,
)
return self._errors |
<SYSTEM_TASK:>
Updates the document with the given _id saved in the collection if it
<END_TASK>
<USER_TASK:>
Description:
def update(self, **kwargs):
"""
Updates the document with the given _id saved in the collection if it
is valid.
Returns errors otherwise.
""" |
if self.is_valid:
if '_id' in self._document:
to_update = self.find_one({'_id': self._id})
if to_update:
before = self.before_update(old=to_update)
if before:
return before
try:
self.replace_one({'_id': self._id}, self._document)
self.after_update(old=to_update)
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='update', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
)
return self._errors |
<SYSTEM_TASK:>
Deletes the document if it is saved in the collection.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, **kwargs):
"""
Deletes the document if it is saved in the collection.
""" |
if self.is_valid:
if '_id' in self._document:
to_delete = self.find_one({'_id': self._id})
if to_delete:
before = self.before_delete()
if before:
return before
try:
self.delete_one({'_id': self._id})
self.after_delete()
return self._document
except PyMongoException as exc:
return PyMongoError(
error_message=exc.details.get(
'errmsg', exc.details.get(
'err', 'PyMongoError.'
)
),
operation='delete', collection=type(self).__name__,
document=self._document,
)
else:
return DocumentNotFoundError(type(self).__name__, self._id)
else:
return UnidentifiedDocumentError(
type(self).__name__, self._document
) |
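insert, update and delete share one hook pattern: a truthy return value from the matching before_* method aborts the operation and is handed back to the caller, while after_* runs once the write succeeds. A sketch of a hypothetical subclass using the delete hooks (the base Document class and hook names are those shown above):

class Article(Document):
    def before_delete(self):
        # A truthy return value aborts the delete and is returned as-is.
        if self._document.get('published'):
            return {'error': 'published articles cannot be deleted'}

    def after_delete(self):
        print('deleted', self._id)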
<SYSTEM_TASK:>
Returns one document dict if one passes the filter.
<END_TASK>
<USER_TASK:>
Description:
def find_one(cls, filter=None, *args, **kwargs):
"""
Returns one document dict if one passes the filter.
Returns None otherwise.
""" |
return cls.collection.find_one(filter, *args, **kwargs) |
<SYSTEM_TASK:>
Returns all document dicts that pass the filter
<END_TASK>
<USER_TASK:>
Description:
def find(cls, *args, **kwargs):
"""
Returns all document dicts that pass the filter
""" |
return list(cls.collection.find(*args, **kwargs)) |
<SYSTEM_TASK:>
Returns the document dicts returned from the Aggregation Pipeline
<END_TASK>
<USER_TASK:>
Description:
def aggregate(cls, pipeline=None, **kwargs):
"""
Returns the document dicts returned from the Aggregation Pipeline
""" |
return list(cls.collection.aggregate(pipeline or [], **kwargs)) |
<SYSTEM_TASK:>
Inserts a list of documents into the Collection and returns their _ids
<END_TASK>
<USER_TASK:>
Description:
def insert_many(cls, documents, ordered=True):
"""
Inserts a list of documents into the Collection and returns their _ids
""" |
return cls.collection.insert_many(documents, ordered).inserted_ids |
<SYSTEM_TASK:>
Updates a document that passes the filter with the update value
<END_TASK>
<USER_TASK:>
Description:
def update_one(cls, filter, update, upsert=False):
"""
Updates a document that passes the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
""" |
return cls.collection.update_one(filter, update, upsert).raw_result |
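A usage sketch with a hypothetical Document subclass named User; the update expression uses a standard MongoDB operator ($set), and raw_result is PyMongo's raw server reply:

result = User.update_one(
    {'email': 'someone@example.com'},
    {'$set': {'active': True}},
)
print(result)  # e.g. {'n': 1, 'nModified': 1, 'ok': 1.0, ...}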
<SYSTEM_TASK:>
Updates all documents that pass the filter with the update value
<END_TASK>
<USER_TASK:>
Description:
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
""" |
return cls.collection.update_many(filter, update, upsert).raw_result |
<SYSTEM_TASK:>
Replaces a document that passes the filter.
<END_TASK>
<USER_TASK:>
Description:
def replace_one(cls, filter, replacement, upsert=False):
"""
Replaces a document that passes the filter.
Will upsert a new document if upsert=True and no document is filtered
""" |
return cls.collection.replace_one(
filter, replacement, upsert
).raw_result |
<SYSTEM_TASK:>
Returns a Document if any document is filtered, returns None otherwise
<END_TASK>
<USER_TASK:>
Description:
def get(cls, filter=None, **kwargs):
"""
Returns a Document if any document is filtered, returns None otherwise
""" |
document = cls(cls.find_one(filter, **kwargs))
return document if document.document else None |
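A usage sketch with a hypothetical subclass; get wraps find_one and returns None both when nothing matches and when the matched document is empty:

class User(Document):
    pass

user = User.get({'email': 'someone@example.com'})
if user is None:
    print('no such user')
else:
    print(user.document['_id'])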
<SYSTEM_TASK:>
Returns a list of Documents if any document is filtered
<END_TASK>
<USER_TASK:>
Description:
def documents(cls, filter=None, **kwargs):
"""
Returns a list of Documents if any document is filtered
""" |
documents = [cls(document) for document in cls.find(filter, **kwargs)]
return [document for document in documents if document.document] |
<SYSTEM_TASK:>
Returns an iterator over all of the statements belonging to a file.
<END_TASK>
<USER_TASK:>
Description:
def in_file(self, fn: str) -> Iterator[Statement]:
"""
Returns an iterator over all of the statements belonging to a file.
""" |
yield from self.__file_to_statements.get(fn, []) |
<SYSTEM_TASK:>
Returns an iterator over all of the statements located at a given line.
<END_TASK>
<USER_TASK:>
Description:
def at_line(self, line: FileLine) -> Iterator[Statement]:
"""
Returns an iterator over all of the statements located at a given line.
""" |
num = line.num
for stmt in self.in_file(line.filename):
if stmt.location.start.line == num:
yield stmt |
<SYSTEM_TASK:>
Wrap multiple paragraphs of text, returning a list of wrapped lines.
<END_TASK>
<USER_TASK:>
Description:
def wrap(text, width=70, **kwargs):
"""Wrap multiple paragraphs of text, returning a list of wrapped lines.
Reformat the multiple paragraphs in 'text' so they fit in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See ParagraphWrapper class for available keyword args to customize
wrapping behaviour.
""" |
w = ParagraphWrapper(width=width, **kwargs)
return w.wrap(text) |
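A hedged usage sketch, assuming ParagraphWrapper behaves like textwrap.TextWrapper but wraps each blank-line-separated paragraph independently (that class is not shown here, so the exact output may differ):

text = ("First paragraph with enough words to need wrapping at a narrow width.\n"
        "\n"
        "Second paragraph, wrapped independently of the first.")

for line in wrap(text, width=30):
    print(line)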
<SYSTEM_TASK:>
Fill multiple paragraphs of text, returning a new string.
<END_TASK>
<USER_TASK:>
Description:
def fill(text, width=70, **kwargs):
"""Fill multiple paragraphs of text, returning a new string.
Reformat multiple paragraphs in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped text. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See ParagraphWrapper class for
available keyword args to customize wrapping behaviour.
""" |
w = ParagraphWrapper(width=width, **kwargs)
return w.fill(text) |
<SYSTEM_TASK:>
Creates the output directory if not existing.
<END_TASK>
<USER_TASK:>
Description:
def prepare_outdir(outdir):
"""
Creates the output directory if not existing.
If outdir is None, nothing happens.
:param outdir: The output directory to create.
""" |
if outdir:
outdir = os.path.expanduser(outdir)
if not os.path.isdir(outdir):
try:
os.makedirs(outdir)
except os.error as e:
raise JobExecutionError('Failed to create outdir "{}".\n{}'.format(outdir, str(e))) |
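A brief usage sketch; the path is illustrative, and passing None is a deliberate no-op:

prepare_outdir('~/analysis/output')  # '~' is expanded and missing directories are created
prepare_outdir(None)                 # nothing happens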
<SYSTEM_TASK:>
Decorates a ``schematics.Model`` class to add it as a field
<END_TASK>
<USER_TASK:>
Description:
def model_node(**kwargs):
"""
Decorates a ``schematics.Model`` class to add it as a field
of type ``schematic.types.ModelType``.
Keyword arguments are passed to ``schematic.types.ModelType``.
Example:
.. code-block:: python
:emphasize-lines: 8,13
from schematics import Model, types
from rafter.contrib.schematics.helpers import model_node
class MyModel(Model):
name = types.StringType()
@model_node()
class options(Model):
status = types.IntType()
# With arguments and another name
@model_node(serialized_name='extra', required=True)
class _extra(Model):
test = types.StringType()
""" |
kwargs.setdefault('default', {})
def decorator(model):
return types.ModelType(model, **kwargs)
return decorator |
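A hedged usage sketch of the MyModel example from the docstring, assuming standard schematics conversion of nested dicts on instantiation:

m = MyModel({'name': 'demo', 'options': {'status': 2}})
print(m.name)            # 'demo'
print(m.options.status)  # 2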
<SYSTEM_TASK:>
Removes the write permissions for the given file for owner, groups and others.
<END_TASK>
<USER_TASK:>
Description:
def make_file_read_only(file_path):
"""
Removes the write permissions for the given file for owner, groups and others.
:param file_path: The file whose privileges are revoked.
:raise FileNotFoundError: If the given file does not exist.
""" |
old_permissions = os.stat(file_path).st_mode
os.chmod(file_path, old_permissions & ~WRITE_PERMISSIONS) |
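WRITE_PERMISSIONS is defined elsewhere; a plausible definition using the stat module, followed by an illustrative call:

import os
import stat

# Assumed definition: the write bits for owner, group and others.
WRITE_PERMISSIONS = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH

with open('result.txt', 'w') as f:
    f.write('done\n')
make_file_read_only('result.txt')
print(oct(os.stat('result.txt').st_mode & 0o777))  # typically 0o444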
<SYSTEM_TASK:>
Get symptom data for the provided ZIP code.
<END_TASK>
<USER_TASK:>
Description:
async def status_by_zip(self, zip_code: str) -> dict:
"""Get symptom data for the provided ZIP code.""" |
try:
location = next((
d for d in await self.user_reports()
if d['zip'] == zip_code))
except StopIteration:
return {}
return await self.status_by_coordinates(
float(location['latitude']), float(location['longitude'])) |