text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_modifiers(self, sql, blueprint, column):
""" Add the column modifiers to the deifinition """ |
for modifier in self._modifiers:
method = '_modify_%s' % modifier
if hasattr(self, method):
sql += getattr(self, method)(blueprint, column)
return sql |
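For reference, a minimal standalone sketch of the dispatch-by-name pattern used above; the Grammar stub and modifier names here are illustrative stand-ins, not the library's own classes:

class Grammar:
    _modifiers = ['nullable', 'default']

    def _modify_nullable(self, blueprint, column):
        return ' NULL' if column.get('nullable') else ' NOT NULL'

    def _modify_default(self, blueprint, column):
        return " DEFAULT '%s'" % column['default'] if 'default' in column else ''

    def add_modifiers(self, sql, blueprint, column):
        # Same loop as above: look up '_modify_<name>' and append its SQL.
        for modifier in self._modifiers:
            method = '_modify_%s' % modifier
            if hasattr(self, method):
                sql += getattr(self, method)(blueprint, column)
        return sql

print(Grammar().add_modifiers('VARCHAR(255)', None, {'nullable': False, 'default': 'x'}))
# VARCHAR(255) NOT NULL DEFAULT 'x'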
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clean_pivot_attributes(self, model):
""" Get the pivot attributes from a model. :type model: eloquent.Model """ |
values = {}
delete_keys = []
for key, value in model.get_attributes().items():
if key.find('pivot_') == 0:
values[key[6:]] = value
delete_keys.append(key)
for key in delete_keys:
delattr(model, key)
return values |
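A standalone sketch of the same prefix-stripping idea, using a hypothetical stand-in model class (FakeModel exists only for this illustration):

class FakeModel:
    def __init__(self, **attrs):
        self.__dict__.update(attrs)

    def get_attributes(self):
        return dict(self.__dict__)

model = FakeModel(id=1, pivot_user_id=7, pivot_role_id=3)
values = {}
# Collect keys first, then delete, to avoid mutating while iterating.
for key in list(model.get_attributes()):
    if key.startswith('pivot_'):
        values[key[6:]] = getattr(model, key)
        delattr(model, key)

print(values)                  # {'user_id': 7, 'role_id': 3}
print(model.get_attributes())  # {'id': 1}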
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_relation_count_query_for_self_join(self, query, parent):
""" Add the constraints for a relationship count query on the same table. :type query: eloquent.orm.Builder :type parent: eloquent.orm.Builder :rtype: eloquent.orm.Builder """ |
query.select(QueryExpression('COUNT(*)'))
table_prefix = self._query.get_query().get_connection().get_table_prefix()
hash_ = self.get_relation_count_hash()
query.from_('%s AS %s%s' % (self._table, table_prefix, hash_))
key = self.wrap(self.get_qualified_parent_key_name())
return query.where('%s.%s' % (hash_, self._foreign_key), '=', QueryExpression(key)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_aliased_pivot_columns(self):
""" Get the pivot columns for the relation. :rtype: list """ |
defaults = [self._foreign_key, self._other_key]
columns = []
for column in defaults + self._pivot_columns:
value = '%s.%s AS pivot_%s' % (self._table, column, column)
if value not in columns:
columns.append(value)
return columns |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_join(self, query=None):
""" Set the join clause for the relation query. :param query: The query builder :type query: eloquent.orm.Builder :return: self :rtype: BelongsToMany """ |
if not query:
query = self._query
base_table = self._related.get_table()
key = '%s.%s' % (base_table, self._related.get_key_name())
query.join(self._table, key, '=', self.get_other_key())
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, model, joining=None, touch=True):
""" Save a new model and attach it to the parent model. :type model: eloquent.Model :type joining: dict :type touch: bool :rtype: eloquent.Model """ |
if joining is None:
joining = {}
model.save({'touch': False})
self.attach(model.get_key(), joining, touch)
return model |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _attach_new(self, records, current, touch=True):
""" Attach all of the IDs that aren't in the current dict. """ |
changes = {
'attached': [],
'updated': []
}
for id, attributes in records.items():
if id not in current:
self.attach(id, attributes, touch)
changes['attached'].append(id)
elif len(attributes) > 0 and self.update_existing_pivot(id, attributes, touch):
changes['updated'].append(id)
return changes |
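A standalone sketch of the attach/update split above, with plain Python data (the ids and pivot attributes are illustrative):

records = {1: {}, 2: {'expires': True}, 3: {'expires': False}}
current = [2, 3]  # ids already attached

changes = {'attached': [], 'updated': []}
for id_, attributes in records.items():
    if id_ not in current:
        changes['attached'].append(id_)
    elif attributes:  # non-empty attributes trigger a pivot update
        changes['updated'].append(id_)

print(changes)  # {'attached': [1], 'updated': [2, 3]}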
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_existing_pivot(self, id, attributes, touch=True):
""" Update an existing pivot record on the table. """ |
if self.updated_at() in self._pivot_columns:
attributes = self.set_timestamps_on_attach(attributes, True)
updated = self._new_pivot_statement_for_id(id).update(attributes)
if touch:
self.touch_if_touching()
return updated |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join(self, table, one=None, operator=None, two=None, type='inner', where=False):
""" Add a join clause to the query :param table: The table to join with, can also be a JoinClause instance :type table: str or JoinClause :param one: The first column of the join condition :type one: str :param operator: The operator of the join condition :type operator: str :param two: The second column of the join condition :type two: str :param type: The join type :type type: str :param where: Whether to use a "where" rather than a "on" :type where: bool :return: The current QueryBuilder instance :rtype: QueryBuilder """ |
if isinstance(table, JoinClause):
self.joins.append(table)
else:
if one is None:
raise ArgumentError('Missing "one" argument')
join = JoinClause(table, type)
self.joins.append(join.on(
one, operator, two, 'and', where
))
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join_where(self, table, one, operator, two, type='inner'):
""" Add a "join where" clause to the query :param table: The table to join with, can also be a JoinClause instance :type table: str or JoinClause :param one: The first column of the join condition :type one: str :param operator: The operator of the join condition :type operator: str :param two: The second column of the join condition :type two: str :param type: The join type :type type: str :return: The current QueryBuilder instance :rtype: QueryBuilder """ |
return self.join(table, one, operator, two, type, True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _where_in_sub(self, column, query, boolean, negate=False):
""" Add a where in with a sub select to the query :param column: The column :type column: str :param query: A QueryBuilder instance :type query: QueryBuilder :param boolean: The boolean operator :type boolean: str :param negate: Whether it is a not where in :param negate: bool :return: The current QueryBuilder instance :rtype: QueryBuilder """ |
if negate:
type = 'not_in_sub'
else:
type = 'in_sub'
self.wheres.append({
'type': type,
'column': column,
'query': query,
'boolean': boolean
})
self.merge_bindings(query)
return self |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def union(self, query, all=False):
""" Add a union statement to the query :param query: A QueryBuilder instance :type query: QueryBuilder :param all: Whether it is a "union all" statement :type all: bool :return: The query :rtype: QueryBuilder """ |
self.unions.append({
'query': query,
'all': all
})
return self.merge_bindings(query) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find(self, id, columns=None):
""" Execute a query for a single record by id :param id: The id of the record to retrieve :type id: mixed :param columns: The columns of the record to retrive :type columns: list :return: mixed :rtype: mixed """ |
if not columns:
columns = ['*']
return self.where('id', '=', id).first(1, columns) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fresh(self, columns=None):
""" Execute the query as a fresh "select" statement :param columns: The columns to get :type columns: list :return: The result :rtype: list """ |
if not columns:
columns = ['*']
if not self.columns:
self.columns = columns
return self._processor.process_select(self, self._run_select()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_list_select(self, column, key=None):
""" Get the columns that should be used in a list :param column: The column to get the values for :type column: str :param key: The key :type key: str :return: The list of values :rtype: list """ |
if key is None:
elements = [column]
else:
elements = [column, key]
select = []
for elem in elements:
dot = elem.find('.')
if dot >= 0:
select.append(elem[dot + 1:])
else:
select.append(elem)
return select |
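A standalone sketch of the table-prefix stripping performed above ('users.name' becomes 'name', bare names pass through); strip_table_prefix is a hypothetical helper for illustration:

def strip_table_prefix(elements):
    select = []
    for elem in elements:
        dot = elem.find('.')
        select.append(elem[dot + 1:] if dot >= 0 else elem)
    return select

print(strip_table_prefix(['users.name', 'id']))  # ['name', 'id']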
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, id=None):
""" Delete a record from the database :param id: The id of the row to delete :type id: mixed :return: The number of rows deleted :rtype: int """ |
if id is not None:
self.where('id', '=', id)
sql = self._grammar.compile_delete(self)
return self._connection.delete(sql, self.get_bindings()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_wheres(self, wheres, bindings):
""" Merge a list of where clauses and bindings :param wheres: A list of where clauses :type wheres: list :param bindings: A list of bindings :type bindings: list :rtype: None """ |
self.wheres = self.wheres + wheres
self._bindings['where'] = self._bindings['where'] + bindings |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove(self, builder, model):
""" Remove the scope from a given query builder. :param builder: The query builder :type builder: eloquent.orm.builder.Builder :param model: The model :type model: eloquent.orm.Model """ |
column = model.get_qualified_deleted_at_column()
query = builder.get_query()
wheres = []
for where in query.wheres:
# If the where clause is a soft delete date constraint,
# we will remove it from the query and reset the keys
# on the wheres. This allows the developer to include
# deleted models in a relationship result set that is lazy loaded.
if not self._is_soft_delete_constraint(where, column):
wheres.append(where)
query.wheres = wheres |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extend(self, builder):
""" Extend the query builder with the needed functions. :param builder: The query builder :type builder: eloquent.orm.builder.Builder """ |
for extension in self._extensions:
getattr(self, '_add_%s' % extension)(builder)
builder.on_delete(self._on_delete) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _only_trashed(self, builder):
""" The only-trashed extension. :param builder: The query builder :type builder: eloquent.orm.builder.Builder """ |
model = builder.get_model()
self.remove(builder, model)
builder.get_query().where_not_null(model.get_qualified_deleted_at_column()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def big_integer(self, column, auto_increment=False, unsigned=False):
""" Create a new big integer column on the table. :param column: The column :type column: str :type auto_increment: bool :type unsigned: bool :rtype: Fluent """ |
return self._add_column('big_integer', column,
auto_increment=auto_increment,
unsigned=unsigned) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def medium_integer(self, column, auto_increment=False, unsigned=False):
""" Create a new medium integer column on the table. :param column: The column :type column: str :type auto_increment: bool :type unsigned: bool :rtype: Fluent """ |
return self._add_column('medium_integer', column,
auto_increment=auto_increment,
unsigned=unsigned) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tiny_integer(self, column, auto_increment=False, unsigned=False):
""" Create a new tiny integer column on the table. :param column: The column :type column: str :type auto_increment: bool :type unsigned: bool :rtype: Fluent """ |
return self._add_column('tiny_integer', column,
auto_increment=auto_increment,
unsigned=unsigned) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_column(self, type, name, **parameters):
""" Add a new column to the blueprint. :param type: The column type :type type: str :param name: The column name :type name: str :param parameters: The column parameters :type parameters: dict :rtype: Fluent """ |
parameters.update({
'type': type,
'name': name
})
column = Fluent(**parameters)
self._columns.append(column)
return column |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dissociate(self):
""" Dissociate previously associated model from the given parent. :rtype: eloquent.Model """ |
self._parent.set_attribute(self._foreign_key, None)
return self._parent.set_relation(self._relation, None) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_relation_value(self, dictionary, key, type):
""" Get the value of the relationship by one or many type. :type dictionary: dict :type key: str :type type: str """ |
value = dictionary[key]
if type == 'one':
return value[0]
return self._related.new_collection(value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def first_or_create(self, _attributes=None, **attributes):
""" Get the first related record matching the attributes or create it. :param attributes: The attributes :type attributes: dict :rtype: Model """ |
if _attributes is not None:
attributes.update(_attributes)
instance = self.where(attributes).first()
if instance is None:
instance = self.create(**attributes)
return instance |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eager_load_relations(self, models):
""" Eager load the relationship of the models. :param models: :type models: list :return: The models :rtype: list """ |
for name, constraints in self._eager_load.items():
if name.find('.') == -1:
models = self._load_relation(models, name, constraints)
return models |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_relation(self, models, name, constraints):
""" Eagerly load the relationship on a set of models. :rtype: list """ |
relation = self.get_relation(name)
relation.add_eager_constraints(models)
if callable(constraints):
constraints(relation)
else:
relation.merge_query(constraints)
models = relation.init_relation(models, name)
results = relation.get_eager()
return relation.match(models, results, name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _has_nested(self, relations, operator='>=', count=1, boolean='and', extra=None):
""" Add nested relationship count conditions to the query. :param relations: nested relations :type relations: str :param operator: The operator :type operator: str :param count: The count :type count: int :param boolean: The boolean value :type boolean: str :param extra: The extra query :type extra: Builder or callable :rtype: Builder """ |
relations = relations.split('.')
def closure(q):
if len(relations) > 1:
q.where_has(relations.pop(0), closure)
else:
q.has(relations.pop(0), operator, count, boolean, extra)
return self.where_has(relations.pop(0), closure) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def doesnt_have(self, relation, boolean='and', extra=None):
""" Add a relationship count to the query. :param relation: The relation to count :type relation: str :param boolean: The boolean value :type boolean: str :param extra: The extra query :type extra: Builder or callable :rtype: Builder """ |
return self.has(relation, '<', 1, boolean, extra) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def where_has(self, relation, extra, operator='>=', count=1):
""" Add a relationship count condition to the query with where clauses. :param relation: The relation to count :type relation: str :param extra: The extra query :type extra: Builder or callable :param operator: The operator :type operator: str :param count: The count :type count: int :rtype: Builder """ |
return self.has(relation, operator, count, 'and', extra) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def or_has(self, relation, operator='>=', count=1):
""" Add a relationship count condition to the query with an "or". :param relation: The relation to count :type relation: str :param operator: The operator :type operator: str :param count: The count :type count: int :rtype: Builder """ |
return self.has(relation, operator, count, 'or') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def or_where_has(self, relation, extra, operator='>=', count=1):
""" Add a relationship count condition to the query with where clauses and an "or". :param relation: The relation to count :type relation: str :param extra: The extra query :type extra: Builder or callable :param operator: The operator :type operator: str :param count: The count :type count: int :rtype: Builder """ |
return self.has(relation, operator, count, 'or', extra) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _merge_wheres_to_has(self, has_query, relation):
""" Merge the "wheres" from the relation query to a has query. :param has_query: The has query :type has_query: Builder :param relation: The relation to count :type relation: eloquent.orm.relations.Relation """ |
relation_query = relation.get_base_query()
has_query.merge_wheres(relation_query.wheres, relation_query.get_bindings())
self._query.merge_bindings(has_query.get_query()) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_has_relation_query(self, relation):
""" Get the "has" relation base query :type relation: str :rtype: Builder """ |
from .relations import Relation
return Relation.no_constraints(
lambda: getattr(self.get_model(), relation)()
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_nested(self, name, results):
""" Parse the nested relationship in a relation. :param name: The name of the relationship :type name: str :type results: dict :rtype: dict """ |
progress = []
for segment in name.split('.'):
progress.append(segment)
last = '.'.join(progress)
if last not in results:
results[last] = self.__class__(self.get_query().new_query())
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rollback(self, path, pretend=False):
""" Rollback the last migration operation. :param path: The path :type path: str :param pretend: Whether we execute the migrations as dry-run :type pretend: bool :rtype: int """ |
self._notes = []
migrations = self._repository.get_last()
if not migrations:
self._note('<info>Nothing to rollback.</info>')
return len(migrations)
for migration in migrations:
self._run_down(path, migration, pretend)
return len(migrations) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect_column_renamings(self, table_differences):
""" Try to find columns that only changed their names. :type table_differences: TableDiff """ |
rename_candidates = {}
for added_column_name, added_column in table_differences.added_columns.items():
for removed_column in table_differences.removed_columns.values():
if len(self.diff_column(added_column, removed_column)) == 0:
if added_column.get_name() not in rename_candidates:
rename_candidates[added_column.get_name()] = []
rename_candidates[added_column.get_name()].append((removed_column, added_column, added_column_name))
for candidate_columns in rename_candidates.values():
if len(candidate_columns) == 1:
removed_column, added_column, _ = candidate_columns[0]
removed_column_name = removed_column.get_name().lower()
added_column_name = added_column.get_name().lower()
if removed_column_name not in table_differences.renamed_columns:
table_differences.renamed_columns[removed_column_name] = added_column
del table_differences.added_columns[added_column_name]
del table_differences.removed_columns[removed_column_name] |
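A minimal standalone sketch of the same rename heuristic, using plain dicts of column properties instead of the dbal objects (the names and properties are illustrative):

added = {'username': {'type': 'string', 'notnull': True}}
removed = {'login': {'type': 'string', 'notnull': True}}

candidates = {}
for a_name, a_props in added.items():
    for r_name, r_props in removed.items():
        if a_props == r_props:  # no differing properties => rename candidate
            candidates.setdefault(a_name, []).append((r_name, a_name))

# Only unambiguous candidates (exactly one match) are treated as renames.
renames = {pairs[0][0]: pairs[0][1] for pairs in candidates.values() if len(pairs) == 1}
print(renames)  # {'login': 'username'}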
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff_column(self, column1, column2):
""" Returns the difference between column1 and column2 :type column1: eloquent.dbal.column.Column :type column2: eloquent.dbal.column.Column :rtype: list """ |
properties1 = column1.to_dict()
properties2 = column2.to_dict()
changed_properties = []
for prop in ['type', 'notnull', 'unsigned', 'autoincrement']:
if properties1[prop] != properties2[prop]:
changed_properties.append(prop)
if properties1['default'] != properties2['default']\
or (properties1['default'] is None and properties2['default'] is not None)\
or (properties2['default'] is None and properties1['default'] is not None):
changed_properties.append('default')
if properties1['type'] == 'string' and properties1['type'] != 'guid'\
or properties1['type'] in ['binary', 'blob']:
length1 = properties1['length'] or 255
length2 = properties2['length'] or 255
if length1 != length2:
changed_properties.append('length')
if properties1['fixed'] != properties2['fixed']:
changed_properties.append('fixed')
elif properties1['type'] in ['decimal', 'float', 'double precision']:
precision1 = properties1['precision'] or 10
precision2 = properties2['precision'] or 10
if precision1 != precision2:
changed_properties.append('precision')
if properties1['scale'] != properties2['scale']:
changed_properties.append('scale')
return list(set(changed_properties)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_model_by_type(self, type):
""" Create a new model instance by type. :rtype: Model """ |
klass = None
for cls in eloquent.orm.model.Model.__subclasses__():
morph_class = cls.__morph_class__ or cls.__name__
if morph_class == type:
klass = cls
break
return klass() |
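A standalone sketch of the __subclasses__ lookup above; Model and __morph_class__ are stubbed here for illustration. Unlike the method above, which would raise a TypeError by calling None() when nothing matches, the sketch raises a LookupError explicitly:

class Model:
    __morph_class__ = None

class User(Model):
    pass

class Post(Model):
    __morph_class__ = 'articles'

def create_by_type(type_):
    # Fall back to the class name when no morph class is declared.
    for cls in Model.__subclasses__():
        if (cls.__morph_class__ or cls.__name__) == type_:
            return cls()
    raise LookupError('No model registered for morph type %r' % type_)

print(type(create_by_type('articles')).__name__)  # Post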
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _populate_stub(self, name, stub, table):
""" Populate the placeholders in the migration stub. :param name: The name of the migration :type name: str :param stub: The stub :type stub: str :param table: The table name :type table: str :rtype: str """ |
stub = stub.replace('DummyClass', self._get_class_name(name))
if table is not None:
stub = stub.replace('dummy_table', table)
return stub |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def purge(self, name=None):
""" Disconnect from the given database and remove from local cache :param name: The name of the connection :type name: str :rtype: None """ |
self.disconnect(name)
if name in self._connections:
del self._connections[name] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_keys(self, models, key=None):
""" Get all the primary keys for an array of models. :type models: list :type key: str :rtype: list """ |
return list(set(map(lambda value: value.get_attribute(key) if key else value.get_key(), models))) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_join(self, query=None):
""" Set the join clause for the query. """ |
if not query:
query = self._query
foreign_key = '%s.%s' % (self._related.get_table(), self._second_key)
query.join(self._parent.get_table(), self.get_qualified_parent_key_name(), '=', foreign_key) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_best_worst_fits(assignments_df, data, modality_col='Modality', score='$\log_2 K$'):
"""Violinplots of the highest and lowest scoring of each modality""" |
ncols = 2
nrows = len(assignments_df.groupby(modality_col).groups.keys())
fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
figsize=(nrows*4, ncols*6))
axes_iter = axes.flat
fits = 'Highest', 'Lowest'
for modality, df in assignments_df.groupby(modality_col):
df = df.sort_values(score)
color = MODALITY_TO_COLOR[modality]
for fit in fits:
if fit == 'Highest':
ids = df['Feature ID'][-10:]
else:
ids = df['Feature ID'][:10]
fit_psi = data[ids]
tidy_fit_psi = fit_psi.stack().reset_index()
tidy_fit_psi = tidy_fit_psi.rename(columns={'level_0': 'Sample ID',
'level_1':
'Feature ID',
0: '$\Psi$'})
if tidy_fit_psi.empty:
continue
ax = six.next(axes_iter)
violinplot(x='Feature ID', y='$\Psi$', data=tidy_fit_psi,
color=color, ax=ax)
ax.set(title='{} {} {}'.format(fit, score, modality), xticks=[])
sns.despine()
fig.tight_layout() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
"""Draw barplots grouped by modality of modality percentage per group Parameters Returns ------- Raises ------ """ |
if percentages:
counts = 100 * (counts.T / counts.T.sum()).T
# with sns.set(style='whitegrid'):
if ax is None:
ax = plt.gca()
full_width = 0.8
width = full_width / counts.shape[0]
for i, (group, series) in enumerate(counts.iterrows()):
left = np.arange(len(self.modality_order)) + i * width
height = [series[i] if i in series else 0
for i in self.modality_order]
color = phenotype_to_color[group]
ax.bar(left, height, width=width, color=color, label=group,
linewidth=.5, edgecolor='k')
ylabel = 'Percentage of events' if percentages else 'Number of events'
ax.set_ylabel(ylabel)
ax.set_xticks(np.arange(len(self.modality_order)) + full_width / 2)
ax.set_xticklabels(self.modality_order)
ax.set_xlabel('Splicing modality')
ax.set_xlim(0, len(self.modality_order))
ax.legend(loc='best')
ax.grid(axis='y', linestyle='-', linewidth=0.5)
sns.despine() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def event_estimation(self, event, logliks, logsumexps, renamed=''):
"""Show the values underlying bayesian modality estimations of an event Parameters Returns ------- Raises ------ """ |
plotter = _ModelLoglikPlotter()
plotter.plot(event, logliks, logsumexps, self.modality_to_color,
renamed=renamed)
return plotter |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, fitted):
"""Assign the most likely modality given the fitted data Parameters fitted : pandas.DataFrame or pandas.Series Either a (n_modalities, features) DatFrame or (n_modalities,) Series, either of which will return the best modality for each feature. """ |
if fitted.shape[0] != len(self.modalities):
raise ValueError("This data doesn't look like it had the distance "
"between it and the five modalities calculated")
return fitted.idxmin() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logliks(self, x):
"""Calculate log-likelihood of a feature x for each model Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001 because they are out of range of the beta distribution. Parameters x : numpy.array-like A single vector to estimate the log-likelihood of the models on Returns ------- logliks : numpy.array Log-likelihood of these data in each member of the model's family """ |
x = x.copy()
# Replace exactly 0 and exactly 1 values with a very small number
# (machine epsilon, the smallest number that this computer is capable
# of storing) because 0 and 1 are not in the Beta distribution.
x[x == 0] = VERY_SMALL_NUMBER
x[x == 1] = 1 - VERY_SMALL_NUMBER
return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()
for prob, rv in
zip(self.prob_parameters, self.rvs)]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nice_number_string(number, decimal_places=2):
"""Convert floats to either integers or a nice looking fraction""" |
if number == np.round(number):
    return str(int(number))
if 0 < number < 1:
    inverse = 1 / number
    # Render near-reciprocals of integers as a LaTeX fraction.
    if int(inverse) == np.round(inverse):
        return r'\frac{{1}}{{{}}}'.format(int(inverse))
# Fall back to a fixed number of significant digits. (The original
# elif structure returned None when the fraction test failed.)
template = '{{:.{0}}}'.format(decimal_places)
return template.format(number) |
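Expected behaviour of the helper above, shown as a quick usage check (assumes numpy is imported as np, as in the module):

print(nice_number_string(2.0))   # 2
print(nice_number_string(0.5))   # \frac{1}{2}
print(nice_number_string(0.35))  # 0.35, since 1/0.35 is not close to an integer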
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def violinplot(self, n=1000, **kwargs):
"""Plot violins of each distribution in the model family Parameters n : int Number of random variables to generate kwargs : dict or keywords Any keyword arguments to seaborn.violinplot Returns ------- ax : matplotlib.Axes object Axes object with violins plotted """ |
kwargs.setdefault('palette', 'Purples')
dfs = []
for rv in self.rvs:
psi = rv.rvs(n)
df = pd.Series(psi, name=self.ylabel).to_frame()
alpha, beta = rv.args
alpha = self.nice_number_string(alpha, decimal_places=2)
beta = self.nice_number_string(beta, decimal_places=2)
df['parameters'] = '$\\alpha = {0}$\n$\\beta = {1}$'.format(
alpha, beta)
dfs.append(df)
data = pd.concat(dfs)
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(len(self.alphas)*0.625, 4))
else:
ax = kwargs.pop('ax')
ax = violinplot(x='parameters', y=self.ylabel, data=data,
ax=ax, **kwargs)
sns.despine(ax=ax)
return ax |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _single_feature_logliks_one_step(self, feature, models):
"""Get log-likelihood of models at each parameterization for given data Parameters feature : pandas.Series Percent-based values of a single feature. May contain NAs, but only non-NA values are used. Returns ------- logliks : pandas.DataFrame """ |
x_non_na = feature[~feature.isnull()]
if x_non_na.empty:
return pd.DataFrame()
else:
dfs = []
for name, model in models.items():
df = model.single_feature_logliks(feature)
df['Modality'] = name
dfs.append(df)
return pd.concat(dfs, ignore_index=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, data):
"""Get the modality assignments of each splicing event in the data Parameters data : pandas.DataFrame A (n_samples, n_events) dataframe of splicing events' PSI scores. Must be psi scores which range from 0 to 1 Returns ------- log2_bayes_factors : pandas.DataFrame A (n_modalities, n_events) dataframe of the estimated log2 bayes factor for each splicing event, for each modality Raises ------ AssertionError If any value in ``data`` does not fall only between 0 and 1. """ |
self.assert_less_than_or_equal_1(data.values.flat)
self.assert_non_negative(data.values.flat)
if isinstance(data, pd.DataFrame):
log2_bayes_factors = data.apply(self.single_feature_fit)
elif isinstance(data, pd.Series):
log2_bayes_factors = self.single_feature_fit(data)
log2_bayes_factors.name = self.score_name
return log2_bayes_factors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, log2_bayes_factors, reset_index=False):
"""Guess the most likely modality for each event For each event that has at least one non-NA value, if no modalilites have logsumexp'd logliks greater than the log Bayes factor threshold, then they are assigned the 'multimodal' modality, because we cannot reject the null hypothesis that these did not come from the uniform distribution. Parameters log2_bayes_factors : pandas.DataFrame A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0, bimodal, and middle modalities. If an event has no bayes factors for any of those modalities, it is ignored reset_index : bool If True, remove the first level of the index from the dataframe. Useful if you are using this function to apply to a grouped dataframe where the first level is something other than the modality, e.g. the celltype Returns ------- modalities : pandas.Series A (n_events,) series with the most likely modality for each event """ |
if reset_index:
x = log2_bayes_factors.reset_index(level=0, drop=True)
else:
x = log2_bayes_factors
if isinstance(x, pd.DataFrame):
not_na = (x.notnull() > 0).any()
not_na_columns = not_na[not_na].index
x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh
elif isinstance(x, pd.Series):
x[NULL_MODEL] = self.logbf_thresh
return x.idxmax() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def single_feature_logliks(self, feature):
"""Calculate log-likelihoods of each modality's parameterization Used for plotting the estimates of a single feature Parameters featre : pandas.Series A single feature's values. All values must range from 0 to 1. Returns ------- logliks : pandas.DataFrame The log-likelihood the data, for each model, for each parameterization Raises ------ AssertionError If any value in ``x`` does not fall only between 0 and 1. """ |
self.assert_less_than_or_equal_1(feature.values)
self.assert_non_negative(feature.values)
logliks = self._single_feature_logliks_one_step(
feature, self.one_param_models)
logsumexps = self.logliks_to_logsumexp(logliks)
# If none of the one-parameter models passed, try the two-param models
if (logsumexps <= self.logbf_thresh).all():
logliks_two_params = self._single_feature_logliks_one_step(
feature, self.two_param_models)
logliks = pd.concat([logliks, logliks_two_params])
return logliks |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def single_feature_fit(self, feature):
"""Get the log2 bayes factor of the fit for each modality""" |
if np.isfinite(feature).sum() == 0:
series = pd.Series(index=MODALITY_ORDER)
else:
logbf_one_param = pd.Series(
{k: v.logsumexp_logliks(feature) for
k, v in self.one_param_models.items()})
# Check if none of the previous features fit
if (logbf_one_param <= self.logbf_thresh).all():
logbf_two_param = pd.Series(
{k: v.logsumexp_logliks(feature)
for k, v in self.two_param_models.items()})
series = pd.concat([logbf_one_param, logbf_two_param])
series[NULL_MODEL] = self.logbf_thresh
else:
series = logbf_one_param
series.index.name = 'Modality'
series.name = self.score_name
return series |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def violinplot(self, n=1000, figsize=None, **kwargs):
r"""Visualize all modality family members with parameters Use violinplots to visualize distributions of modality family members Parameters n : int Number of random variables to generate kwargs : dict or keywords Any keyword arguments to seaborn.violinplot Returns ------- fig : matplotlib.Figure object Figure object with violins plotted """ |
if figsize is None:
nrows = len(self.models)
width = max(len(m.rvs) for name, m in self.models.items())*0.625
height = nrows*2.5
figsize = width, height
fig, axes = plt.subplots(nrows=nrows, figsize=figsize)
for ax, model_name in zip(axes, MODALITY_ORDER):
try:
model = self.models[model_name]
cmap = MODALITY_TO_CMAP[model_name]
palette = cmap(np.linspace(0, 1, len(model.rvs)))
model.violinplot(n=n, ax=ax, palette=palette, **kwargs)
ax.set(title=model_name, xlabel='')
except KeyError:
continue
fig.tight_layout() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bin_range_strings(bins, fmt=':g'):
"""Given a list of bins, make a list of strings of those bin ranges Parameters bins : list_like List of anything, usually values of bin edges Returns ------- bin_ranges : list List of bin ranges ['0-0.5', '0.5-1'] """ |
return [('{' + fmt + '}-{' + fmt + '}').format(i, j)
for i, j in zip(bins, bins[1:])] |
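A quick usage check for bin_range_strings; the fmt parameter is the format spec applied to each edge:

print(bin_range_strings((0, 0.5, 1)))         # ['0-0.5', '0.5-1']
print(bin_range_strings(range(3), fmt=':d'))  # ['0-1', '1-2']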
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def binify(data, bins):
"""Makes a histogram of each column the provided binsize Parameters data : pandas.DataFrame A samples x features dataframe. Each feature (column) will be binned into the provided bins bins : iterable Bins you would like to use for this data. Must include the final bin value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1). nbins = len(bins) - 1 Returns ------- binned : pandas.DataFrame An nbins x features DataFrame of each column binned across rows """ |
if bins is None:
raise ValueError('Must specify "bins"')
if isinstance(data, pd.DataFrame):
binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,
range=(0, 1))[0]))
elif isinstance(data, pd.Series):
binned = pd.Series(np.histogram(data, bins=bins, range=(0, 1))[0])
else:
raise ValueError('`data` must be either a 1d vector or 2d matrix')
binned.index = bin_range_strings(bins)
# Normalize so each column sums to 1
binned = binned / binned.sum().astype(float)
return binned |
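A usage sketch for binify (assumes pandas and numpy are available, as in the module); each column becomes a probability distribution over the bins:

import pandas as pd

psi = pd.DataFrame({'event1': [0.1, 0.2, 0.9], 'event2': [0.5, 0.6, 0.7]})
print(binify(psi, bins=(0, 0.5, 1)))
# Roughly:
#          event1  event2
# 0-0.5  0.666667     0.0   <- 2 of 3 event1 values fall in [0, 0.5)
# 0.5-1  0.333333     1.0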
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kld(p, q):
"""Kullback-Leiber divergence of two probability distributions pandas dataframes, p and q Parameters p : pandas.DataFrame An nbins x features DataFrame, or (nbins,) Series q : pandas.DataFrame An nbins x features DataFrame, or (nbins,) Series Returns ------- kld : pandas.Series Kullback-Lieber divergence of the common columns between the dataframe. E.g. between 1st column in p and 1st column in q, and 2nd column in p and 2nd column in q. Raises ------ ValueError If the data provided is not a probability distribution, i.e. it has negative values or its columns do not sum to 1, raise ValueError Notes ----- The input to this function must be probability distributions, not raw values. Otherwise, the output makes no sense. """ |
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
# If one of them is zero, then the other should be considered to be 0.
# In this problem formulation, log0 = 0
p = p.replace(0, np.nan)
q = q.replace(0, np.nan)
return (np.log2(p / q) * p).sum(axis=0) |
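A usage sketch for kld, checking it against the definition KL(p || q) = sum_i p_i * log2(p_i / q_i); this assumes the module's imports and its _check_prob_dist validator are in scope:

import pandas as pd

p = pd.Series([0.5, 0.5])
q = pd.Series([0.9, 0.1])
# 0.5*log2(0.5/0.9) + 0.5*log2(0.5/0.1) ~= 0.737
print(kld(p, q))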
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jsd(p, q):
"""Finds the per-column JSD between dataframes p and q Jensen-Shannon divergence of two probability distrubutions pandas dataframes, p and q. These distributions are usually created by running binify() on the dataframe. Parameters p : pandas.DataFrame An nbins x features DataFrame. q : pandas.DataFrame An nbins x features DataFrame. Returns ------- jsd : pandas.Series Jensen-Shannon divergence of each column with the same names between p and q Raises ------ ValueError If the data provided is not a probability distribution, i.e. it has negative values or its columns do not sum to 1, raise ValueError """ |
try:
_check_prob_dist(p)
_check_prob_dist(q)
except ValueError:
return np.nan
weight = 0.5
m = weight * (p + q)
result = weight * kld(p, m) + (1 - weight) * kld(q, m)
return result |
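A usage sketch for jsd; for fully disjoint distributions the base-2 JSD reaches its maximum of 1 (same module context assumed as above):

import pandas as pd

p = pd.Series([1.0, 0.0])
q = pd.Series([0.0, 1.0])
print(jsd(p, q))  # 1.0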
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def entropy(binned, base=2):
"""Find the entropy of each column of a dataframe Parameters binned : pandas.DataFrame A nbins x features DataFrame of probability distributions, where each column sums to 1 base : numeric The log-base of the entropy. Default is 2, so the resulting entropy is in bits. Returns ------- entropy : pandas.Seires Entropy values for each column of the dataframe. Raises ------ ValueError If the data provided is not a probability distribution, i.e. it has negative values or its columns do not sum to 1, raise ValueError """ |
try:
_check_prob_dist(binned)
except ValueError:
return np.nan
return -((np.log(binned) / np.log(base)) * binned).sum(axis=0) |
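A usage sketch for entropy; a uniform distribution over two bins carries exactly one bit (same module context assumed as above):

import pandas as pd

print(entropy(pd.Series([0.5, 0.5])))  # 1.0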
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes Parameters df1, df2 : pandas.DataFrames Dataframes to calculate JSD between columns of. Must have overlapping column names bins : array-like Bins to use for transforming df{1,2} into probability distributions pair : str, optional Name of the pair to save as the name of the series Returns ------- divergence : pandas.Series The Jensen-Shannon divergence between columns of df1, df2 """ |
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes Parameters data : pandas.DataFrame A (n_samples, n_features) Dataframe groupby : mappable A samples to phenotypes mapping n_iter : int Number of bootstrap resampling iterations to perform for the within-group comparisons n_bins : int Number of bins to binify the singles data on Returns ------- jsd_df : pandas.DataFrame A (n_features, n_phenotypes^2) dataframe of the JSD between each feature between and within phenotypes """ |
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, bins, pair)
jsds.append(series)
return pd.concat(jsds, axis=1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jsd_df_to_2d(jsd_df):
"""Transform a tall JSD dataframe to a square matrix of mean JSDs Parameters jsd_df : pandas.DataFrame A (n_features, n_phenotypes^2) dataframe of the JSD between each feature between and within phenotypes Returns ------- jsd_2d : pandas.DataFrame A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD between and within phenotypes """ |
jsd_2d = jsd_df.mean().reset_index()
jsd_2d = jsd_2d.rename(
columns={'level_0': 'phenotype1', 'level_1': 'phenotype2', 0: 'jsd'})
jsd_2d = jsd_2d.pivot(index='phenotype1', columns='phenotype2',
values='jsd')
return jsd_2d + np.tril(jsd_2d.T, -1) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_next_value(sequence_name='default', initial_value=1, reset_value=None, *, nowait=False, using=None):
""" Return the next value for a given sequence. """ |
# Inner import because models cannot be imported before their application.
from .models import Sequence
if reset_value is not None:
assert initial_value < reset_value
if using is None:
using = router.db_for_write(Sequence)
connection = connections[using]
if (getattr(connection, 'pg_version', 0) >= 90500
and reset_value is None and not nowait):
# PostgreSQL ≥ 9.5 supports "upsert".
with connection.cursor() as cursor:
cursor.execute(UPSERT_QUERY, [sequence_name, initial_value])
last, = cursor.fetchone()
return last
else:
# Other databases require making more database queries.
with transaction.atomic(using=using, savepoint=False):
sequence, created = (
Sequence.objects
.select_for_update(nowait=nowait)
.get_or_create(name=sequence_name,
defaults={'last': initial_value})
)
if not created:
sequence.last += 1
if reset_value is not None and sequence.last >= reset_value:
sequence.last = initial_value
sequence.save()
return sequence.last |
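UPSERT_QUERY is defined elsewhere in the module; a plausible shape, assuming the default table and column names of the Sequence model above (an illustrative sketch, not a verbatim copy of the library's query), is:

# Assumed shape of the PostgreSQL >= 9.5 upsert used above; table and
# column names are inferred from the Sequence model, not confirmed.
UPSERT_QUERY = """
    INSERT INTO sequences_sequence (name, last)
    VALUES (%s, %s)
    ON CONFLICT (name)
    DO UPDATE SET last = sequences_sequence.last + 1
    RETURNING last
"""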
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(self, final_line_count):
"""Check the status of all provided data and update the suite.""" |
if self._lines_seen["version"]:
self._process_version_lines()
self._process_plan_lines(final_line_count) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_version_lines(self):
"""Process version line rules.""" |
if len(self._lines_seen["version"]) > 1:
self._add_error(_("Multiple version lines appeared."))
elif self._lines_seen["version"][0] != 1:
self._add_error(_("The version must be on the first line.")) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _process_plan_lines(self, final_line_count):
"""Process plan line rules.""" |
if not self._lines_seen["plan"]:
self._add_error(_("Missing a plan."))
return
if len(self._lines_seen["plan"]) > 1:
self._add_error(_("Only one plan line is permitted per file."))
return
plan, at_line = self._lines_seen["plan"][0]
if not self._plan_on_valid_line(at_line, final_line_count):
self._add_error(
_("A plan must appear at the beginning or end of the file.")
)
return
if plan.expected_tests != self._lines_seen["test"]:
self._add_error(
_("Expected {expected_count} tests but only {seen_count} ran.").format(
expected_count=plan.expected_tests,
seen_count=self._lines_seen["test"],
)
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _plan_on_valid_line(self, at_line, final_line_count):
"""Check if a plan is on a valid line.""" |
# Put the common cases first.
if at_line == 1 or at_line == final_line_count:
return True
# The plan may only appear on line 2 if the version is at line 1.
after_version = (
self._lines_seen["version"]
and self._lines_seen["version"][0] == 1
and at_line == 2
)
if after_version:
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_bail(self, bail):
"""Handle a bail line.""" |
self._add_error(_("Bailed: {reason}").format(reason=bail.reason)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_skipping_plan(self, skip_plan):
"""Handle a plan that contains a SKIP directive.""" |
skip_line = Result(True, None, skip_plan.directive.text, Directive("SKIP"))
self._suite.addTest(Adapter(self._filename, skip_line)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _add_error(self, message):
"""Add an error test to the suite.""" |
error_line = Result(False, None, message, Directive(""))
self._suite.addTest(Adapter(self._filename, error_line)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_exception(exception):
"""Format an exception as diagnostics output. exception is the tuple as expected from sys.exc_info. """ |
exception_lines = traceback.format_exception(*exception)
# The lines returned from format_exception do not strictly contain
# one line per element in the list (i.e. some elements have new
# line characters in the middle). Normalize that oddity.
lines = "".join(exception_lines).splitlines(True)
return format_as_diagnostics(lines) |
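A quick demonstration of why the splitlines(True) normalization above is needed: the chunks returned by traceback.format_exception can themselves span several lines.

import sys
import traceback

try:
    raise ValueError('boom')
except ValueError:
    exc_info = sys.exc_info()

chunks = traceback.format_exception(*exc_info)
lines = ''.join(chunks).splitlines(True)
print(len(chunks) <= len(lines))  # True: there are fewer chunks than real lines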
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(self, fh):
"""Generate tap.line.Line objects, given a file-like object `fh`. `fh` may be any object that implements both the iterator and context management protocol (i.e. it can be used in both a Trailing whitespace and newline characters will be automatically stripped from the input lines. """ |
with fh:
try:
first_line = next(fh)
except StopIteration:
return
first_parsed = self.parse_line(first_line.rstrip())
fh_new = itertools.chain([first_line], fh)
if first_parsed.category == "version" and first_parsed.version >= 13:
if ENABLE_VERSION_13:
fh_new = peekable(itertools.chain([first_line], fh))
self._try_peeking = True
else: # pragma no cover
print(
"""
WARNING: Optional imports not found, TAP 13 output will be
ignored. To parse yaml, see requirements in docs:
https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13"""
)
for line in fh_new:
yield self.parse_line(line.rstrip(), fh_new) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_line(self, text, fh=None):
"""Parse a line into whatever TAP category it belongs.""" |
match = self.ok.match(text)
if match:
return self._parse_result(True, match, fh)
match = self.not_ok.match(text)
if match:
return self._parse_result(False, match, fh)
if self.diagnostic.match(text):
return Diagnostic(text)
match = self.plan.match(text)
if match:
return self._parse_plan(match)
match = self.bail.match(text)
if match:
return Bail(match.group("reason"))
match = self.version.match(text)
if match:
return self._parse_version(match)
return Unknown() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_plan(self, match):
"""Parse a matching plan line.""" |
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are allowed in the plan.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) |
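A standalone sketch of plan-line matching; this regex is an assumption mirroring the TAP spec, not necessarily the parser's actual `plan` pattern:

import re

# '1..N' optionally followed by a '# <directive>' comment.
plan = re.compile(r'^1\.\.(?P<expected>\d+)\s*(?:#\s*(?P<directive>.*))?$')

for line in ('1..4', '1..0 # SKIP no database'):
    match = plan.match(line)
    print(match.group('expected'), match.group('directive'))
# 4 None
# 0 SKIP no database

As the method above shows, a TODO directive on a plan line would be rejected, because only SKIP is permitted there.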
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse_result(self, ok, match, fh=None):
"""Parse a matching result line into a result instance.""" |
peek_match = None
try:
if fh is not None and self._try_peeking:
peek_match = self.yaml_block_start.match(fh.peek())
except StopIteration:
pass
if peek_match is None:
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
)
indent = peek_match.group("indent")
concat_yaml = self._extract_yaml_block(indent, fh)
return Result(
ok,
number=match.group("number"),
description=match.group("description").strip(),
directive=Directive(match.group("directive")),
raw_yaml_block=concat_yaml,
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _extract_yaml_block(self, indent, fh):
"""Extract a raw yaml block from a file handler""" |
raw_yaml = []
indent_match = re.compile(r"^{}".format(indent))
try:
fh.next()
while indent_match.match(fh.peek()):
raw_yaml.append(fh.next().replace(indent, "", 1))
# check for the end and stop adding yaml if encountered
if self.yaml_block_end.match(fh.peek()):
fh.next()
break
except StopIteration:
pass
return "\n".join(raw_yaml) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yaml_block(self):
"""Lazy load a yaml_block. If yaml support is not available, there is an error in parsing the yaml block, or no yaml is associated with this result, ``None`` will be returned. :rtype: dict """ |
if LOAD_YAML and self._yaml_block is not None:
try:
yaml_dict = yaml.load(self._yaml_block)
return yaml_dict
except yaml.error.YAMLError:
print("Error parsing yaml block. Check formatting.")
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, files):
"""Load any files found into a suite. Any directories are walked and their files are added as TAP files. :returns: A ``unittest.TestSuite`` instance """ |
suite = unittest.TestSuite()
for filepath in files:
if os.path.isdir(filepath):
self._find_tests_in_directory(filepath, suite)
else:
suite.addTest(self.load_suite_from_file(filepath))
return suite |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_suite_from_file(self, filename):
"""Load a test suite with test lines from the provided TAP file. :returns: A ``unittest.TestSuite`` instance """ |
suite = unittest.TestSuite()
rules = Rules(filename, suite)
if not os.path.exists(filename):
rules.handle_file_does_not_exist()
return suite
line_generator = self._parser.parse_file(filename)
return self._load_lines(filename, line_generator, suite, rules) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_suite_from_stdin(self):
"""Load a test suite with test lines from the TAP stream on STDIN. :returns: A ``unittest.TestSuite`` instance """ |
suite = unittest.TestSuite()
rules = Rules("stream", suite)
line_generator = self._parser.parse_stdin()
return self._load_lines("stream", line_generator, suite, rules) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_lines(self, filename, line_generator, suite, rules):
"""Load a suite with lines produced by the line generator.""" |
line_counter = 0
for line in line_generator:
line_counter += 1
if line.category in self.ignored_lines:
continue
if line.category == "test":
suite.addTest(Adapter(filename, line))
rules.saw_test()
elif line.category == "plan":
if line.skip:
rules.handle_skipping_plan(line)
return suite
rules.saw_plan(line, line_counter)
elif line.category == "bail":
rules.handle_bail(line)
return suite
elif line.category == "version":
rules.saw_version_at(line_counter)
rules.check(line_counter)
return suite |
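The early returns matter: a skipped plan or a bail line stops loading immediately, so later test lines never become suite entries. A hypothetical stream illustrating the bail path:

    sample = (
        "1..3\n"
        "ok 1 - first\n"
        "Bail out! Database unavailable\n"
        "ok 2 - never loaded\n"
    )
    # Fed through the loader, only "ok 1" becomes an Adapter test; loading
    # stops at the bail line and "ok 2" is never reached.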
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _track(self, class_name):
"""Keep track of which test cases have executed.""" |
if self._test_cases.get(class_name) is None:
if self.streaming and self.header:
self._write_test_case_header(class_name, self.stream)
self._test_cases[class_name] = []
if self.combined:
self.combined_test_cases_seen.append(class_name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_plan(self, total):
"""Notify the tracker how many total tests there will be.""" |
self.plan = total
if self.streaming:
# This will only write the plan if we haven't written it
# already but we want to check if we already wrote a
# test out (in which case we can't just write the plan out
# right here).
if not self.combined_test_cases_seen:
self._write_plan(self.stream)
elif not self.combined:
raise ValueError(
"set_plan can only be used with combined or streaming output"
) |
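A minimal sketch of the streaming path (assuming ``tap.tracker.Tracker`` and that nothing has been written yet, so the plan can be emitted up front):

    import sys

    from tap.tracker import Tracker

    tracker = Tracker(streaming=True, stream=sys.stdout)
    tracker.set_plan(2)  # prints "1..2" immediately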
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_tap_reports(self):
"""Generate TAP reports. The results are either combined into a single output file or the output file name is generated from the test case. """ |
# We're streaming but set_plan wasn't called, so we can only
# know the plan now (at the end).
if self.streaming and not self._plan_written:
print("1..{0}".format(self.combined_line_number), file=self.stream)
self._plan_written = True
return
if self.combined:
combined_file = "testresults.tap"
if self.outdir:
combined_file = os.path.join(self.outdir, combined_file)
with open(combined_file, "w") as out_file:
self._write_tap_version(out_file)
if self.plan is not None:
print("1..{0}".format(self.plan), file=out_file)
for test_case in self.combined_test_cases_seen:
self.generate_tap_report(
test_case, self._test_cases[test_case], out_file
)
if self.plan is None:
print("1..{0}".format(self.combined_line_number), file=out_file)
else:
for test_case, tap_lines in self._test_cases.items():
with open(self._get_tap_file_path(test_case), "w") as out_file:
self._write_tap_version(out_file)
self.generate_tap_report(test_case, tap_lines, out_file) |
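In combined mode the flow is record-then-write; a sketch assuming the Tracker's ``add_ok``/``add_not_ok`` recording methods and an existing ``reports/`` directory:

    tracker = Tracker(combined=True, outdir="reports")
    tracker.add_ok("TestLogin", "accepts a valid password")
    tracker.add_not_ok("TestLogin", "rejects an empty password")
    tracker.generate_tap_reports()  # writes reports/testresults.tap, plan last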
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_plan(self, stream):
"""Write the plan line to the stream. If we have a plan and have not yet written it out, write it to the given stream. """ |
if self.plan is not None:
if not self._plan_written:
print("1..{0}".format(self.plan), file=stream)
self._plan_written = True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_tap_file_path(self, test_case):
"""Get the TAP output file path for the test case.""" |
sanitized_test_case = test_case.translate(self._sanitized_table)
tap_file = sanitized_test_case + ".tap"
if self.outdir:
return os.path.join(self.outdir, tap_file)
return tap_file |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main(argv=sys.argv, stream=sys.stderr):
"""Entry point for ``tappy`` command.""" |
args = parse_args(argv)
suite = build_suite(args)
runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream)
result = runner.run(suite)
return get_status(result) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_suite(args):
"""Build a test suite by loading TAP files or a TAP stream.""" |
loader = Loader()
if len(args.files) == 0 or args.files[0] == "-":
suite = loader.load_suite_from_stdin()
else:
suite = loader.load(args.files)
return suite |
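So ``tappy`` with no file arguments (or a single ``-``) reads the TAP stream from STDIN, while paths go through ``Loader.load``. Driving it programmatically, assuming ``parse_args`` skips the leading program name as it would with ``sys.argv``:

    import sys

    exit_code = main(["tappy", "results.tap"], stream=sys.stderr)
    sys.exit(exit_code)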
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def addFailure(self, result):
"""Add a failure to the result.""" |
result.addFailure(self, (Exception, Exception(), None))
# Since TAP will not provide assertion data, clean up the assertion
# section so it is not so spaced out.
test, err = result.failures[-1]
result.failures[-1] = (test, "") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def after_flush_postexec(self, session, context):
""" Event listener to recursively expire `left` and `right` attributes the parents of all modified instances part of this flush. """ |
instances = self.instances[session]
while instances:
instance = instances.pop()
if instance not in session:
continue
parent = self.get_parent_value(instance)
while parent != NO_VALUE and parent is not None:
instances.discard(parent)
session.expire(parent, ['left', 'right', 'tree_id', 'level'])
parent = self.get_parent_value(parent)
else:
session.expire(instance, ['left', 'right', 'tree_id', 'level'])
self.expire_session_for_children(session, instance) |
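The practical effect is that ancestors never serve stale boundaries after a flush. A minimal sketch, assuming a hypothetical ``Tree`` model mapped with the MPTT mixin:

    node = session.query(Tree).filter_by(id=6).one()
    node.move_inside(1)
    session.flush()   # listener fires; node's ancestors are expired

    parent = session.query(Tree).filter_by(id=1).one()
    parent.right      # expired attribute, so fresh lft/rght values are loaded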
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_inside(self, parent_id):
""" Moving one node of tree inside another For example see: * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_function` * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_inside_to_the_same_parent_function` """ | # noqa
session = Session.object_session(self)
self.parent_id = parent_id
self.mptt_move_inside = parent_id
session.add(self) |
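Typical usage, with a hypothetical ``Tree`` model and primary keys:

    page = session.query(Tree).filter_by(id=7).one()
    page.move_inside(3)   # page becomes a child of node 3
    session.commit()      # the MPTT flush listener renumbers lft/rght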
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_after(self, node_id):
""" Moving one node of tree after another For example see :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_after_function` """ | # noqa
session = Session.object_session(self)
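# Self-assignment below flags parent_id as dirty so the MPTT flush listener
# still fires, even though the parent itself does not change here.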
self.parent_id = self.parent_id
self.mptt_move_after = node_id
session.add(self) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_before(self, node_id):
""" Moving one node of tree before another For example see: * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_function` * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_other_tree` * :mod:`sqlalchemy_mptt.tests.cases.move_node.test_move_before_to_top_level` """ | # noqa
session = Session.object_session(self)
table = _get_tree_table(self.__mapper__)
pk = getattr(table.c, self.get_pk_column().name)
node = session.query(table).filter(pk == node_id).one()
self.parent_id = node.parent_id
self.mptt_move_before = node_id
session.add(self) |
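``move_after`` and ``move_before`` are the sibling-level counterparts; a sketch with hypothetical ids:

    item = session.query(Tree).filter_by(id=5).one()
    item.move_before(2)   # item becomes the sibling immediately before node 2
    session.commit()

    item.move_after(4)    # ...or immediately after node 4
    session.commit()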
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def leftsibling_in_level(self):
""" Node to the left of the current node at the same level For example see :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_leftsibling_in_level` """ | # noqa
table = _get_tree_table(self.__mapper__)
session = Session.object_session(self)
current_lvl_nodes = session.query(table) \
.filter_by(level=self.level).filter_by(tree_id=self.tree_id) \
.filter(table.c.lft < self.left).order_by(table.c.lft).all()
if current_lvl_nodes:
return current_lvl_nodes[-1]
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _node_to_dict(cls, node, json, json_fields):
""" Helper method for ``get_tree``. """ |
if json:
pk_name = node.get_pk_name()
# jqTree or jsTree format
result = {'id': getattr(node, pk_name), 'label': node.__repr__()}
if json_fields:
result.update(json_fields(node))
else:
result = {'node': node}
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tree(cls, session=None, json=False, json_fields=None, query=None):
""" This method generate tree of current node table in dict or json format. You can make custom query with attribute ``query``. By default it return all nodes in table. Args: session (:mod:`sqlalchemy.orm.session.Session`):
SQLAlchemy session Kwargs: json (bool):
if True return JSON jqTree format json_fields (function):
append custom fields in JSON query (function):
it takes :class:`sqlalchemy.orm.query.Query` object as an argument, and returns in a modified form :: def query(nodes):
return nodes.filter(node.__class__.tree_id.is_(node.tree_id)) node.get_tree(session=DBSession, json=True, query=query) Example: * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_tree` * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree` * :mod:`sqlalchemy_mptt.tests.cases.get_tree.test_get_json_tree_with_custom_field` """ | # noqa
tree = []
nodes_of_level = {}
# handle custom query
nodes = cls._base_query(session)
if query:
nodes = query(nodes)
nodes = cls._base_order(nodes).all()
# Find the minimal level among the fetched nodes.
min_level = min([node.level for node in nodes] or [None])
def get_node_id(node):
return getattr(node, node.get_pk_name())
for node in nodes:
result = cls._node_to_dict(node, json, json_fields)
parent_id = node.parent_id
if node.level != min_level: # for children
# Find parent in the tree
if parent_id not in nodes_of_level.keys():
continue
if 'children' not in nodes_of_level[parent_id]:
nodes_of_level[parent_id]['children'] = []
# Append node to parent
nl = nodes_of_level[parent_id]['children']
nl.append(result)
nodes_of_level[get_node_id(node)] = nl[-1]
else: # for top level nodes
tree.append(result)
nodes_of_level[get_node_id(node)] = tree[-1]
return tree |
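Putting it together, a hedged sketch with a hypothetical ``Tree`` model (the ``name`` column is assumed):

    # Plain dict format: [{'node': <Tree 1>, 'children': [...]}, ...]
    tree = Tree.get_tree(session)

    # jqTree/jsTree JSON format with one custom field per node
    json_tree = Tree.get_tree(
        session,
        json=True,
        json_fields=lambda node: {"name": node.name},
    )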