<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_ancestors(self): """ Iterates over the list of all ancestor nodes from current node to the current tree root. """
node = self
while node.up is not None:
    yield node.up
    node = node.up
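A minimal usage sketch for iter_ancestors, assuming an ete3-style Tree whose nodes expose .up pointers (the newick string and node names below are illustrative only):

# hypothetical example tree; any ete3-style TreeNode with .up pointers works
from ete3 import Tree
t = Tree("((A,B)n1,(C,D)n2)root;", format=1)
leaf = t.search_nodes(name="A")[0]
print([anc.name for anc in leaf.iter_ancestors()])  # ['n1', 'root']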
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_tree_root(self): """ Returns the absolute root node of current tree structure."""
root = self
while root.up is not None:
    root = root.up
return root
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_common_ancestor(self, *target_nodes, **kargs): """ Returns the first common ancestor between this node and a given list of 'target_nodes'. **Examples:** t = tree.Tree("(((A:0.1, B:0.01): 0.001, C:0.0001): 1.0[&&NHX:name=common], (D:0.00001): 0.000001): 2.0[&&NHX:name=root];") A = t.get_descendants_by_name("A")[0] C = t.get_descendants_by_name("C")[0] common = A.get_common_ancestor(C) print common.name """
get_path = kargs.get("get_path", False) if len(target_nodes) == 1 and type(target_nodes[0]) \ in set([set, tuple, list, frozenset]): target_nodes = target_nodes[0] # Convert node names into node instances target_nodes = _translate_nodes(self, *target_nodes) # If only one node is provided, use self as the second target if type(target_nodes) != list: target_nodes = [target_nodes, self] n2path = {} reference = [] ref_node = None for n in target_nodes: current = n while current: n2path.setdefault(n, set()).add(current) if not ref_node: reference.append(current) current = current.up if not ref_node: ref_node = n common = None for n in reference: broken = False for node, path in six.iteritems(n2path): if node is not ref_node and n not in path: broken = True break if not broken: common = n break if not common: raise TreeError("Nodes are not connected!") if get_path: return common, n2path else: return common
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_search_nodes(self, **conditions): """ Search nodes in an iterative way. Matches are yielded as they are found, which avoids scanning the full tree topology before returning the first matches. Useful when dealing with huge trees. """
for n in self.traverse():
    conditions_passed = 0
    for key, value in six.iteritems(conditions):
        if hasattr(n, key) and getattr(n, key) == value:
            conditions_passed += 1
    if conditions_passed == len(conditions):
        yield n
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_farthest_node(self, topology_only=False): """ Returns the node's farthest descendant or ancestor node, and the distance to it. :argument False topology_only: If set to True, distance between nodes will be referred to the number of nodes between them. In other words, topological distance will be used instead of branch length distances. :return: A tuple containing the farthest node referred to the current node and the distance to it. """
# Init fasthest node to current farthest leaf farthest_node, farthest_dist = self.get_farthest_leaf( topology_only=topology_only) prev = self cdist = 0.0 if topology_only else prev.dist current = prev.up while current is not None: for ch in current.children: if ch != prev: if not ch.is_leaf(): fnode, fdist = ch.get_farthest_leaf( topology_only=topology_only) else: fnode = ch fdist = 0 if topology_only: fdist += 1.0 else: fdist += ch.dist if cdist+fdist > farthest_dist: farthest_dist = cdist + fdist farthest_node = fnode prev = current if topology_only: cdist += 1 else: cdist += prev.dist current = prev.up return farthest_node, farthest_dist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_midpoint_outgroup(self): """ Returns the node that divides the current tree into two distance-balanced partitions. """
# Gets the farthest node to the current root
root = self.get_tree_root()
nA, r2A_dist = root.get_farthest_leaf()
nB, A2B_dist = nA.get_farthest_node()

outgroup = nA
middist = A2B_dist / 2.0
cdist = 0
current = nA
while current is not None:
    cdist += current.dist
    if cdist > (middist):  # stop climbing once the midpoint is passed
        break
    else:
        current = current.up
return current
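A short usage sketch combining get_midpoint_outgroup with set_outgroup to midpoint-root a tree (assumes an ete3-style Tree; the branch lengths are illustrative):

from ete3 import Tree
t = Tree("((A:1,B:2):3,(C:4,D:5):6);")
t.set_outgroup(t.get_midpoint_outgroup())  # re-roots on the distance-balanced edge
print(t.get_ascii(show_internal=False))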
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populate(self, size, names_library=None, reuse_names=False, random_branches=False, branch_range=(0, 1), support_range=(0, 1)): """ Generates a random topology by populating current node. :argument None names_library: If provided, names library (list, set, dict, etc.) will be used to name nodes. :argument False reuse_names: If True, node names will not be necessarily unique, which makes the process a bit more efficient. :argument False random_branches: If True, branch distances and support values will be randomized. :argument (0,1) branch_range: If random_branches is True, this range of values will be used to generate random distances. :argument (0,1) support_range: If random_branches is True, this range of values will be used to generate random branch support values. """
NewNode = self.__class__ if len(self.children) > 1: connector = NewNode() for ch in self.get_children(): ch.detach() connector.add_child(child = ch) root = NewNode() self.add_child(child = connector) self.add_child(child = root) else: root = self next_deq = deque([root]) for i in range(size-1): if random.randint(0, 1): p = next_deq.pop() else: p = next_deq.popleft() c1 = p.add_child() c2 = p.add_child() next_deq.extend([c1, c2]) if random_branches: c1.dist = random.uniform(*branch_range) c2.dist = random.uniform(*branch_range) c1.support = random.uniform(*branch_range) c2.support = random.uniform(*branch_range) else: c1.dist = 1.0 c2.dist = 1.0 c1.support = 1.0 c2.support = 1.0 # next contains leaf nodes charset = "abcdefghijklmnopqrstuvwxyz" if names_library: names_library = deque(names_library) else: avail_names = itertools.combinations_with_replacement(charset, 10) for n in next_deq: if names_library: if reuse_names: tname = random.sample(names_library, 1)[0] else: tname = names_library.pop() else: tname = ''.join(next(avail_names)) n.name = tname
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_outgroup(self, outgroup): """ Sets a descendant node as the outgroup of a tree. This function can be used to root a tree or even an internal node. Parameters: outgroup: a node instance within the same tree structure that will be used as a basal node. """
outgroup = _translate_nodes(self, outgroup) if self == outgroup: ##return ## why raise an error for this? raise TreeError("Cannot set myself as outgroup") parent_outgroup = outgroup.up # Detects (sub)tree root n = outgroup while n.up is not self: n = n.up # If outgroup is a child from root, but with more than one # sister nodes, creates a new node to group them self.children.remove(n) if len(self.children) != 1: down_branch_connector = self.__class__() down_branch_connector.dist = 0.0 down_branch_connector.support = n.support for ch in self.get_children(): down_branch_connector.children.append(ch) ch.up = down_branch_connector self.children.remove(ch) else: down_branch_connector = self.children[0] # Connects down branch to myself or to outgroup quien_va_ser_padre = parent_outgroup if quien_va_ser_padre is not self: # Parent-child swapping quien_va_ser_hijo = quien_va_ser_padre.up quien_fue_padre = None buffered_dist = quien_va_ser_padre.dist buffered_support = quien_va_ser_padre.support while quien_va_ser_hijo is not self: quien_va_ser_padre.children.append(quien_va_ser_hijo) quien_va_ser_hijo.children.remove(quien_va_ser_padre) buffered_dist2 = quien_va_ser_hijo.dist buffered_support2 = quien_va_ser_hijo.support quien_va_ser_hijo.dist = buffered_dist quien_va_ser_hijo.support = buffered_support buffered_dist = buffered_dist2 buffered_support = buffered_support2 quien_va_ser_padre.up = quien_fue_padre quien_fue_padre = quien_va_ser_padre quien_va_ser_padre = quien_va_ser_hijo quien_va_ser_hijo = quien_va_ser_padre.up quien_va_ser_padre.children.append(down_branch_connector) down_branch_connector.up = quien_va_ser_padre quien_va_ser_padre.up = quien_fue_padre down_branch_connector.dist += buffered_dist outgroup2 = parent_outgroup parent_outgroup.children.remove(outgroup) outgroup2.dist = 0 else: outgroup2 = down_branch_connector outgroup.up = self outgroup2.up = self # outgroup is always the first children. Some function my # trust on this fact, so do no change this. self.children = [outgroup,outgroup2] middist = (outgroup2.dist + outgroup.dist)/2 outgroup.dist = middist outgroup2.dist = middist outgroup2.support = outgroup.support
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unroot(self): """ Unroots current node. This function is expected to be used on the absolute tree root node, but it can also be applied to any other internal node. It will convert a split into a multifurcation. """
if len(self.children) == 2:
    if not self.children[0].is_leaf():
        self.children[0].delete()
    elif not self.children[1].is_leaf():
        self.children[1].delete()
    else:
        raise TreeError("Cannot unroot a tree with only two leaves")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _asciiArt(self, char1='-', show_internal=True, compact=False, attributes=None): """ Returns the ASCII representation of the tree. Code based on the PyCogent GPL project. """
if not attributes: attributes = ["name"] # toytree edit: # removed six dependency for map with comprehension # node_name = ', '.join(map(str, [getattr(self, v) for v in attributes if hasattr(self, v)])) _attrlist = [getattr(self, v) for v in attributes if hasattr(self, v)] node_name = ", ".join([str(i) for i in _attrlist]) LEN = max(3, len(node_name) if not self.children or show_internal else 3) PAD = ' ' * LEN PA = ' ' * (LEN-1) if not self.is_leaf(): mids = [] result = [] for c in self.children: if len(self.children) == 1: char2 = '/' elif c is self.children[0]: char2 = '/' elif c is self.children[-1]: char2 = '\\' else: char2 = '-' (clines, mid) = c._asciiArt(char2, show_internal, compact, attributes) mids.append(mid+len(result)) result.extend(clines) if not compact: result.append('') if not compact: result.pop() (lo, hi, end) = (mids[0], mids[-1], len(result)) prefixes = [PAD] * (lo+1) + [PA+'|'] * (hi-lo-1) + [PAD] * (end-hi) mid = int((lo + hi) / 2) prefixes[mid] = char1 + '-'*(LEN-2) + prefixes[mid][-1] result = [p+l for (p,l) in zip(prefixes, result)] if show_internal: stem = result[mid] result[mid] = stem[0] + node_name + stem[len(node_name)+1:] return (result, mid) else: return ([char1 + '-' + node_name], 0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort_descendants(self, attr="name"): """ This function sorts the branches of a given tree by considering node names. After the tree is sorted, nodes are labeled using ascending numbers. This can be used to ensure that nodes in a tree with the same node names are always labeled in the same way. Note that if duplicated names are present, extra criteria should be added to sort nodes. The unique id is stored as the node._nid attribute. """
node2content = self.get_cached_content(store_attr=attr, container_type=list)
for n in self.traverse():
    if not n.is_leaf():
        n.children.sort(key=lambda x: str(sorted(node2content[x])))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_edges(self, cached_content=None): """ Iterate over the list of edges of a tree. Each edge is represented as a tuple of two elements, each containing the list of nodes separated by the edge. """
if not cached_content:
    cached_content = self.get_cached_content()
all_leaves = cached_content[self]
for n, side1 in six.iteritems(cached_content):
    yield (side1, all_leaves - side1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_monophyly(self, values, target_attr, ignore_missing=False, unrooted=False): """ Returns True if a given target attribute is monophyletic under this node for the provided set of values. If not all values are represented in the current tree structure, a ValueError exception will be raised to warn that strict monophyly could never be reached (this behaviour can be avoided by enabling the `ignore_missing` flag. Parameters: values: a set of values for which monophyly is expected. target_attr: node attribute being used to check monophyly (i.e. species for species trees, names for gene family trees, or any custom feature present in the tree). ignore_missing: Avoid raising an Exception when missing attributes are found. unrooted: If True, tree will be treated as unrooted, thus allowing to find monophyly even when current outgroup is spliting a monophyletic group. Returns: -------- the following tuple IsMonophyletic (boolean), clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'), leaves breaking the monophyly (set) """
if type(values) != set: values = set(values) # This is the only time I traverse the tree, then I use cached # leaf content n2leaves = self.get_cached_content() # Raise an error if requested attribute values are not even present if ignore_missing: found_values = set([getattr(n, target_attr) for n in n2leaves[self]]) missing_values = values - found_values values = values & found_values # Locate leaves matching requested attribute values targets = set([leaf for leaf in n2leaves[self] if getattr(leaf, target_attr) in values]) if not ignore_missing: if values - set([getattr(leaf, target_attr) for leaf in targets]): raise ValueError('The monophyly of the provided values could never be reached, as not all of them exist in the tree.' ' Please check your target attribute and values, or set the ignore_missing flag to True') if unrooted: smallest = None for side1, side2 in self.iter_edges(cached_content=n2leaves): if targets.issubset(side1) and (not smallest or len(side1) < len(smallest)): smallest = side1 elif targets.issubset(side2) and (not smallest or len(side2) < len(smallest)): smallest = side2 if smallest is not None and len(smallest) == len(targets): break foreign_leaves = smallest - targets else: # Check monophyly with get_common_ancestor. Note that this # step does not require traversing the tree again because # targets are node instances instead of node names, and # get_common_ancestor function is smart enough to detect it # and avoid unnecessary traversing. common = self.get_common_ancestor(targets) observed = n2leaves[common] foreign_leaves = set([leaf for leaf in observed if getattr(leaf, target_attr) not in values]) if not foreign_leaves: return True, "monophyletic", foreign_leaves else: # if the requested attribute is not monophyletic in this # node, let's differentiate between poly and paraphyly. poly_common = self.get_common_ancestor(foreign_leaves) # if the common ancestor of all foreign leaves is self # contained, we have a paraphyly. Otherwise, polyphyly. polyphyletic = [leaf for leaf in poly_common if getattr(leaf, target_attr) in values] if polyphyletic: return False, "polyphyletic", foreign_leaves else: return False, "paraphyletic", foreign_leaves
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_monophyletic(self, values, target_attr): """ Returns a list of nodes matching the provided monophyly criteria. For a node to be considered a match, all `target_attr` values within and node, and exclusively them, should be grouped. :param values: a set of values for which monophyly is expected. :param target_attr: node attribute being used to check monophyly (i.e. species for species trees, names for gene family trees). """
if type(values) != set:
    values = set(values)
n2values = self.get_cached_content(store_attr=target_attr)
is_monophyletic = lambda node: n2values[node] == values
for match in self.iter_leaves(is_leaf_fn=is_monophyletic):
    if is_monophyletic(match):
        yield match
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def truncate_empty_lines(lines): """ Removes all empty lines from above and below the text. We can't just use text.strip() because that would remove the leading space for the table. Parameters lines : list of str Returns ------- lines : list of str The text lines without empty lines above or below """
while lines[0].rstrip() == '':
    lines.pop(0)
while lines[len(lines) - 1].rstrip() == '':
    lines.pop(-1)
return lines
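For example, blank lines are stripped from both ends while the inner indentation is preserved:

lines = ['', '   ', '  +-----+', '  | foo |', '  +-----+', '']
print(truncate_empty_lines(lines))
# ['  +-----+', '  | foo |', '  +-----+']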
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def jstimestamp_slow(dte):
    '''Convert a date or datetime object into a javascript timestamp'''
    year, month, day, hour, minute, second = dte.timetuple()[:6]
    days = date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
    hours = days*24 + hour
    minutes = hours*60 + minute
    seconds = minutes*60 + second
    if isinstance(dte, datetime):
        return 1000*seconds + 0.001*dte.microsecond
    else:
        return 1000*seconds
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def jstimestamp(dte):
    '''Convert a date or datetime object into a javascript timestamp.'''
    days = date(dte.year, dte.month, 1).toordinal() - _EPOCH_ORD + dte.day - 1
    hours = days*24
    if isinstance(dte, datetime):
        hours += dte.hour
        minutes = hours*60 + dte.minute
        seconds = minutes*60 + dte.second
        return 1000*seconds + int(0.001*dte.microsecond)
    else:
        return 3600000*hours
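A quick sanity check, assuming the module defines _EPOCH_ORD as the ordinal of the Unix epoch (an assumption about this module's globals):

from datetime import date, datetime
_EPOCH_ORD = date(1970, 1, 1).toordinal()  # assumed module-level constant
print(jstimestamp(date(1970, 1, 2)))               # 86400000 (one day, in milliseconds)
print(jstimestamp(datetime(1970, 1, 1, 0, 0, 1)))  # 1000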
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def html2rst(html_string, force_headers=False, center_cells=False, center_headers=False): """ Convert a string or html file to an rst table string. Parameters html_string : str Either the html string, or the filepath to the html force_headers : bool Make the first row become headers, whether or not they are headers in the html file. center_cells : bool Whether or not to center the contents of the cells center_headers : bool Whether or not to center the contents of the header cells Returns ------- str The html table converted to an rst grid table Notes ----- This function **requires** BeautifulSoup_ to work. Example ------- | Header 1 | Header 2 | Header 3 | +=====================+================+==============+ | This is a paragraph | - List item 1 | #. Ordered 1 | | | - List item 2 | #. Ordered 2 | .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/ """
if os.path.isfile(html_string): file = open(html_string, 'r', encoding='utf-8') lines = file.readlines() file.close() html_string = ''.join(lines) table_data, spans, use_headers = html2data( html_string) if table_data == '': return '' if force_headers: use_headers = True return data2rst(table_data, spans, use_headers, center_cells, center_headers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_span(row, column, extra_rows, extra_columns): """ Create a list of rows and columns that will make up a span Parameters row : int The row of the first cell in the span column : int The column of the first cell in the span extra_rows : int The number of rows that make up the span extra_columns : int The number of columns that make up the span Returns ------- span : list of lists of int A span is a list of [row, column] pairs that make up a span """
span = [[row, column]]
for r in range(row, row + extra_rows + 1):
    span.append([r, column])
    for c in range(column, column + extra_columns + 1):
        span.append([row, c])
        span.append([r, c])
return span
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_cell(table, span, widths, heights, use_headers): """ Convert the contents of a span of the table to a grid table cell Parameters table : list of lists of str The table of rows containg strings to convert to a grid table span : list of lists of int list of [row, column] pairs that make up a span in the table widths : list of int list of the column widths of the table heights : list of int list of the heights of each row in the table use_headers : bool Whether or not to use headers in the table Returns ------- cell : dashtable.data2rst.Cell """
width = get_span_char_width(span, widths) height = get_span_char_height(span, heights) text_row = span[0][0] text_column = span[0][1] text = table[text_row][text_column] lines = text.split("\n") for i in range(len(lines)): width_difference = width - len(lines[i]) lines[i] = ''.join([lines[i], " " * width_difference]) height_difference = height - len(lines) empty_lines = [] for i in range(0, height_difference): empty_lines.append(" " * width) lines.extend(empty_lines) output = [ ''.join(["+", (width * "-") + "+"]) ] for i in range(0, height): output.append("|" + lines[i] + "|") if use_headers and span[0][0] == 0: symbol = "=" else: symbol = "-" output.append( ''.join(["+", width * symbol, "+"]) ) text = "\n".join(output) row_count = get_span_row_count(span) column_count = get_span_column_count(span) cell = Cell(text, text_row, text_column, row_count, column_count) return cell
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_db(self, app, entry_point_group='invenio_db.models', **kwargs): """Initialize Flask-SQLAlchemy extension."""
# Setup SQLAlchemy app.config.setdefault( 'SQLALCHEMY_DATABASE_URI', 'sqlite:///' + os.path.join(app.instance_path, app.name + '.db') ) app.config.setdefault('SQLALCHEMY_ECHO', False) # Initialize Flask-SQLAlchemy extension. database = kwargs.get('db', db) database.init_app(app) # Initialize versioning support. self.init_versioning(app, database, kwargs.get('versioning_manager')) # Initialize model bases if entry_point_group: for base_entry in pkg_resources.iter_entry_points( entry_point_group): base_entry.load() # All models should be loaded by now. sa.orm.configure_mappers() # Ensure that versioning classes have been built. if app.config['DB_VERSIONING']: manager = self.versioning_manager if manager.pending_classes: if not versioning_models_registered(manager, database.Model): manager.builder.configure_versioned_classes() elif 'transaction' not in database.metadata.tables: manager.declarative_base = database.Model manager.create_transaction_model() manager.plugins.after_build_tx_class(manager)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_versioning(self, app, database, versioning_manager=None): """Initialize the versioning support using SQLAlchemy-Continuum."""
try: pkg_resources.get_distribution('sqlalchemy_continuum') except pkg_resources.DistributionNotFound: # pragma: no cover default_versioning = False else: default_versioning = True app.config.setdefault('DB_VERSIONING', default_versioning) if not app.config['DB_VERSIONING']: return if not default_versioning: # pragma: no cover raise RuntimeError( 'Please install extra versioning support first by running ' 'pip install invenio-db[versioning].' ) # Now we can import SQLAlchemy-Continuum. from sqlalchemy_continuum import make_versioned from sqlalchemy_continuum import versioning_manager as default_vm from sqlalchemy_continuum.plugins import FlaskPlugin # Try to guess user model class: if 'DB_VERSIONING_USER_MODEL' not in app.config: # pragma: no cover try: pkg_resources.get_distribution('invenio_accounts') except pkg_resources.DistributionNotFound: user_cls = None else: user_cls = 'User' else: user_cls = app.config.get('DB_VERSIONING_USER_MODEL') plugins = [FlaskPlugin()] if user_cls else [] # Call make_versioned() before your models are defined. self.versioning_manager = versioning_manager or default_vm make_versioned( user_cls=user_cls, manager=self.versioning_manager, plugins=plugins, ) # Register models that have been loaded beforehand. builder = self.versioning_manager.builder for tbl in database.metadata.tables.values(): builder.instrument_versioned_classes( database.mapper, get_class_by_table(database.Model, tbl) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_table(html_string, row_count, column_count): """ Convert an html string to data table Parameters html_string : str row_count : int column_count : int Returns ------- data_table : list of lists of str """
try: from bs4 import BeautifulSoup from bs4.element import Tag except ImportError: print("ERROR: You must have BeautifulSoup to use html2data") return #html_string = convertRichText(html_string) data_table = [] for row in range(0, row_count): data_table.append([]) for column in range(0, column_count): data_table[-1].append(None) soup = BeautifulSoup(html_string, 'html.parser') table = soup.find('table') if not table: return '' trs = table.findAll('tr') if len(trs) == 0: return [['']] for tr in range(len(trs)): ths = trs[tr].findAll('th') if len(ths) == 0: tds = trs[tr].findAll('td') else: tds = ths if len(tds) == 0: tds = [] for i in range(0, column_count): tds.append(Tag("", name="")) for i in range(len(tds)): td = tds[i] row, column = find_unassigned_table_cell(data_table) r_span_count = 1 c_span_count = 1 if td.has_attr('rowspan'): r_span_count = int(td['rowspan']) if td.has_attr('colspan'): c_span_count = int(td['colspan']) for row_prime in range(row, row + r_span_count): for column_prime in range(column, column + c_span_count): if row_prime == row and column_prime == column: items = [] for item in td.contents: items.append(str(item)) string = ''.join(items).strip() text = restructify(string).rstrip() data_table[row_prime][column_prime] = text else: data_table[row_prime][column_prime] = "" if i + 1 < column_count and i == len(tds) - 1: for x in range(len(tds), column_count): if data_table[row][x] is None: data_table[row][x] = "" for row in range(len(data_table)): for column in range(len(data_table[row])): if not data_table[row][column]: data_table[row][column] = "" return data_table
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def do_sqlite_connect(dbapi_connection, connection_record): """Ensure SQLite checks foreign key constraints. For further details see "Foreign key support" sections on https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support """
# Enable foreign key constraint checking
cursor = dbapi_connection.cursor()
cursor.execute('PRAGMA foreign_keys=ON')
cursor.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_driver_hacks(self, app, info, options): """Call before engine creation."""
# Don't forget to apply hacks defined on parent object. super(SQLAlchemy, self).apply_driver_hacks(app, info, options) if info.drivername == 'sqlite': connect_args = options.setdefault('connect_args', {}) if 'isolation_level' not in connect_args: # disable pysqlite's emitting of the BEGIN statement entirely. # also stops it from emitting COMMIT before any DDL. connect_args['isolation_level'] = None if not event.contains(Engine, 'connect', do_sqlite_connect): event.listen(Engine, 'connect', do_sqlite_connect) if not event.contains(Engine, 'begin', do_sqlite_begin): event.listen(Engine, 'begin', do_sqlite_begin) from sqlite3 import register_adapter def adapt_proxy(proxy): """Get current object and try to adapt it again.""" return proxy._get_current_object() register_adapter(LocalProxy, adapt_proxy) elif info.drivername == 'postgresql+psycopg2': # pragma: no cover from psycopg2.extensions import adapt, register_adapter def adapt_proxy(proxy): """Get current object and try to adapt it again.""" return adapt(proxy._get_current_object()) register_adapter(LocalProxy, adapt_proxy) elif info.drivername == 'mysql+pymysql': # pragma: no cover from pymysql import converters def escape_local_proxy(val, mapping): """Get current object and try to adapt it again.""" return converters.escape_item( val._get_current_object(), self.engine.dialect.encoding, mapping=mapping, ) converters.conversions[LocalProxy] = escape_local_proxy converters.encoders[LocalProxy] = escape_local_proxy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create(verbose): """Create tables."""
click.secho('Creating all tables!', fg='yellow', bold=True)
with click.progressbar(_db.metadata.sorted_tables) as bar:
    for table in bar:
        if verbose:
            click.echo(' Creating table {0}'.format(table))
        table.create(bind=_db.engine, checkfirst=True)
create_alembic_version_table()
click.secho('Created all tables!', fg='green')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def drop(verbose): """Drop tables."""
click.secho('Dropping all tables!', fg='red', bold=True)
with click.progressbar(reversed(_db.metadata.sorted_tables)) as bar:
    for table in bar:
        if verbose:
            click.echo(' Dropping table {0}'.format(table))
        table.drop(bind=_db.engine, checkfirst=True)
drop_alembic_version_table()
click.secho('Dropped all tables!', fg='green')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def destroy(): """Drop database."""
click.secho('Destroying database {0}'.format(_db.engine.url),
            fg='red', bold=True)
if _db.engine.name == 'sqlite':
    try:
        drop_database(_db.engine.url)
    except FileNotFoundError as e:
        click.secho('Sqlite database has not been initialised',
                    fg='red', bold=True)
else:
    drop_database(_db.engine.url)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_span_column_count(span): """ Find the length of a colspan. Parameters span : list of lists of int The [row, column] pairs that make up the span Returns ------- columns : int The number of columns included in the span Example ------- Consider this table:: | foo | bar | +------+--------+---------+ | spam | goblet | berries | +------+--------+---------+ :: 2 """
columns = 1
first_column = span[0][1]
for i in range(len(span)):
    if span[i][1] > first_column:
        columns += 1
        first_column = span[i][1]
return columns
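For instance, a span covering three consecutive columns of one row reports a width of three columns:

span = [[0, 1], [0, 2], [0, 3]]
print(get_span_column_count(span))  # 3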
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_dict(self):
    "returns self as a dictionary with _underscore subdicts corrected."
    ndict = {}
    for key, val in self.__dict__.items():
        if key[0] == "_":
            ndict[key[1:]] = val
        else:
            ndict[key] = val
    return ndict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_span_char_width(span, column_widths): """ Sum the widths of the columns that make up the span, plus the extra. Parameters span : list of lists of int list of [row, column] pairs that make up the span column_widths : list of int The widths of the columns that make up the table Returns ------- total_width : int The total width of the span """
start_column = span[0][1]
column_count = get_span_column_count(span)
total_width = 0
for i in range(start_column, start_column + column_count):
    total_width += column_widths[i]
total_width += column_count - 1
return total_width
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rebuild_encrypted_properties(old_key, model, properties): """Rebuild a model's EncryptedType properties when the SECRET_KEY is changed. :param old_key: old SECRET_KEY. :param model: the affected db model. :param properties: list of properties to rebuild. """
inspector = reflection.Inspector.from_engine(db.engine) primary_key_names = inspector.get_primary_keys(model.__tablename__) new_secret_key = current_app.secret_key db.session.expunge_all() try: with db.session.begin_nested(): current_app.secret_key = old_key db_columns = [] for primary_key in primary_key_names: db_columns.append(getattr(model, primary_key)) for prop in properties: db_columns.append(getattr(model, prop)) old_rows = db.session.query(*db_columns).all() except Exception as e: current_app.logger.error( 'Exception occurred while reading encrypted properties. ' 'Try again before starting the server with the new secret key.') raise e finally: current_app.secret_key = new_secret_key db.session.expunge_all() for old_row in old_rows: primary_keys, old_entries = old_row[:len(primary_key_names)], \ old_row[len(primary_key_names):] primary_key_fields = dict(zip(primary_key_names, primary_keys)) update_values = dict(zip(properties, old_entries)) model.query.filter_by(**primary_key_fields).\ update(update_values) db.session.commit()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_alembic_version_table(): """Create alembic_version table."""
alembic = current_app.extensions['invenio-db'].alembic
if not alembic.migration_context._has_version_table():
    alembic.migration_context._ensure_version_table()
    for head in alembic.script_directory.revision_map._real_heads:
        alembic.migration_context.stamp(alembic.script_directory, head)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def drop_alembic_version_table(): """Drop alembic_version table."""
if _db.engine.dialect.has_table(_db.engine, 'alembic_version'):
    alembic_version = _db.Table('alembic_version', _db.metadata,
                                autoload_with=_db.engine)
    alembic_version.drop(bind=_db.engine)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def versioning_model_classname(manager, model): """Get the name of the versioned model class."""
if manager.options.get('use_module_name', True):
    return '%s%sVersion' % (
        model.__module__.title().replace('.', ''), model.__name__)
else:
    return '%sVersion' % (model.__name__,)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def versioning_models_registered(manager, base): """Return True if all versioning models have been registered."""
declared_models = base._decl_class_registry.keys()
return all(versioning_model_classname(manager, c) in declared_models
           for c in manager.pending_classes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def vector_to_symmetric(v):
    '''Convert an iterable into a symmetric matrix.'''
    np = len(v)
    N = (int(sqrt(1 + 8*np)) - 1)//2
    if N*(N+1)//2 != np:
        raise ValueError('Cannot convert vector to symmetric matrix')
    sym = ndarray((N, N))
    iterable = iter(v)
    for r in range(N):
        for c in range(r+1):
            sym[r, c] = sym[c, r] = next(iterable)
    return sym
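For example, a 6-element vector unpacks into a 3x3 symmetric matrix (assuming the module's ndarray and sqrt come from numpy, which the surrounding code suggests):

from numpy import ndarray, sqrt  # assumed module-level imports
print(vector_to_symmetric([1, 2, 3, 4, 5, 6]))
# [[1. 2. 4.]
#  [2. 3. 5.]
#  [4. 5. 6.]]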
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def corr(self):
    '''The correlation matrix'''
    cov = self.cov()
    N = cov.shape[0]
    corr = ndarray((N, N))
    for r in range(N):
        for c in range(r):
            corr[r, c] = corr[c, r] = cov[r, c]/sqrt(cov[r, r]*cov[c, c])
        corr[r, r] = 1.
    return corr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def calmar(sharpe, T=1.0):
    '''
    Calculate the Calmar ratio for a Wiener process

    @param sharpe: Annualized Sharpe ratio
    @param T: Time interval in years
    '''
    x = 0.5*T*sharpe*sharpe
    return x/qp(x)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def calmarnorm(sharpe, T, tau=1.0):
    '''
    Multiplier for normalizing the Calmar ratio to period tau
    '''
    return calmar(sharpe, tau)/calmar(sharpe, T)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_links(converted_text, html): """ Add the links to the bottom of the text """
soup = BeautifulSoup(html, 'html.parser') link_exceptions = [ 'footnote-reference', 'fn-backref', 'citation-reference' ] footnotes = {} citations = {} backrefs = {} links = soup.find_all('a') for link in links: href = link.get('href') text = process_tag(link) classes = dict(link.attrs).get('class', '') if 'footnote-reference' in classes: footnotes[href] = '#' + link.get('id') elif 'citation-reference' in classes: text = process_tag(link) citations[text] = '#' + link.get('id') elif 'fn-backref' in classes: sibling = link.findNext('td') text = process_tag(sibling) backrefs[href] = text excepted_link = False for class_type in classes: if class_type in link_exceptions: excepted_link = True if not excepted_link: if text.endswith('_'): text = text[0:-1] if len(text.split(' ')) > 1: text = text[1:-1] converted_text += '.. _' + text + ': ' + href + '\n' if len(footnotes.keys()) > 0: converted_text += '\n' for key in footnotes.keys(): text = backrefs[footnotes[key]] converted_text += '.. [' + key + '] ' + text + '\n' if len(citations.keys()) > 0: converted_text += '\n' for key in citations.keys(): text = backrefs[citations[key]] converted_text += '.. ' + key[0:-1] + ' ' + text + '\n' return converted_text.rstrip()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_consensus_tree(self, cutoff=0.0, best_tree=None): """ Returns an extended majority rule consensus tree as a Toytree object. Node labels include 'support' values showing the occurrence of clades in the consensus tree across trees in the input treelist. Clades with support below 'cutoff' are collapsed into polytomies. If you enter an optional 'best_tree' then support values from the treelist calculated for clades in this tree, and the best_tree is returned with support values added to nodes. Params ------ cutoff (float; default=0.0): Cutoff below which clades are collapsed in the majority rule consensus tree. This is a proportion (e.g., 0.5 means 50%). best_tree (Toytree; optional): A tree that support values should be calculated for and added to. For example, you want to calculate how often clades in your best ML tree are supported in 100 bootstrap trees. """
if best_tree:
    raise NotImplementedError("best_tree option not yet supported.")
cons = ConsensusTree(self.treelist, cutoff)
cons.update()
return cons.ttree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def hash_trees(self):
    "hash ladderized tree topologies"
    observed = {}
    for idx, tree in enumerate(self.treelist):
        nwk = tree.write(tree_format=9)
        hashed = md5(nwk.encode("utf-8")).hexdigest()
        if hashed not in observed:
            observed[hashed] = idx
            self.treedict[idx] = 1
        else:
            idx = observed[hashed]
            self.treedict[idx] += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def find_clades(self): "Count clade occurrences." # index names from the first tree ndict = {j: i for i, j in enumerate(self.names)} namedict = {i: j for i, j in enumerate(self.names)} # store counts clade_counts = {} for tidx, ncopies in self.treedict.items(): # testing on unrooted trees is easiest but for some reason slow ttree = self.treelist[tidx].unroot() # traverse over tree for node in ttree.treenode.traverse('preorder'): bits = np.zeros(len(ttree), dtype=np.bool_) for child in node.iter_leaf_names(): bits[ndict[child]] = True # get bit string and its reverse bitstring = bits.tobytes() revstring = np.invert(bits).tobytes() # add to clades first time, then check for inverse next hits if bitstring in clade_counts: clade_counts[bitstring] += ncopies else: if revstring not in clade_counts: clade_counts[bitstring] = ncopies else: clade_counts[revstring] += ncopies # convert to freq for key, val in clade_counts.items(): clade_counts[key] = val / float(len(self.treelist)) ## return in sorted order self.namedict = namedict self.clade_counts = sorted( clade_counts.items(), key=lambda x: x[1], reverse=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def filter_clades(self): "Remove conflicting clades and those < cutoff to get majority rule" passed = [] carrs = np.array([list(i[0]) for i in self.clade_counts], dtype=int) freqs = np.array([i[1] for i in self.clade_counts]) for idx in range(carrs.shape[0]): conflict = False if freqs[idx] < self.cutoff: continue for pidx in passed: intersect = np.max(carrs[idx] + carrs[pidx]) > 1 # is either one a subset of the other? subset_test0 = np.all(carrs[idx] - carrs[pidx] >= 0) subset_test1 = np.all(carrs[pidx] - carrs[idx] >= 0) if intersect: if (not subset_test0) and (not subset_test1): conflict = True if not conflict: passed.append(idx) rclades = [] for idx in passed: rclades.append((carrs[idx], freqs[idx])) self.fclade_counts = rclades
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def build_trees(self): "Build an unrooted consensus tree from filtered clade counts." # storage nodes = {} idxarr = np.arange(len(self.fclade_counts[0][0])) queue = [] ## create dict of clade counts and set keys countdict = defaultdict(int) for clade, count in self.fclade_counts: mask = np.int_(list(clade)).astype(np.bool) ccx = idxarr[mask] queue.append((len(ccx), frozenset(ccx))) countdict[frozenset(ccx)] = count while queue: queue.sort() (clade_size, clade) = queue.pop(0) new_queue = [] # search for ancestors of clade for (_, ancestor) in queue: if clade.issubset(ancestor): # update ancestor such that, in the following example: # ancestor == {1, 2, 3, 4} # clade == {2, 3} # new_ancestor == {1, {2, 3}, 4} new_ancestor = (ancestor - clade) | frozenset([clade]) countdict[new_ancestor] = countdict.pop(ancestor) ancestor = new_ancestor new_queue.append((len(ancestor), ancestor)) # if the clade is a tip, then we have a name if clade_size == 1: name = list(clade)[0] name = self.namedict[name] else: name = None # the clade will not be in nodes if it is a tip children = [nodes.pop(c) for c in clade if c in nodes] node = TreeNode(name=name) for child in children: node.add_child(child) if not node.is_leaf(): node.dist = int(round(100 * countdict[clade])) node.support = int(round(100 * countdict[clade])) else: node.dist = int(100) node.support = int(100) nodes[clade] = node queue = new_queue nodelist = list(nodes.values()) tre = nodelist[0] #tre.unroot() ## return the tree and other trees if present self.ttree = ToyTree(tre.write(format=0)) self.ttree._coords.update() self.nodelist = nodelist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sounds_like(self, word1, word2): """Compare the phonetic representations of 2 words, and return a boolean value."""
return self.phonetics(word1) == self.phonetics(word2)
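A usage sketch: sounds_like simply compares the phonetic keys produced by self.phonetics. The backend below is a hypothetical stand-in for a real Soundex/Metaphone encoder, and the class name Phonetics is illustrative:

class Phonetics:  # hypothetical class name
    def phonetics(self, word):
        # toy stand-in: drop vowels; a real implementation would use Soundex/Metaphone
        return ''.join(ch for ch in word.lower() if ch not in 'aeiou')
    def sounds_like(self, word1, word2):
        return self.phonetics(word1) == self.phonetics(word2)

print(Phonetics().sounds_like('colour', 'color'))  # True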
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distance(self, word1, word2, metric='levenshtein'): """Get the similarity of the words, using the supported distance metrics."""
if metric in self.distances:
    distance_func = self.distances[metric]
    return distance_func(self.phonetics(word1), self.phonetics(word2))
else:
    raise DistanceMetricError('Distance metric not supported! Choose from levenshtein, hamming.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_output_row_heights(table, spans): """ Get the heights of the rows of the output table. Parameters table : list of lists of str spans : list of lists of int Returns ------- heights : list of int The heights of each row in the output table """
heights = [] for row in table: heights.append(-1) for row in range(len(table)): for column in range(len(table[row])): text = table[row][column] span = get_span(spans, row, column) row_count = get_span_row_count(span) height = len(text.split('\n')) if row_count == 1 and height > heights[row]: heights[row] = height for row in range(len(table)): for column in range(len(table[row])): span = get_span(spans, row, column) row_count = get_span_row_count(span) if row_count > 1: text_row = span[0][0] text_column = span[0][1] end_row = text_row + row_count text = table[text_row][text_column] height = len(text.split('\n')) - (row_count - 1) add_row = 0 while height > sum(heights[text_row:end_row]): heights[text_row + add_row] += 1 if add_row + 1 < row_count: add_row += 1 else: add_row = 0 return heights
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def smedian(olist, nobs):
    '''Generalised median for odd and even numbers of samples'''
    if nobs:
        rem = nobs % 2
        midpoint = nobs // 2
        me = olist[midpoint]
        if not rem:
            me = 0.5 * (me + olist[midpoint-1])
        return me
    else:
        return NaN
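For example, on an ordered list the function returns the middle element for an odd count and the average of the two central elements for an even count:

print(smedian([1, 2, 3, 4, 5], 5))  # 3
print(smedian([1, 2, 3, 4], 4))     # 2.5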
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def roll_mean(input, window):
    '''Apply a rolling mean function to an array.
    This is a simple rolling aggregation.'''
    nobs, i, j, sum_x = 0, 0, 0, 0.
    N = len(input)
    if window > N:
        raise ValueError('Out of bound')
    output = np.ndarray(N-window+1, dtype=input.dtype)
    for val in input[:window]:
        if val == val:
            nobs += 1
            sum_x += val
    output[j] = NaN if not nobs else sum_x / nobs
    for val in input[window:]:
        prev = input[j]
        if prev == prev:
            sum_x -= prev
            nobs -= 1
        if val == val:
            nobs += 1
            sum_x += val
        j += 1
        output[j] = NaN if not nobs else sum_x / nobs
    return output
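A quick check on a small numpy array (a window of 3 over five values yields three means):

import numpy as np
NaN = float('nan')  # assumed module-level constant
print(roll_mean(np.array([1., 2., 3., 4., 5.]), 3))  # [2. 3. 4.]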
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def roll_sd(input, window, scale=1.0, ddof=0):
    '''Apply a rolling standard deviation function to an array.
    This is a simple rolling aggregation of squared sums.'''
    nobs, i, j, sx, sxx = 0, 0, 0, 0., 0.
    N = len(input)
    sqrt = np.sqrt
    if window > N:
        raise ValueError('Out of bound')
    output = np.ndarray(N-window+1, dtype=input.dtype)
    for val in input[:window]:
        if val == val:
            nobs += 1
            sx += val
            sxx += val*val
    nn = nobs - ddof
    output[j] = NaN if nn <= 0 else sqrt(scale * (sxx - sx*sx/nobs) / nn)
    for val in input[window:]:
        prev = input[j]
        if prev == prev:
            sx -= prev
            sxx -= prev*prev
            nobs -= 1
        if val == val:
            nobs += 1
            sx += val
            sxx += val*val
        j += 1
        nn = nobs - ddof
        output[j] = NaN if nn <= 0 else sqrt(scale * (sxx - sx*sx/nobs) / nn)
    return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_span(span, table): """ Ensure the span is valid. A span is a list of [row, column] pairs. These coordinates must form a rectangular shape. For example, this span will cause an error because it is not rectangular in shape.:: span = [[0, 1], [0, 2], [1, 0]] Spans must be * Rectanglular * A list of lists of int * Parameters span : list of lists of int table : list of lists of str Return ------ exception_string : str A message that states there was something wrong. """
if not type(span) is list: return "Spans must be a list of lists" for pair in span: if not type(pair) is list: return "Spans must be a list of lists of int" if not len(pair) == 2: return "Spans must be a [Row, Column] pair of integers" total_rows = get_span_row_count(span) total_columns = get_span_column_count(span) if not len(span) == total_rows * total_columns: return ''.join(["Spans must be rectangular in shape. ", str(span) + " is invalid"]) if max(span, key=lambda x: x[0])[0] > len(table) - 1: return ' '.join(["One of the span's rows extends beyond the", "bounds of the table:", str(span)]) if max(span, key=lambda x: x[1])[1] > len(table[0]) - 1: return ' '.join(["One of the span's columns extends beyond the", "bounds of the table:", str(span)]) test_span = copy.deepcopy(span) checked = [test_span.pop(0)] while len(test_span) > 0: row = test_span[0][0] col = test_span[0][1] matched = False for i in range(len(checked)): if row == checked[i][0] and abs(col - checked[i][1]) == 1: matched = True elif abs(row - checked[i][0]) == 1 and col == checked[i][1]: matched = True if matched: checked.append(test_span.pop(0)) else: checked.extend(test_span) return 'This span is not valid: ' + str(checked) return ""
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_all_cells(cells): """ Loop through list of cells and piece them together one by one Parameters cells : list of dashtable.data2rst.Cell Returns ------- grid_table : str The final grid table """
current = 0 while len(cells) > 1: count = 0 while count < len(cells): cell1 = cells[current] cell2 = cells[count] merge_direction = get_merge_direction(cell1, cell2) if not merge_direction == "NONE": merge_cells(cell1, cell2, merge_direction) if current > count: current -= 1 cells.pop(count) else: count += 1 current += 1 if current >= len(cells): current = 0 return cells[0].text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def bpp2newick(bppnewick):
    "converts bpp newick format to normal newick"
    regex1 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[:]")
    regex2 = re.compile(r" #[-+]?[0-9]*\.?[0-9]*[;]")
    regex3 = re.compile(r": ")
    new = regex1.sub(":", bppnewick)
    new = regex2.sub(";", new)
    new = regex3.sub(":", new)
    return new
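For example, an illustrative BPP-style newick string with theta annotations is cleaned into plain newick:

import re
print(bpp2newick("((a #0.1: 0.5, b #0.2: 0.5) #0.3: 0.1, c #0.4: 0.6);"))
# ((a:0.5, b:0.5):0.1, c:0.6);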
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def return_small_clade(treenode):
    "used to produce balanced trees, returns a tip node from the smaller clade"
    node = treenode
    while 1:
        if node.children:
            c1, c2 = node.children
            node = sorted([c1, c2], key=lambda x: len(x.get_leaves()))[0]
        else:
            return node
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_scale_root_height(self, treeheight=1): """ Returns a toytree copy with all nodes scaled so that the root height equals the value entered for treeheight. """
# make tree height = 1 * treeheight
ctree = self._ttree.copy()
_height = ctree.treenode.height
for node in ctree.treenode.traverse():
    node.dist = (node.dist / _height) * treeheight
ctree._coords.update()
return ctree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node_slider(self, seed=None): """ Returns a toytree copy with node heights modified while retaining the same topology but not necessarily node branching order. Node heights are moved up or down uniformly between their parent and highest child node heights in 'levelorder' from root to tips. The total tree height is retained at 1.0, only relative edge lengths change. """
# I don't think user's should need to access prop prop = 0.999 assert isinstance(prop, float), "prop must be a float" assert prop < 1, "prop must be a proportion >0 and < 1." random.seed(seed) ctree = self._ttree.copy() for node in ctree.treenode.traverse(): ## slide internal nodes if node.up and node.children: ## get min and max slides minjit = max([i.dist for i in node.children]) * prop maxjit = (node.up.height * prop) - node.height newheight = random.uniform(-minjit, maxjit) ## slide children for child in node.children: child.dist += newheight ## slide self to match node.dist -= newheight ctree._coords.update() return ctree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def baltree(ntips, treeheight=1.0): """ Returns a balanced tree topology. """
# require even number of tips if ntips % 2: raise ToytreeError("balanced trees must have even number of tips.") # make first cherry rtree = toytree.tree() rtree.treenode.add_child(name="0") rtree.treenode.add_child(name="1") # add tips in a balanced way for i in range(2, ntips): # get node to split node = return_small_clade(rtree.treenode) # add two children node.add_child(name=node.name) node.add_child(name=str(i)) # rename ancestral node node.name = None # rename tips so names are in order idx = 0 for node in rtree.treenode.traverse("postorder"): if node.is_leaf(): node.name = str(idx) idx += 1 # get toytree from newick tre = toytree.tree(rtree.write(tree_format=9)) tre = tre.mod.make_ultrametric() self = tre.mod.node_scale_root_height(treeheight) self._coords.update() return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_span_char_height(span, row_heights): """ Get the height of a span in the number of newlines it fills. Parameters span : list of list of int A list of [row, column] pairs that make up the span row_heights : list of int A list of the number of newlines for each row in the table Returns ------- total_height : int The height of the span in number of newlines """
start_row = span[0][0]
row_count = get_span_row_count(span)
total_height = 0
for i in range(start_row, start_row + row_count):
    total_height += row_heights[i]
total_height += row_count - 1
return total_height
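For instance, a span over two rows with heights of 1 and 2 newlines occupies 4 lines once the border between the merged rows is counted (this relies on the package's get_span_row_count helper reporting 2 rows for the span):

span = [[0, 0], [1, 0]]
print(get_span_char_height(span, row_heights=[1, 2]))  # 4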
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def html2data(html_string): """ Convert an html table to a data table and spans. Parameters html_string : str The string containing the html table Returns ------- table : list of lists of str spans : list of lists of lists of int A span is a list of [row, column] pairs that define what cells are merged in a table. use_headers : bool """
spans = extract_spans(html_string)
column_count = get_html_column_count(html_string)
row_count = get_html_row_count(spans)
count = 0
while count < len(spans):
    if len(spans[count]) == 1:
        spans.pop(count)
    else:
        count += 1
table = extract_table(html_string, row_count, column_count)
use_headers = headers_present(html_string)
return table, spans, use_headers
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def newick(self, tree_format=0):
    "Returns newick representation of the tree in its current state."
    # checks one of root's children for features and extra feats.
    if self.treenode.children:
        features = {"name", "dist", "support", "height", "idx"}
        testnode = self.treenode.children[0]
        extrafeat = {i for i in testnode.features if i not in features}
        features.update(extrafeat)
        return self.treenode.write(format=tree_format)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_node_values( self, feature=None, show_root=False, show_tips=False, ): """ Returns node values from tree object in node plot order. To modify values you must modify the .treenode object directly by setting new 'features'. For example for node in ttree.treenode.traverse(): node.add_feature("PP", 100) By default node and tip values are hidden (set to "") so that they are not shown on the tree plot. To include values for these nodes use the 'show_root'=True, or 'show_tips'=True arguments. tree.get_node_values("support", True, True) """
# access nodes in the order they will be plotted ndict = self.get_node_dict(return_internal=True, return_nodes=True) nodes = [ndict[i] for i in range(self.nnodes)[::-1]] # get features if feature: vals = [i.__getattribute__(feature) if hasattr(i, feature) else "" for i in nodes] else: vals = [" " for i in nodes] # apply hiding rules if not show_root: vals = [i if not j.is_root() else "" for i, j in zip(vals, nodes)] if not show_tips: vals = [i if not j.is_leaf() else "" for i, j in zip(vals, nodes)] # convert float to ints for prettier printing unless all floats # raise exception and skip if there are true strings (names) try: if all([Decimal(str(i)) % 1 == 0 for i in vals if i]): vals = [int(i) if isinstance(i, float) else i for i in vals] except Exception: pass return vals
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_tip_coordinates(self, axis=None): """ Returns coordinates of the tip positions for a tree. If no axis argument is given, a 2-d array is returned in which the first column holds the x-coordinates and the second column holds the y-coordinates. If an axis ('x' or 'y') is given, a 1-d array of just that axis is returned. """
# get coordinates array coords = self.get_node_coordinates() if axis == 'x': return coords[:self.ntips, 0] elif axis == 'y': return coords[:self.ntips, 1] return coords[:self.ntips]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rotate_node( self, names=None, wildcard=None, regex=None, idx=None, # modify_tree=False, ): """ Returns a ToyTree with the selected node rotated for plotting. """
# make a copy revd = {j: i for (i, j) in enumerate(self.get_tip_labels())} neworder = {} # get node to rotate treenode = fuzzy_match_tipnames( self, names, wildcard, regex, True, True) children = treenode.up.children names = [[j.name for j in i.get_leaves()] for i in children] nidxs = [[revd[i] for i in j] for j in names] # get size of the big clade move = max((len(i) for i in nidxs)) if len(nidxs[0]) > len(nidxs[1]): move = min((len(i) for i in nidxs)) # newdict cnames = list(itertools.chain(*names)) tdict = {i: None for i in cnames} cycle = itertools.cycle(itertools.chain(*nidxs)) for m in range(move): next(cycle) for t in cnames: tdict[t] = next(cycle) for key in revd: if key in tdict: neworder[key] = tdict[key] else: neworder[key] = revd[key] revd = {j: i for (i, j) in neworder.items()} neworder = [revd[i] for i in range(self.ntips)] # returns a new tree (i.e., copy) modified w/ a fixed order nself = ToyTree(self.newick, fixed_order=neworder) nself._coords.update() return nself
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resolve_polytomy( self, dist=1.0, support=100, recursive=True): """ Returns a copy of the tree with all polytomies randomly resolved. Does not transform tree in-place. """
nself = self.copy() nself.treenode.resolve_polytomy( default_dist=dist, default_support=support, recursive=recursive) nself._coords.update() return nself
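A hypothetical usage sketch, assuming the toytree package is installed and that this method is exposed on tree objects as shown above; the newick string is made up for illustration:

import toytree

# a tree with a single polytomy: (a, b, c) share one parent node
tre = toytree.tree("((a,b,c),d);")

# returns a modified copy; the original tree is left untouched
resolved = tre.resolve_polytomy(dist=0.5, support=99)
print(tre.nnodes, resolved.nnodes)   # the copy gains an internal node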
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unroot(self): """ Returns a copy of the tree unrooted. Does not transform tree in-place. """
nself = self.copy() nself.treenode.unroot() nself.treenode.ladderize() nself._coords.update() return nself
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_merge_direction(cell1, cell2):
    """
    Determine the side of cell1 that can be merged with cell2.

    This is based on the location of the two cells in the table as well
    as the compatibility of their height and width.

    For example these cells can merge::

        cell1    cell2      merge "RIGHT"

        +-----+  +------+   +-----+------+
        | foo |  | dog  |   | foo | dog  |
        |     |  +------+   |     +------+
        |     |  | cat  |   |     | cat  |
        |     |  +------+   |     +------+
        |     |  | bird |   |     | bird |
        +-----+  +------+   +-----+------+

    But these cells cannot merge::

        +-----+  +------+
        | foo |  | dog  |
        |     |  +------+
        |     |  | cat  |
        |     |  +------+
        |     |
        +-----+

    Parameters
    ----------
    cell1 : dashtable.data2rst.Cell
    cell2 : dashtable.data2rst.Cell

    Returns
    -------
    str
        The side onto which cell2 can be merged. Will be one of
        ["LEFT", "RIGHT", "BOTTOM", "TOP", "NONE"]
    """
cell1_left = cell1.column cell1_right = cell1.column + cell1.column_count cell1_top = cell1.row cell1_bottom = cell1.row + cell1.row_count cell2_left = cell2.column cell2_right = cell2.column + cell2.column_count cell2_top = cell2.row cell2_bottom = cell2.row + cell2.row_count if (cell1_right == cell2_left and cell1_top == cell2_top and cell1_bottom == cell2_bottom and cell1.right_sections >= cell2.left_sections): return "RIGHT" elif (cell1_left == cell2_left and cell1_right == cell2_right and cell1_top == cell2_bottom and cell1.top_sections >= cell2.bottom_sections): return "TOP" elif (cell1_left == cell2_left and cell1_right == cell2_right and cell1_bottom == cell2_top and cell1.bottom_sections >= cell2.top_sections): return "BOTTOM" elif (cell1_left == cell2_right and cell1_top == cell2_top and cell1_bottom == cell2_bottom and cell1.left_sections >= cell2.right_sections): return "LEFT" else: return "NONE"
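To exercise the "RIGHT" case from the docstring in isolation, the sketch below fakes the two cells with SimpleNamespace objects; in real use these would be dashtable.data2rst.Cell instances, the attribute values are invented for the example, and get_merge_direction above is assumed to be in scope:

from types import SimpleNamespace

def fake_cell(row, column, row_count, column_count, sections):
    # stand-in for dashtable.data2rst.Cell exposing only the attributes
    # that get_merge_direction actually reads
    return SimpleNamespace(
        row=row, column=column,
        row_count=row_count, column_count=column_count,
        left_sections=sections, right_sections=sections,
        top_sections=sections, bottom_sections=sections,
    )

cell1 = fake_cell(row=0, column=0, row_count=3, column_count=1, sections=3)
cell2 = fake_cell(row=0, column=1, row_count=3, column_count=1, sections=3)

print(get_merge_direction(cell1, cell2))  # -> "RIGHT"
print(get_merge_direction(cell2, cell1))  # -> "LEFT"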
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_nexus(self): "get newick data from NEXUS" if self.data[0].strip().upper() == "#NEXUS": nex = NexusParser(self.data) self.data = nex.newicks self.tdict = nex.tdict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def extract_tree_block(self): "iterate through data file to extract trees" lines = iter(self.data) while 1: try: line = next(lines).strip() except StopIteration: break # enter trees block if line.lower() == "begin trees;": while 1: # iter through trees block sub = next(lines).strip().split() # skip if a blank line if not sub: continue # look for translation if sub[0].lower() == "translate": while sub[0] != ";": sub = next(lines).strip().split() self.tdict[sub[0]] = sub[-1].strip(",") # parse tree blocks if sub[0].lower().startswith("tree"): self.newicks.append(sub[-1]) # end of trees block if sub[0].lower() == "end;": break
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def push_git_package(self): """ if no conflicts then write new tag to """
## check for conflicts, then write to local files self._pull_branch_from_origin() ## log commits to releasenotes if self.deploy: self._write_commits_to_release_notes() ## writes tag or 'devel' to try: self._write_new_tag_to_init() self._write_branch_and_tag_to_meta_yaml() self._push_new_tag_to_git() except Exception as inst: print("\n Error:\n", inst) self._revert_tag_in_init() sys.exit(2)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_init_release_tag(self): """ parses init.py to get previous version """
self.init_version = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", open(self.init_file, "r").read(), re.M).group(1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_log_commits(self): """ calls git log to compile a change list """
## check if update is necessary cmd = "git log --pretty=oneline {}..".format(self.init_version) cmdlist = shlex.split(cmd) commits = subprocess.check_output(cmdlist) ## Split off just the first element, we don't need commit tag self.commits = [x.split(" ", 1) for x in commits.split("\n")]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _write_commits_to_release_notes(self): """ writes commits to the releasenotes file by appending to the end """
with open(self.release_file, 'a') as out: out.write("==========\n{}\n".format(self.tag)) for commit in self.commits: try: msg = commit[1] if msg != "cosmetic": out.write("-" + msg + "\n") except: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _write_branch_and_tag_to_meta_yaml(self): """ Write branch and tag to meta.yaml by editing in place """
## set the branch to pull source from with open(self.meta_yaml.replace("meta", "template"), 'r') as infile: dat = infile.read() newdat = dat.format(**{'tag': self.tag, 'branch': self.branch}) with open(self.meta_yaml, 'w') as outfile: outfile.write(newdat)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def build_conda_packages(self): """ Run the Linux build and use converter to build OSX """
## check if update is necessary #if self.nversion == self.pversion: # raise SystemExit("Exited: new version == existing version") ## tmp dir bldir = "./tmp-bld" if not os.path.exists(bldir): os.makedirs(bldir) ## iterate over builds for pybuild in ["2.7", "3"]: ## build and upload Linux to anaconda.org build = api.build( "conda-recipe/{}".format(self.package), python=pybuild) ## upload Linux build if not self.deploy: cmd = ["anaconda", "upload", build[0], "--label", "test", "--force"] else: cmd = ["anaconda", "upload", build[0]] err = subprocess.Popen(cmd).communicate() ## build OSX copies api.convert(build[0], output_dir=bldir, platforms=["osx-64"]) osxdir = os.path.join(bldir, "osx-64", os.path.basename(build[0])) if not self.deploy: cmd = ["anaconda", "upload", osxdir, "--label", "test", "--force"] else: cmd = ["anaconda", "upload", osxdir] err = subprocess.Popen(cmd).communicate() ## cleanup tmpdir shutil.rmtree(bldir)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_span_row_count(span):
    """
    Gets the number of rows included in a span

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span

    Returns
    -------
    rows : int
        The number of rows included in the span

    Example
    -------
    Consider this table::

        +--------+-----+
        | foo    | bar |
        +--------+     |
        | spam   |     |
        +--------+     |
        | goblet |     |
        +--------+-----+

    ::

        3
    """
rows = 1 first_row = span[0][0] for i in range(len(span)): if span[i][0] > first_row: rows += 1 first_row = span[i][0] return rows
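A quick check against the docstring's example, assuming the function above is importable:

# the merged "bar" cell from the docstring occupies rows 0, 1 and 2 of column 1
span = [[0, 1], [1, 1], [2, 1]]
print(get_span_row_count(span))          # -> 3

# a span confined to a single row
print(get_span_row_count([[0, 0], [0, 1]]))  # -> 1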
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def asarray(x, dtype=None): '''Convert ``x`` into a ``numpy.ndarray``.''' iterable = scalarasiter(x) if isinstance(iterable, ndarray): return iterable else: if not hasattr(iterable, '__len__'): iterable = list(iterable) if dtype == object_type: a = ndarray((len(iterable),), dtype=dtype) for i,v in enumerate(iterable): a[i] = v return a else: return array(iterable, dtype=dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def ascolumn(x, dtype = None): '''Convert ``x`` into a ``column``-type ``numpy.ndarray``.''' x = asarray(x, dtype) return x if len(x.shape) >= 2 else x.reshape(len(x),1)
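A short sketch of how the two helpers behave, assuming asarray and ascolumn above are importable (they rely on numpy plus the module's own scalarasiter and object_type):

import numpy as np

a = asarray([1.0, 2.0, 3.0])
print(a.shape)             # -> (3,)

col = ascolumn([1.0, 2.0, 3.0])
print(col.shape)           # -> (3, 1)

# input that is already 2-d keeps its shape
m = np.ones((4, 2))
print(ascolumn(m).shape)   # -> (4, 2)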
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def multis_2_mono(table): """ Converts each multiline string in a table to single line. Parameters table : list of list of str A list of rows containing strings Returns ------- table : list of lists of str """
for row in range(len(table)): for column in range(len(table[row])): table[row][column] = table[row][column].replace('\n', ' ') return table
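For example, assuming the function above is in scope; note that it rewrites the table in place and also returns it:

table = [
    ["Feature\nname", "Value"],
    ["rows",          "3\n(including header)"],
]
flat = multis_2_mono(table)
print(flat[0][0])   # -> "Feature name"
print(flat[1][1])   # -> "3 (including header)"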
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_html_row_count(spans): """Get the number of rows"""
if spans == []: return 0 row_counts = {} for span in spans: span = sorted(span) try: row_counts[str(span[0][1])] += get_span_row_count(span) except KeyError: row_counts[str(span[0][1])] = get_span_row_count(span) values = list(row_counts.values()) return max(values)
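A small illustration, assuming get_html_row_count and get_span_row_count above are both in scope; the spans describe a table whose second column holds one cell spanning two rows:

spans = [
    [[0, 0]],            # plain cell at row 0, column 0
    [[1, 0]],            # plain cell at row 1, column 0
    [[0, 1], [1, 1]],    # one cell spanning rows 0-1 of column 1
]
print(get_html_row_count(spans))   # -> 2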
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def better_ts_function(f):
    '''Decorator which checks if the timeseries has a better implementation of the function.'''
    fname = f.__name__

    def _(ts, *args, **kwargs):
        func = getattr(ts, fname, None)
        if func:
            return func(*args, **kwargs)
        else:
            return f(ts, *args, **kwargs)

    _.__name__ = fname
    return _
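A self-contained sketch of the dispatch behaviour; the names rollmax and FastSeries are invented for the example, and better_ts_function above is assumed to be importable:

@better_ts_function
def rollmax(ts, window=2):
    # generic fallback used when ts has no rollmax of its own
    return "generic rollmax"

class FastSeries:
    def rollmax(self, window=2):
        return "optimised rollmax"

print(rollmax(FastSeries()))   # -> "optimised rollmax" (delegates to the method)
print(rollmax([1, 2, 3]))      # -> "generic rollmax"   (falls back to the decorated function)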
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def prange(ts, **kwargs): '''Rolling Percentage range. Value between 0 and 1 indicating the position in the rolling range. ''' mi = ts.rollmin(**kwargs) ma = ts.rollmax(**kwargs) return (ts - mi)/(ma - mi)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def bindata(data, maxbins = 30, reduction = 0.1):
    '''data must be a numeric list with more than 20 elements.
    This function counts the number of data points in a reduced array.
    '''
    tole = 0.01
    N = len(data)
    assert N > 20
    vmin = min(data)
    vmax = max(data)
    DV = vmax - vmin
    tol = tole*DV
    vmax += tol
    if vmin >= 0:
        vmin -= tol
        vmin = max(0.0, vmin)
    else:
        vmin -= tol
    n = min(maxbins, max(2, int(round(reduction*N))))
    DV = vmax - vmin
    bbin = npy.linspace(vmin, vmax, n+1)
    sso = npy.searchsorted(bbin, npy.sort(data))
    x = []
    y = []
    for i in range(0, n):
        x.append(0.5*(bbin[i+1]+bbin[i]))
        y.append(0.0)
    dy = 1.0/N
    for i in sso:
        y[i-1] += dy/(bbin[i]-bbin[i-1])
    return (x, y)
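A usage sketch, assuming bindata above is importable and that its module imports numpy as npy; it bins 500 random values and checks that the returned densities integrate to roughly one:

import random

random.seed(1)
data = [random.gauss(0.0, 1.0) for _ in range(500)]

x, y = bindata(data, maxbins=25)

# y holds densities per bin, so density * bin-width sums to ~1
width = x[1] - x[0]
print(len(x), round(sum(v * width for v in y), 3))   # -> 25 1.0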
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def binOp(op, indx, amap, bmap, fill_vec):
    '''Combines the values from the two map objects for each id in indx
    using the op operator. Where either map is missing a value, fill_vec
    is used for that id instead.
    '''
    def op_or_missing(id):
        va = amap.get(id, None)
        vb = bmap.get(id, None)
        if va is None or vb is None:
            # This should create as many elements as the number of columns!?
            result = fill_vec
        else:
            try:
                result = op(va, vb)
            except Exception:
                result = None
        if result is None:
            result = fill_vec
        return result

    # materialise the results so np.vstack accepts them on Python 3,
    # where map() returns a lazy iterator
    seq_arys = list(map(op_or_missing, indx))
    data = np.vstack(seq_arys)
    return data
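An illustrative call, assuming binOp above is importable and that its module imports numpy as np; 'c' is present in the index but missing from both maps, so it receives the fill vector:

import operator
import numpy as np

amap = {"a": np.array([1.0, 2.0]), "b": np.array([3.0, 4.0])}
bmap = {"a": np.array([10.0, 20.0]), "b": np.array([30.0, 40.0])}
fill = np.array([np.nan, np.nan])

result = binOp(operator.add, ["a", "b", "c"], amap, bmap, fill)
print(result)
# [[11. 22.]
#  [33. 44.]
#  [nan nan]]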
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def center_line(space, line):
    """
    Add leading & trailing space to text to center it within an allowed width

    Parameters
    ----------
    space : int
        The maximum character width allowed for the text. If the length
        of text is more than this value, no space will be added.
    line : str
        The text that will be centered.

    Returns
    -------
    line : str
        The text with the leading and trailing space added to it
    """
line = line.strip() left_length = math.floor((space - len(line)) / 2) right_length = math.ceil((space - len(line)) / 2) left_space = " " * int(left_length) right_space = " " * int(right_length) line = ''.join([left_space, line, right_space]) return line
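For instance, assuming center_line above is in scope:

print(repr(center_line(11, "foo")))   # -> '    foo    '
print(repr(center_line(10, "foo")))   # -> '   foo    '  (extra space goes to the right)
print(repr(center_line(2, "foo")))    # -> 'foo'         (no room, so no padding is added)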
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(self, function): """Register a function in the function registry. The function will be automatically instantiated if not already an instance. """
function = inspect.isclass(function) and function() or function name = function.name self[name] = function
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unregister(self, name): """Unregister function by name. """
try: name = name.name except AttributeError: pass return self.pop(name,None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def row_includes_spans(table, row, spans): """ Determine if there are spans within a row Parameters table : list of lists of str row : int spans : list of lists of lists of int Returns ------- bool Whether or not a table's row includes spans """
for column in range(len(table[row])): for span in spans: if [row, column] in span: return True return False
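For example, assuming the function above is in scope, with a single span that merges the two cells of row 0:

table = [["name", "value"],
         ["x",    "1"]]
spans = [[[0, 0], [0, 1]]]   # row 0, columns 0 and 1 are merged

print(row_includes_spans(table, 0, spans))   # -> True
print(row_includes_spans(table, 1, spans))   # -> False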
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _setup_states(state_definitions, prev=()): """Create a StateList object from a 'states' Workflow attribute."""
states = list(prev) for state_def in state_definitions: if len(state_def) != 2: raise TypeError( "The 'state' attribute of a workflow should be " "a two-tuple of strings; got %r instead." % (state_def,) ) name, title = state_def state = State(name, title) if any(st.name == name for st in states): # Replacing an existing state states = [state if st.name == name else st for st in states] else: states.append(state) return StateList(states)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _setup_transitions(tdef, states, prev=()): """Create a TransitionList object from a 'transitions' Workflow attribute. Args: tdef: list of transition definitions states (StateList): already parsed state definitions. prev (TransitionList): transition definitions from a parent. Returns: TransitionList: the list of transitions defined in the 'tdef' argument. """
trs = list(prev) for transition in tdef: if len(transition) == 3: (name, source, target) = transition if is_string(source) or isinstance(source, State): source = [source] source = [states[src] for src in source] target = states[target] tr = Transition(name, source, target) else: raise TypeError( "Elements of the 'transition' attribute of a " "workflow should be three-tuples; got %r instead." % (transition,) ) if any(prev_tr.name == tr.name for prev_tr in trs): # Replacing an existing state trs = [tr if prev_tr.name == tr.name else prev_tr for prev_tr in trs] else: trs.append(tr) return TransitionList(trs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transition(trname='', field='', check=None, before=None, after=None): """Decorator to declare a function as a transition implementation."""
if is_callable(trname): raise ValueError( "The @transition decorator should be called as " "@transition(['transition_name'], **kwargs)") if check or before or after: warnings.warn( "The use of check=, before= and after= in @transition decorators is " "deprecated in favor of @transition_check, @before_transition and " "@after_transition decorators.", DeprecationWarning, stacklevel=2) return TransitionWrapper(trname, field=field, check=check, before=before, after=after)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _make_hook_dict(fun): """Ensure the given function has a xworkflows_hook attribute. That attribute has the following structure: """
if not hasattr(fun, 'xworkflows_hook'): fun.xworkflows_hook = { HOOK_BEFORE: [], HOOK_AFTER: [], HOOK_CHECK: [], HOOK_ON_ENTER: [], HOOK_ON_LEAVE: [], } return fun.xworkflows_hook
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _match_state(self, state): """Checks whether a given State matches self.names."""
return (self.names == '*' or state in self.names or state.name in self.names)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _match_transition(self, transition): """Checks whether a given Transition matches self.names."""
return (self.names == '*' or transition in self.names or transition.name in self.names)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _pre_transition_checks(self): """Run the pre-transition checks."""
current_state = getattr(self.instance, self.field_name) if current_state not in self.transition.source: raise InvalidTransitionError( "Transition '%s' isn't available from state '%s'." % (self.transition.name, current_state.name)) for check in self._filter_hooks(HOOK_CHECK): if not check(self.instance): raise ForbiddenTransition( "Transition '%s' was forbidden by " "custom pre-transition check." % self.transition.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filter_hooks(self, *hook_kinds): """Filter a list of hooks, keeping only applicable ones."""
hooks = sum((self.hooks.get(kind, []) for kind in hook_kinds), []) return sorted(hook for hook in hooks if hook.applies_to(self.transition, self.current_state))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _post_transition(self, result, *args, **kwargs): """Performs post-transition actions."""
for hook in self._filter_hooks(HOOK_AFTER, HOOK_ON_ENTER): hook(self.instance, result, *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_parent_implems(self, parent_implems): """Import previously defined implementations. Args: parent_implems (ImplementationList): List of implementations defined in a parent class. """
for trname, attr, implem in parent_implems.get_custom_implementations(): self.implementations[trname] = implem.copy() self.transitions_at[trname] = attr self.custom_implems.add(trname)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_implem(self, transition, attribute, function, **kwargs): """Add an implementation. Args: transition (Transition): the transition for which the implementation is added attribute (str): the name of the attribute where the implementation will be available function (callable): the actual implementation function **kwargs: extra arguments for the related ImplementationProperty. """
implem = ImplementationProperty( field_name=self.state_field, transition=transition, workflow=self.workflow, implementation=function, **kwargs) self.implementations[transition.name] = implem self.transitions_at[transition.name] = attribute return implem