text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Returns an iterator over all descendant nodes.
<END_TASK>
<USER_TASK:>
Description:
def iter_descendants(self, strategy="levelorder", is_leaf_fn=None):
    """Yield every descendant node, i.e. all traversed nodes except self."""
    for descendant in self.traverse(strategy=strategy, is_leaf_fn=is_leaf_fn):
        if descendant is not self:
            yield descendant
<SYSTEM_TASK:>
Iterate over all descendant nodes.
<END_TASK>
<USER_TASK:>
Description:
def _iter_descendants_levelorder(self, is_leaf_fn=None):
    """Yield nodes in breadth-first (level) order, starting with self.

    If ``is_leaf_fn`` is given, nodes for which it returns True are
    treated as leaves and their children are not visited.
    """
    pending = deque([self])
    while pending:
        current = pending.popleft()
        yield current
        if not is_leaf_fn or not is_leaf_fn(current):
            pending.extend(current.children)
<SYSTEM_TASK:>
Iterator over all descendant nodes.
<END_TASK>
<USER_TASK:>
Description:
def _iter_descendants_preorder(self, is_leaf_fn=None):
    """Yield nodes in pre-order (depth first, parents before children).

    Parameters
    ----------
    is_leaf_fn : callable, optional
        If given, a node for which ``is_leaf_fn(node)`` is true is
        treated as a leaf and its children are not visited.
    """
    to_visit = deque()
    node = self
    while node is not None:
        yield node
        if not is_leaf_fn or not is_leaf_fn(node):
            # extendleft reverses its argument, so pre-reverse to keep
            # children in their original left-to-right order.
            to_visit.extendleft(reversed(node.children))
        try:
            node = to_visit.popleft()
        # Bug fix: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; only an empty deque is expected here.
        except IndexError:
            node = None
<SYSTEM_TASK:>
Iterates over the list of all ancestor nodes from
<END_TASK>
<USER_TASK:>
Description:
def iter_ancestors(self):
    """
    Yield every ancestor node, walking from the current node up to the
    current tree root.
    """
    current = self.up
    while current is not None:
        yield current
        current = current.up
<SYSTEM_TASK:>
Returns the absolute root node of current tree structure.
<END_TASK>
<USER_TASK:>
Description:
def get_tree_root(self):
    """Return the absolute root node of the current tree structure."""
    node = self
    while node.up is not None:
        node = node.up
    return node
<SYSTEM_TASK:>
Returns the first common ancestor between this node and a given
<END_TASK>
<USER_TASK:>
Description:
def get_common_ancestor(self, *target_nodes, **kargs):
    """
    Returns the first common ancestor between this node and a given
    list of 'target_nodes'.

    Accepts node instances or node names (names are resolved through
    _translate_nodes).  Supported keyword argument: ``get_path`` —
    when True, also return the node->ancestor-path mapping used to
    find the ancestor.

    **Examples:**
    t = tree.Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=common], (D:0.00001):0.000001):2.0[&&NHX:name=root];")
    A = t.get_descendants_by_name("A")[0]
    C = t.get_descendants_by_name("C")[0]
    common = A.get_common_ancestor(C)
    print common.name
    """
    get_path = kargs.get("get_path", False)
    # Allow a single container argument instead of varargs.
    if len(target_nodes) == 1 and type(target_nodes[0]) \
            in set([set, tuple, list, frozenset]):
        target_nodes = target_nodes[0]
    # Convert node names into node instances
    target_nodes = _translate_nodes(self, *target_nodes)
    # If only one node is provided, use self as the second target
    if type(target_nodes) != list:
        target_nodes = [target_nodes, self]
    # n2path maps each target node to the set of its ancestors (incl.
    # itself); `reference` keeps the first target's ancestor chain in
    # root-ward order so the *closest* common ancestor is found first.
    n2path = {}
    reference = []
    ref_node = None
    for n in target_nodes:
        current = n
        while current:
            n2path.setdefault(n, set()).add(current)
            if not ref_node:
                reference.append(current)
            current = current.up
        if not ref_node:
            ref_node = n
    # The first node of the reference chain contained in every other
    # target's ancestor set is the common ancestor.
    common = None
    for n in reference:
        broken = False
        for node, path in six.iteritems(n2path):
            if node is not ref_node and n not in path:
                broken = True
                break
        if not broken:
            common = n
            break
    if not common:
        raise TreeError("Nodes are not connected!")
    if get_path:
        return common, n2path
    else:
        return common
<SYSTEM_TASK:>
Search nodes in an iterative way. Matches are yielded as
<END_TASK>
<USER_TASK:>
Description:
def iter_search_nodes(self, **conditions):
    """
    Search nodes iteratively, yielding matches as soon as they are
    found instead of scanning the full tree topology first.  Useful
    when dealing with huge trees.
    """
    for node in self.traverse():
        if all(hasattr(node, attr) and getattr(node, attr) == expected
               for attr, expected in six.iteritems(conditions)):
            yield node
<SYSTEM_TASK:>
Returns the node's farthest descendant or ancestor node, and the
<END_TASK>
<USER_TASK:>
Description:
def get_farthest_node(self, topology_only=False):
    """
    Returns the node's farthest descendant or ancestor node, and the
    distance to it.
    :argument False topology_only: If set to True, distance
    between nodes will be referred to the number of nodes
    between them. In other words, topological distance will be
    used instead of branch length distances.
    :return: A tuple containing the farthest node referred to the
    current node and the distance to it.
    """
    # Init farthest node to current farthest leaf (the farthest node
    # might instead be reached through an ancestor; checked below).
    farthest_node, farthest_dist = self.get_farthest_leaf(
        topology_only=topology_only)
    prev = self
    cdist = 0.0 if topology_only else prev.dist
    current = prev.up
    # Walk up towards the root; at each ancestor, probe the subtrees
    # hanging off the *other* children (siblings of the path walked).
    while current is not None:
        for ch in current.children:
            if ch != prev:
                if not ch.is_leaf():
                    fnode, fdist = ch.get_farthest_leaf(
                        topology_only=topology_only)
                else:
                    fnode = ch
                    fdist = 0
                # Add the branch connecting the sibling subtree itself.
                if topology_only:
                    fdist += 1.0
                else:
                    fdist += ch.dist
                if cdist+fdist > farthest_dist:
                    farthest_dist = cdist + fdist
                    farthest_node = fnode
        prev = current
        # Accumulate the distance already climbed from self.
        if topology_only:
            cdist += 1
        else:
            cdist += prev.dist
        current = prev.up
    return farthest_node, farthest_dist
<SYSTEM_TASK:>
Returns the node that divides the current tree into two
<END_TASK>
<USER_TASK:>
Description:
def get_midpoint_outgroup(self):
    """
    Returns the node that divides the current tree into two
    distance-balanced partitions.
    """
    # Find the two most distant nodes: first the farthest leaf from the
    # root, then the node farthest from that leaf (tree diameter ends).
    root = self.get_tree_root()
    node_a, _ = root.get_farthest_leaf()
    _, a2b_dist = node_a.get_farthest_node()
    # Walk up from node_a accumulating branch lengths; stop at the first
    # node whose accumulated distance exceeds half the longest path.
    half_dist = a2b_dist / 2.0
    cdist = 0
    current = node_a
    while current is not None:
        cdist += current.dist
        if cdist > half_dist:
            break
        current = current.up
    return current
<SYSTEM_TASK:>
Generates a random topology by populating current node.
<END_TASK>
<USER_TASK:>
Description:
def populate(self,
             size,
             names_library=None,
             reuse_names=False,
             random_branches=False,
             branch_range=(0, 1),
             support_range=(0, 1)):
    """
    Generates a random topology by populating current node.
    :argument None names_library: If provided, names library
    (list, set, dict, etc.) will be used to name nodes.
    :argument False reuse_names: If True, node names will not be
    necessarily unique, which makes the process a bit more
    efficient.
    :argument False random_branches: If True, branch distances and support
    values will be randomized.
    :argument (0,1) branch_range: If random_branches is True, this
    range of values will be used to generate random distances.
    :argument (0,1) support_range: If random_branches is True,
    this range of values will be used to generate random branch
    support values.
    """
    NewNode = self.__class__
    if len(self.children) > 1:
        # Node already has children: push them under a new connector so
        # this node keeps a binary split with the freshly populated side.
        connector = NewNode()
        for ch in self.get_children():
            ch.detach()
            connector.add_child(child=ch)
        root = NewNode()
        self.add_child(child=connector)
        self.add_child(child=root)
    else:
        root = self
    next_deq = deque([root])
    for i in range(size - 1):
        # Pick a random end of the queue to randomize the topology shape.
        if random.randint(0, 1):
            p = next_deq.pop()
        else:
            p = next_deq.popleft()
        c1 = p.add_child()
        c2 = p.add_child()
        next_deq.extend([c1, c2])
        if random_branches:
            c1.dist = random.uniform(*branch_range)
            c2.dist = random.uniform(*branch_range)
            # Bug fix: support values were previously drawn from
            # branch_range; the documented behavior is support_range.
            c1.support = random.uniform(*support_range)
            c2.support = random.uniform(*support_range)
        else:
            c1.dist = 1.0
            c2.dist = 1.0
            c1.support = 1.0
            c2.support = 1.0
    # next_deq now contains only leaf nodes; assign names to them.
    charset = "abcdefghijklmnopqrstuvwxyz"
    if names_library:
        names_library = deque(names_library)
    else:
        avail_names = itertools.combinations_with_replacement(charset, 10)
    for n in next_deq:
        if names_library:
            if reuse_names:
                tname = random.sample(names_library, 1)[0]
            else:
                # NOTE(review): pops without replacement — raises if
                # names_library has fewer entries than leaves; confirm
                # callers guarantee enough names.
                tname = names_library.pop()
        else:
            tname = ''.join(next(avail_names))
        n.name = tname
<SYSTEM_TASK:>
Sets a descendant node as the outgroup of a tree. This function
<END_TASK>
<USER_TASK:>
Description:
def set_outgroup(self, outgroup):
    """
    Sets a descendant node as the outgroup of a tree. This function
    can be used to root a tree or even an internal node.

    Parameters:
    -----------
    outgroup:
        a node instance within the same tree structure that will be
        used as a basal node.

    Raises TreeError when `outgroup` is this node itself.
    """
    outgroup = _translate_nodes(self, outgroup)
    if self == outgroup:
        ##return
        ## why raise an error for this?
        raise TreeError("Cannot set myself as outgroup")
    parent_outgroup = outgroup.up
    # Detects (sub)tree root: climb from outgroup to the direct child
    # of self that contains it.
    n = outgroup
    while n.up is not self:
        n = n.up
    # If outgroup is a child from root, but with more than one
    # sister nodes, creates a new node to group them
    self.children.remove(n)
    if len(self.children) != 1:
        down_branch_connector = self.__class__()
        down_branch_connector.dist = 0.0
        down_branch_connector.support = n.support
        for ch in self.get_children():
            down_branch_connector.children.append(ch)
            ch.up = down_branch_connector
            self.children.remove(ch)
    else:
        down_branch_connector = self.children[0]
    # Connects down branch to myself or to outgroup
    # (variable names are Spanish: quien_va_ser_padre = "who will be
    # the parent", quien_va_ser_hijo = "who will be the child",
    # quien_fue_padre = "who was the parent")
    quien_va_ser_padre = parent_outgroup
    if quien_va_ser_padre is not self:
        # Parent-child swapping: reverse the direction of every edge on
        # the path from the outgroup's parent up to self, shifting each
        # node's dist/support one step along the reversed path.
        quien_va_ser_hijo = quien_va_ser_padre.up
        quien_fue_padre = None
        buffered_dist = quien_va_ser_padre.dist
        buffered_support = quien_va_ser_padre.support
        while quien_va_ser_hijo is not self:
            quien_va_ser_padre.children.append(quien_va_ser_hijo)
            quien_va_ser_hijo.children.remove(quien_va_ser_padre)
            buffered_dist2 = quien_va_ser_hijo.dist
            buffered_support2 = quien_va_ser_hijo.support
            quien_va_ser_hijo.dist = buffered_dist
            quien_va_ser_hijo.support = buffered_support
            buffered_dist = buffered_dist2
            buffered_support = buffered_support2
            quien_va_ser_padre.up = quien_fue_padre
            quien_fue_padre = quien_va_ser_padre
            quien_va_ser_padre = quien_va_ser_hijo
            quien_va_ser_hijo = quien_va_ser_padre.up
        quien_va_ser_padre.children.append(down_branch_connector)
        down_branch_connector.up = quien_va_ser_padre
        quien_va_ser_padre.up = quien_fue_padre
        down_branch_connector.dist += buffered_dist
        outgroup2 = parent_outgroup
        parent_outgroup.children.remove(outgroup)
        outgroup2.dist = 0
    else:
        outgroup2 = down_branch_connector
    outgroup.up = self
    outgroup2.up = self
    # outgroup is always the first child. Some functions may
    # rely on this fact, so do not change this.
    self.children = [outgroup, outgroup2]
    # Split the branch length evenly between the two sides of the
    # new root.
    middist = (outgroup2.dist + outgroup.dist) / 2
    outgroup.dist = middist
    outgroup2.dist = middist
    outgroup2.support = outgroup.support
<SYSTEM_TASK:>
Unroots current node. This function is expected to be used on
<END_TASK>
<USER_TASK:>
Description:
def unroot(self):
    """
    Unroot the current node by converting its bifurcation into a
    multifurcation.  Normally applied to the absolute tree root, but
    any internal node works.  Raises TreeError when both children are
    leaves (nothing can be collapsed).
    """
    if len(self.children) == 2:
        first, second = self.children
        if not first.is_leaf():
            first.delete()
        elif not second.is_leaf():
            second.delete()
        else:
            raise TreeError("Cannot unroot a tree with only two leaves")
<SYSTEM_TASK:>
Returns the ASCII representation of the tree.
<END_TASK>
<USER_TASK:>
Description:
def _asciiArt(self, char1='-', show_internal=True, compact=False, attributes=None):
    """
    Returns the ASCII representation of the tree.
    Code based on the PyCogent GPL project.

    Returns a tuple ``(lines, mid)`` where ``lines`` is the list of
    text rows for this subtree and ``mid`` is the index of the row
    where the branch to this node attaches.
    """
    if not attributes:
        attributes = ["name"]
    # toytree edit:
    # removed six dependency for map with comprehension
    # node_name = ', '.join(map(str, [getattr(self, v) for v in attributes if hasattr(self, v)]))
    _attrlist = [getattr(self, v) for v in attributes if hasattr(self, v)]
    node_name = ", ".join([str(i) for i in _attrlist])
    # LEN is the horizontal space reserved for this node's label.
    LEN = max(3, len(node_name) if not self.children or show_internal else 3)
    PAD = ' ' * LEN
    PA = ' ' * (LEN - 1)
    if not self.is_leaf():
        mids = []
        result = []
        for c in self.children:
            # First child connects with '/', last with '\', middle with '-'.
            if len(self.children) == 1:
                char2 = '/'
            elif c is self.children[0]:
                char2 = '/'
            elif c is self.children[-1]:
                char2 = '\\'
            else:
                char2 = '-'
            (clines, mid) = c._asciiArt(char2, show_internal, compact, attributes)
            mids.append(mid + len(result))
            result.extend(clines)
            if not compact:
                result.append('')
        if not compact:
            result.pop()
        # Build the vertical connector column spanning from the first to
        # the last child's attachment row.
        (lo, hi, end) = (mids[0], mids[-1], len(result))
        prefixes = [PAD] * (lo + 1) + [PA + '|'] * (hi - lo - 1) + [PAD] * (end - hi)
        mid = int((lo + hi) / 2)
        prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1]
        result = [p + l for (p, l) in zip(prefixes, result)]
        if show_internal:
            # Overwrite part of the stem row with this node's label.
            stem = result[mid]
            result[mid] = stem[0] + node_name + stem[len(node_name) + 1:]
        return (result, mid)
    else:
        return ([char1 + '-' + node_name], 0)
<SYSTEM_TASK:>
This function sort the branches of a given tree by
<END_TASK>
<USER_TASK:>
Description:
def sort_descendants(self, attr="name"):
    """
    Sort the branches of this tree by node names so that trees with
    identical names always end up with an identical child order.  If
    duplicated names are present, extra criteria should be added to
    break ties.
    """
    node2content = self.get_cached_content(store_attr=attr, container_type=list)
    for internal in self.traverse():
        if internal.is_leaf():
            continue
        internal.children.sort(key=lambda child: str(sorted(node2content[child])))
<SYSTEM_TASK:>
Iterate over the list of edges of a tree. Each edge is represented as a
<END_TASK>
<USER_TASK:>
Description:
def iter_edges(self, cached_content=None):
    """
    Iterate over the edges of a tree.  Each edge is yielded as a tuple
    of two elements containing the leaves on either side of the edge.
    """
    if not cached_content:
        cached_content = self.get_cached_content()
    all_leaves = cached_content[self]
    for side1 in six.itervalues(cached_content):
        yield (side1, all_leaves - side1)
<SYSTEM_TASK:>
Returns True if a given target attribute is monophyletic under
<END_TASK>
<USER_TASK:>
Description:
def check_monophyly(self,
                    values,
                    target_attr,
                    ignore_missing=False,
                    unrooted=False):
    """
    Returns True if a given target attribute is monophyletic under
    this node for the provided set of values.
    If not all values are represented in the current tree
    structure, a ValueError exception will be raised to warn that
    strict monophyly could never be reached (this behaviour can be
    avoided by enabling the `ignore_missing` flag.
    Parameters:
    -----------
    values:
        a set of values for which monophyly is expected.
    target_attr:
        node attribute being used to check monophyly (i.e. species for
        species trees, names for gene family trees, or any custom feature
        present in the tree).
    ignore_missing:
        Avoid raising an Exception when missing attributes are found.
    unrooted:
        If True, tree will be treated as unrooted, thus allowing to find
        monophyly even when current outgroup is splitting a monophyletic group.
    Returns:
    --------
    the following tuple
        IsMonophyletic (boolean),
        clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
        leaves breaking the monophyly (set)
    """
    if type(values) != set:
        values = set(values)
    # This is the only time I traverse the tree, then I use cached
    # leaf content
    n2leaves = self.get_cached_content()
    # Raise an error if requested attribute values are not even present
    if ignore_missing:
        found_values = set([getattr(n, target_attr) for n in n2leaves[self]])
        missing_values = values - found_values
        values = values & found_values
    # Locate leaves matching requested attribute values
    targets = set([leaf for leaf in n2leaves[self]
                   if getattr(leaf, target_attr) in values])
    if not ignore_missing:
        if values - set([getattr(leaf, target_attr) for leaf in targets]):
            raise ValueError('The monophyly of the provided values could never be reached, as not all of them exist in the tree.'
                             ' Please check your target attribute and values, or set the ignore_missing flag to True')
    if unrooted:
        # Unrooted check: find the smallest edge-side fully containing
        # the targets; equality of sizes means a perfect (monophyletic)
        # split.
        # NOTE(review): if `targets` is empty, `smallest` stays None and
        # the subtraction below raises TypeError — confirm callers never
        # pass an empty value set with unrooted=True.
        smallest = None
        for side1, side2 in self.iter_edges(cached_content=n2leaves):
            if targets.issubset(side1) and (not smallest or len(side1) < len(smallest)):
                smallest = side1
            elif targets.issubset(side2) and (not smallest or len(side2) < len(smallest)):
                smallest = side2
            if smallest is not None and len(smallest) == len(targets):
                break
        foreign_leaves = smallest - targets
    else:
        # Check monophyly with get_common_ancestor. Note that this
        # step does not require traversing the tree again because
        # targets are node instances instead of node names, and
        # get_common_ancestor function is smart enough to detect it
        # and avoid unnecessary traversing.
        common = self.get_common_ancestor(targets)
        observed = n2leaves[common]
        foreign_leaves = set([leaf for leaf in observed
                              if getattr(leaf, target_attr) not in values])
    if not foreign_leaves:
        return True, "monophyletic", foreign_leaves
    else:
        # if the requested attribute is not monophyletic in this
        # node, let's differentiate between poly and paraphyly.
        poly_common = self.get_common_ancestor(foreign_leaves)
        # if the common ancestor of all foreign leaves is self
        # contained, we have a paraphyly. Otherwise, polyphyly.
        polyphyletic = [leaf for leaf in poly_common if
                        getattr(leaf, target_attr) in values]
        if polyphyletic:
            return False, "polyphyletic", foreign_leaves
        else:
            return False, "paraphyletic", foreign_leaves
<SYSTEM_TASK:>
Returns a list of nodes matching the provided monophyly
<END_TASK>
<USER_TASK:>
Description:
def get_monophyletic(self, values, target_attr):
    """
    Yield nodes matching the provided monophyly criteria.  For a node
    to be considered a match, all `target_attr` values within the
    node, and exclusively them, should be grouped.

    :param values: a set of values for which monophyly is expected.
    :param target_attr: node attribute being used to check monophyly
        (i.e. species for species trees, names for gene family trees).
    """
    if type(values) != set:
        values = set(values)
    n2values = self.get_cached_content(store_attr=target_attr)

    def _is_monophyletic(node):
        # A node matches when its cached attribute set equals `values`.
        return n2values[node] == values

    for match in self.iter_leaves(is_leaf_fn=_is_monophyletic):
        if _is_monophyletic(match):
            yield match
<SYSTEM_TASK:>
Removes all empty lines from above and below the text.
<END_TASK>
<USER_TASK:>
Description:
def truncate_empty_lines(lines):
    """
    Remove all empty lines from above and below the text.

    ``text.strip()`` cannot be used here because it would also remove
    the leading space of the table.

    Parameters
    ----------
    lines : list of str

    Returns
    -------
    lines : list of str
        The same list, modified in place, without blank lines above or
        below the text.
    """
    # Guard against an empty (or all-blank) input, which previously
    # raised IndexError when popping from an exhausted list.
    while lines and lines[0].rstrip() == '':
        lines.pop(0)
    while lines and lines[-1].rstrip() == '':
        lines.pop(-1)
    return lines
<SYSTEM_TASK:>
Convert a string or html file to an rst table string.
<END_TASK>
<USER_TASK:>
Description:
def html2rst(html_string, force_headers=False, center_cells=False,
             center_headers=False):
    """
    Convert a string or html file to an rst table string.

    Parameters
    ----------
    html_string : str
        Either the html string, or the filepath to the html
    force_headers : bool
        Make the first row become headers, whether or not they are
        headers in the html file.
    center_cells : bool
        Whether or not to center the contents of the cells
    center_headers : bool
        Whether or not to center the contents of the header cells

    Returns
    -------
    str
        The html table converted to an rst grid table

    Notes
    -----
    This function **requires** BeautifulSoup_ to work.

    .. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/
    """
    # A path argument is read from disk; anything else is treated as
    # raw html markup.
    if os.path.isfile(html_string):
        with open(html_string, 'r', encoding='utf-8') as html_file:
            html_string = html_file.read()
    table_data, spans, use_headers = html2data(html_string)
    if table_data == '':
        return ''
    use_headers = use_headers or force_headers
    return data2rst(table_data, spans, use_headers, center_cells, center_headers)
<SYSTEM_TASK:>
Create a list of rows and columns that will make up a span
<END_TASK>
<USER_TASK:>
Description:
def make_span(row, column, extra_rows, extra_columns):
    """
    Create the list of [row, column] pairs that make up a span.

    Parameters
    ----------
    row : int
        The row of the first cell in the span
    column : int
        The column of the first cell in the span
    extra_rows : int
        The number of additional rows that make up the span
    extra_columns : int
        The number of additional columns that make up the span

    Returns
    -------
    span : list of lists of int
        A span is a list of [row, column] pairs that make up a span
        (pairs may be repeated; consumers only care about coverage).
    """
    span = [[row, column]]
    for span_row in range(row, row + extra_rows + 1):
        span.append([span_row, column])
        for span_column in range(column, column + extra_columns + 1):
            span.append([row, span_column])
            span.append([span_row, span_column])
    return span
<SYSTEM_TASK:>
Convert the contents of a span of the table to a grid table cell
<END_TASK>
<USER_TASK:>
Description:
def make_cell(table, span, widths, heights, use_headers):
    """
    Convert the contents of a span of the table to a grid table cell

    Parameters
    ----------
    table : list of lists of str
        The table of rows containg strings to convert to a grid table
    span : list of lists of int
        list of [row, column] pairs that make up a span in the table
    widths : list of int
        list of the column widths of the table
    heights : list of int
        list of the heights of each row in the table
    use_headers : bool
        Whether or not to use headers in the table

    Returns
    -------
    cell : dashtable.data2rst.Cell
    """
    # Total character dimensions of the (possibly multi-column/row) span.
    width = get_span_char_width(span, widths)
    height = get_span_char_height(span, heights)
    # The text lives in the top-left cell of the span.
    text_row = span[0][0]
    text_column = span[0][1]
    text = table[text_row][text_column]
    lines = text.split("\n")
    # Right-pad every line to the full span width.
    for i in range(len(lines)):
        width_difference = width - len(lines[i])
        lines[i] = ''.join([lines[i], " " * width_difference])
    # Bottom-pad with blank lines to the full span height.
    height_difference = height - len(lines)
    empty_lines = []
    for i in range(0, height_difference):
        empty_lines.append(" " * width)
    lines.extend(empty_lines)
    # Assemble the framed cell: top border, body rows, bottom border.
    output = [
        ''.join(["+", (width * "-") + "+"])
    ]
    for i in range(0, height):
        output.append("|" + lines[i] + "|")
    # A header cell (first table row) is underlined with '=' per rst
    # grid-table convention.
    if use_headers and span[0][0] == 0:
        symbol = "="
    else:
        symbol = "-"
    output.append(
        ''.join(["+", width * symbol, "+"])
    )
    text = "\n".join(output)
    row_count = get_span_row_count(span)
    column_count = get_span_column_count(span)
    cell = Cell(text, text_row, text_column, row_count, column_count)
    return cell
<SYSTEM_TASK:>
Initialize the versioning support using SQLAlchemy-Continuum.
<END_TASK>
<USER_TASK:>
Description:
def init_versioning(self, app, database, versioning_manager=None):
    """Initialize the versioning support using SQLAlchemy-Continuum.

    :param app: Flask application whose config controls versioning
        (``DB_VERSIONING``, ``DB_VERSIONING_USER_MODEL``).
    :param database: database object exposing ``metadata``, ``mapper``
        and ``Model``.
    :param versioning_manager: optional Continuum manager; defaults to
        Continuum's global one.
    """
    # Probe whether SQLAlchemy-Continuum is installed to pick a sane
    # default for the DB_VERSIONING flag.
    try:
        pkg_resources.get_distribution('sqlalchemy_continuum')
    except pkg_resources.DistributionNotFound:  # pragma: no cover
        default_versioning = False
    else:
        default_versioning = True
    app.config.setdefault('DB_VERSIONING', default_versioning)
    if not app.config['DB_VERSIONING']:
        return
    # Versioning requested explicitly but the dependency is missing.
    if not default_versioning:  # pragma: no cover
        raise RuntimeError(
            'Please install extra versioning support first by running '
            'pip install invenio-db[versioning].'
        )
    # Now we can import SQLAlchemy-Continuum.
    from sqlalchemy_continuum import make_versioned
    from sqlalchemy_continuum import versioning_manager as default_vm
    from sqlalchemy_continuum.plugins import FlaskPlugin
    # Try to guess user model class:
    if 'DB_VERSIONING_USER_MODEL' not in app.config:  # pragma: no cover
        try:
            pkg_resources.get_distribution('invenio_accounts')
        except pkg_resources.DistributionNotFound:
            user_cls = None
        else:
            user_cls = 'User'
    else:
        user_cls = app.config.get('DB_VERSIONING_USER_MODEL')
    plugins = [FlaskPlugin()] if user_cls else []
    # Call make_versioned() before your models are defined.
    self.versioning_manager = versioning_manager or default_vm
    make_versioned(
        user_cls=user_cls,
        manager=self.versioning_manager,
        plugins=plugins,
    )
    # Register models that have been loaded beforehand.
    builder = self.versioning_manager.builder
    for tbl in database.metadata.tables.values():
        builder.instrument_versioned_classes(
            database.mapper, get_class_by_table(database.Model, tbl)
        )
<SYSTEM_TASK:>
Ensure SQLite checks foreign key constraints.
<END_TASK>
<USER_TASK:>
Description:
def do_sqlite_connect(dbapi_connection, connection_record):
    """Ensure SQLite checks foreign key constraints.

    For further details see "Foreign key support" sections on
    https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
    """
    # SQLite ships with foreign key enforcement disabled per connection;
    # switch it on for every new DBAPI connection.
    fk_cursor = dbapi_connection.cursor()
    fk_cursor.execute('PRAGMA foreign_keys=ON')
    fk_cursor.close()
<SYSTEM_TASK:>
Call before engine creation.
<END_TASK>
<USER_TASK:>
Description:
def apply_driver_hacks(self, app, info, options):
    """Call before engine creation.

    Applies per-driver workarounds (sqlite, psycopg2, pymysql) and
    registers adapters so Werkzeug ``LocalProxy`` objects can be passed
    as query parameters.
    """
    # Don't forget to apply hacks defined on parent object.
    super(SQLAlchemy, self).apply_driver_hacks(app, info, options)
    if info.drivername == 'sqlite':
        connect_args = options.setdefault('connect_args', {})
        if 'isolation_level' not in connect_args:
            # disable pysqlite's emitting of the BEGIN statement entirely.
            # also stops it from emitting COMMIT before any DDL.
            connect_args['isolation_level'] = None
        # Register the sqlite event hooks once per process.
        if not event.contains(Engine, 'connect', do_sqlite_connect):
            event.listen(Engine, 'connect', do_sqlite_connect)
        if not event.contains(Engine, 'begin', do_sqlite_begin):
            event.listen(Engine, 'begin', do_sqlite_begin)
        from sqlite3 import register_adapter

        def adapt_proxy(proxy):
            """Get current object and try to adapt it again."""
            return proxy._get_current_object()

        register_adapter(LocalProxy, adapt_proxy)
    elif info.drivername == 'postgresql+psycopg2':  # pragma: no cover
        from psycopg2.extensions import adapt, register_adapter

        def adapt_proxy(proxy):
            """Get current object and try to adapt it again."""
            return adapt(proxy._get_current_object())

        register_adapter(LocalProxy, adapt_proxy)
    elif info.drivername == 'mysql+pymysql':  # pragma: no cover
        from pymysql import converters

        def escape_local_proxy(val, mapping):
            """Get current object and try to adapt it again."""
            return converters.escape_item(
                val._get_current_object(),
                self.engine.dialect.encoding,
                mapping=mapping,
            )

        converters.conversions[LocalProxy] = escape_local_proxy
        converters.encoders[LocalProxy] = escape_local_proxy
<SYSTEM_TASK:>
Find the length of a colspan.
<END_TASK>
<USER_TASK:>
Description:
def get_span_column_count(span):
    """
    Find the length of a colspan.

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span

    Returns
    -------
    columns : int
        The number of columns included in the span

    Example
    -------
    Consider this table::

        +------+------------------+
        | foo  | bar              |
        +------+--------+---------+
        | spam | goblet | berries |
        +------+--------+---------+

    ::

        >>> span = [[0, 1], [0, 2]]
        >>> print(get_span_column_count(span))
        2
    """
    # Count how many times the column index increases past the running
    # maximum (span pairs list columns in non-decreasing order).
    highest_column = span[0][1]
    columns = 1
    for pair in span:
        if pair[1] > highest_column:
            columns += 1
            highest_column = pair[1]
    return columns
<SYSTEM_TASK:>
Sum the widths of the columns that make up the span, plus the extra.
<END_TASK>
<USER_TASK:>
Description:
def get_span_char_width(span, column_widths):
    """
    Sum the widths of the columns that make up the span, plus the extra.

    Parameters
    ----------
    span : list of lists of int
        list of [row, column] pairs that make up the span
    column_widths : list of int
        The widths of the columns that make up the table

    Returns
    -------
    total_width : int
        The total width of the span
    """
    start_column = span[0][1]
    column_count = get_span_column_count(span)
    # Member column widths plus one separator character between each
    # pair of adjacent columns.
    total_width = sum(column_widths[col]
                      for col in range(start_column,
                                       start_column + column_count))
    return total_width + column_count - 1
<SYSTEM_TASK:>
Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.
<END_TASK>
<USER_TASK:>
Description:
def rebuild_encrypted_properties(old_key, model, properties):
    """Rebuild a model's EncryptedType properties when the SECRET_KEY is changed.

    :param old_key: old SECRET_KEY.
    :param model: the affected db model.
    :param properties: list of properties to rebuild.
    """
    inspector = reflection.Inspector.from_engine(db.engine)
    primary_key_names = inspector.get_primary_keys(model.__tablename__)
    new_secret_key = current_app.secret_key
    db.session.expunge_all()
    try:
        with db.session.begin_nested():
            # Temporarily swap in the *old* key so existing rows can be
            # decrypted during the SELECT below.
            current_app.secret_key = old_key
            db_columns = []
            for primary_key in primary_key_names:
                db_columns.append(getattr(model, primary_key))
            for prop in properties:
                db_columns.append(getattr(model, prop))
            old_rows = db.session.query(*db_columns).all()
    except Exception as e:
        current_app.logger.error(
            'Exception occurred while reading encrypted properties. '
            'Try again before starting the server with the new secret key.')
        raise e
    finally:
        # Always restore the new key, even if decryption failed.
        current_app.secret_key = new_secret_key
        db.session.expunge_all()
    for old_row in old_rows:
        # Split the row into its primary-key part and the decrypted
        # property values.
        primary_keys, old_entries = old_row[:len(primary_key_names)], \
            old_row[len(primary_key_names):]
        primary_key_fields = dict(zip(primary_key_names, primary_keys))
        update_values = dict(zip(properties, old_entries))
        # Writing the values back re-encrypts them with the new key.
        model.query.filter_by(**primary_key_fields).\
            update(update_values)
    db.session.commit()
<SYSTEM_TASK:>
Get the name of the versioned model class.
<END_TASK>
<USER_TASK:>
Description:
def versioning_model_classname(manager, model):
    """Get the name of the versioned model class."""
    base_name = model.__name__
    if manager.options.get('use_module_name', True):
        # Prefix with the title-cased module path, dots stripped.
        module_part = model.__module__.title().replace('.', '')
        return '%s%sVersion' % (module_part, base_name)
    return '%sVersion' % (base_name,)
<SYSTEM_TASK:>
Return True if all versioning models have been registered.
<END_TASK>
<USER_TASK:>
Description:
def versioning_models_registered(manager, base):
    """Return True if all versioning models have been registered."""
    registered_names = base._decl_class_registry.keys()
    expected_names = (versioning_model_classname(manager, cls)
                      for cls in manager.pending_classes)
    return all(name in registered_names for name in expected_names)
<SYSTEM_TASK:>
Add the links to the bottom of the text
<END_TASK>
<USER_TASK:>
Description:
def add_links(converted_text, html):
    """
    Add the links to the bottom of the text

    Scans the html for anchor tags, appends rst link targets for
    ordinary hyperlinks, and emits footnote/citation definitions for
    anchors marked with docutils classes.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Anchors with these classes are footnote/citation machinery, not
    # ordinary hyperlinks, and must not get a `.. _name: url` target.
    link_exceptions = [
        'footnote-reference',
        'fn-backref',
        'citation-reference'
    ]
    footnotes = {}
    citations = {}
    backrefs = {}
    links = soup.find_all('a')
    for link in links:
        href = link.get('href')
        text = process_tag(link)
        classes = dict(link.attrs).get('class', '')
        if 'footnote-reference' in classes:
            footnotes[href] = '#' + link.get('id')
        elif 'citation-reference' in classes:
            text = process_tag(link)
            citations[text] = '#' + link.get('id')
        elif 'fn-backref' in classes:
            # The definition text lives in the <td> following the backref.
            sibling = link.findNext('td')
            text = process_tag(sibling)
            backrefs[href] = text
        excepted_link = False
        for class_type in classes:
            if class_type in link_exceptions:
                excepted_link = True
        if not excepted_link:
            # Strip the rst reference trailing underscore and, for
            # multi-word link text, the surrounding quote characters.
            if text.endswith('_'):
                text = text[0:-1]
            if len(text.split(' ')) > 1:
                text = text[1:-1]
            converted_text += '.. _' + text + ': ' + href + '\n'
    if len(footnotes.keys()) > 0:
        converted_text += '\n'
        for key in footnotes.keys():
            text = backrefs[footnotes[key]]
            converted_text += '.. [' + key + '] ' + text + '\n'
    if len(citations.keys()) > 0:
        converted_text += '\n'
        for key in citations.keys():
            text = backrefs[citations[key]]
            converted_text += '.. ' + key[0:-1] + ' ' + text + '\n'
    return converted_text.rstrip()
<SYSTEM_TASK:>
Returns an extended majority rule consensus tree as a Toytree object.
<END_TASK>
<USER_TASK:>
Description:
def get_consensus_tree(self, cutoff=0.0, best_tree=None):
    """
    Return an extended majority rule consensus tree as a Toytree object.

    Node 'support' values on the result record how often each clade
    occurs across the trees in the input treelist.  Clades occurring in
    fewer than *cutoff* (a proportion, e.g. 0.5 for 50%) of the trees
    are collapsed into polytomies.

    The *best_tree* option (computing support values for the clades of
    a supplied tree, e.g. an ML tree against bootstrap replicates) is
    not implemented yet and raises NotImplementedError.
    """
    if best_tree:
        raise NotImplementedError("best_tree option not yet supported.")
    builder = ConsensusTree(self.treelist, cutoff)
    builder.update()
    return builder.ttree
<SYSTEM_TASK:>
Compare the phonetic representations of 2 words, and return a boolean value.
<END_TASK>
<USER_TASK:>
Description:
def sounds_like(self, word1, word2):
    """Return True when both words share the same phonetic encoding."""
    first = self.phonetics(word1)
    second = self.phonetics(word2)
    return first == second
<SYSTEM_TASK:>
Get the similarity of the words, using the supported distance metrics.
<END_TASK>
<USER_TASK:>
Description:
def distance(self, word1, word2, metric='levenshtein'):
    """Measure phonetic similarity of two words with a supported metric.

    Raises DistanceMetricError for metrics outside ``self.distances``.
    """
    # guard clause: unknown metrics fail fast
    if metric not in self.distances:
        raise DistanceMetricError('Distance metric not supported! Choose from levenshtein, hamming.')
    measure = self.distances[metric]
    return measure(self.phonetics(word1), self.phonetics(word2))
<SYSTEM_TASK:>
Get the heights of the rows of the output table.
<END_TASK>
<USER_TASK:>
Description:
def get_output_row_heights(table, spans):
    """
    Get the heights of the rows of the output table.

    A row's height starts as the maximum line count among its
    single-row cells; rows covered by multi-row spans are then grown
    until each span's text fits within the combined height.

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of int

    Returns
    -------
    heights : list of int
        The heights of each row in the output table
    """
    heights = []
    for row in table:
        heights.append(-1)
    # first pass: base height from single-row cells only
    for row in range(len(table)):
        for column in range(len(table[row])):
            text = table[row][column]
            span = get_span(spans, row, column)
            row_count = get_span_row_count(span)
            height = len(text.split('\n'))
            if row_count == 1 and height > heights[row]:
                heights[row] = height
    # second pass: grow rows under multi-row spans until their text fits
    for row in range(len(table)):
        for column in range(len(table[row])):
            span = get_span(spans, row, column)
            row_count = get_span_row_count(span)
            if row_count > 1:
                text_row = span[0][0]
                text_column = span[0][1]
                end_row = text_row + row_count
                text = table[text_row][text_column]
                # the span absorbs row_count - 1 internal border lines
                height = len(text.split('\n')) - (row_count - 1)
                add_row = 0
                # distribute extra lines over the spanned rows round-robin
                while height > sum(heights[text_row:end_row]):
                    heights[text_row + add_row] += 1
                    if add_row + 1 < row_count:
                        add_row += 1
                    else:
                        add_row = 0
    return heights
<SYSTEM_TASK:>
Ensure the span is valid.
<END_TASK>
<USER_TASK:>
Description:
def check_span(span, table):
    """
    Ensure the span is valid.

    A span is a list of [row, column] pairs. These coordinates
    must form a rectangular shape. For example, this span will cause an
    error because it is not rectangular in shape.::

        span = [[0, 1], [0, 2], [1, 0]]

    Spans must be

        * Rectangular
        * A list of lists of int
        * Contained within the bounds of the table

    Parameters
    ----------
    span : list of lists of int
    table : list of lists of str

    Return
    ------
    exception_string : str
        A message that states there was something wrong; empty string
        when the span is valid.
    """
    if not type(span) is list:
        return "Spans must be a list of lists"
    for pair in span:
        if not type(pair) is list:
            return "Spans must be a list of lists of int"
        if not len(pair) == 2:
            return "Spans must be a [Row, Column] pair of integers"
    # a proper rectangle covers exactly rows * columns cells
    total_rows = get_span_row_count(span)
    total_columns = get_span_column_count(span)
    if not len(span) == total_rows * total_columns:
        return ''.join(["Spans must be rectangular in shape. ",
                        str(span) + " is invalid"])
    # bounds checks against the table dimensions
    if max(span, key=lambda x: x[0])[0] > len(table) - 1:
        return ' '.join(["One of the span's rows extends beyond the",
                         "bounds of the table:", str(span)])
    if max(span, key=lambda x: x[1])[1] > len(table[0]) - 1:
        return ' '.join(["One of the span's columns extends beyond the",
                         "bounds of the table:", str(span)])
    # connectivity check: each remaining cell must be orthogonally
    # adjacent to a cell that has already been verified
    test_span = copy.deepcopy(span)
    checked = [test_span.pop(0)]
    while len(test_span) > 0:
        row = test_span[0][0]
        col = test_span[0][1]
        matched = False
        for i in range(len(checked)):
            if row == checked[i][0] and abs(col - checked[i][1]) == 1:
                matched = True
            elif abs(row - checked[i][0]) == 1 and col == checked[i][1]:
                matched = True
        if matched:
            checked.append(test_span.pop(0))
        else:
            checked.extend(test_span)
            return 'This span is not valid: ' + str(checked)
    return ""
<SYSTEM_TASK:>
Loop through list of cells and piece them together one by one
<END_TASK>
<USER_TASK:>
Description:
def merge_all_cells(cells):
    """
    Loop through list of cells and piece them together one by one

    Repeatedly scans for a pair of cells whose edges line up (see
    get_merge_direction), merges them, and keeps going until a single
    cell containing the entire grid remains.

    Parameters
    ----------
    cells : list of dashtable.data2rst.Cell

    Returns
    -------
    grid_table : str
        The final grid table
    """
    current = 0
    while len(cells) > 1:
        count = 0
        while count < len(cells):
            cell1 = cells[current]
            cell2 = cells[count]
            merge_direction = get_merge_direction(cell1, cell2)
            if not merge_direction == "NONE":
                merge_cells(cell1, cell2, merge_direction)
                # keep 'current' pointing at the same anchor cell after
                # the merged cell is removed from the list
                if current > count:
                    current -= 1
                cells.pop(count)
            else:
                count += 1
        # advance the anchor, wrapping around when we run off the end
        current += 1
        if current >= len(cells):
            current = 0
    return cells[0].text
<SYSTEM_TASK:>
Returns a toytree copy with all nodes scaled so that the root
<END_TASK>
<USER_TASK:>
Description:
def node_scale_root_height(self, treeheight=1):
    """
    Return a toytree copy rescaled so the root height equals
    *treeheight*; relative branch lengths are unchanged.
    """
    rescaled = self._ttree.copy()
    original_height = rescaled.treenode.height
    # scale every edge by the same factor so topology shape is kept
    for node in rescaled.treenode.traverse():
        node.dist = (node.dist / original_height) * treeheight
    rescaled._coords.update()
    return rescaled
<SYSTEM_TASK:>
Returns a toytree copy with node heights modified while retaining
<END_TASK>
<USER_TASK:>
Description:
def node_slider(self, seed=None):
    """
    Returns a toytree copy with node heights modified while retaining
    the same topology but not necessarily node branching order.
    Node heights are moved up or down uniformly between their parent
    and highest child node heights in 'levelorder' from root to tips.
    The total tree height is retained at 1.0, only relative edge
    lengths change.

    Parameters
    ----------
    seed : optional
        Seed passed to random.seed for reproducible slides.
    """
    # prop bounds how far a node may slide toward its parent/children;
    # kept internal since users should not need to tune it
    prop = 0.999
    assert isinstance(prop, float), "prop must be a float"
    assert prop < 1, "prop must be a proportion >0 and < 1."
    random.seed(seed)
    ctree = self._ttree.copy()
    for node in ctree.treenode.traverse():
        ## slide internal nodes
        if node.up and node.children:
            ## get min and max slides
            # lower bound: cannot slide below the longest child edge
            minjit = max([i.dist for i in node.children]) * prop
            # upper bound: cannot rise above the parent node's height
            maxjit = (node.up.height * prop) - node.height
            newheight = random.uniform(-minjit, maxjit)
            ## slide children
            for child in node.children:
                child.dist += newheight
            ## slide self to match, keeping total tree height fixed
            node.dist -= newheight
    ctree._coords.update()
    return ctree
<SYSTEM_TASK:>
Get the height of a span in the number of newlines it fills.
<END_TASK>
<USER_TASK:>
Description:
def get_span_char_height(span, row_heights):
    """
    Compute how many newlines tall a span is.

    Sums the heights of every row the span covers, plus one extra line
    for each internal row border the merged span absorbs.

    Parameters
    ----------
    span : list of list of int
        A list of [row, column] pairs that make up the span
    row_heights : list of int
        A list of the number of newlines for each row in the table

    Returns
    -------
    total_height : int
        The height of the span in number of newlines
    """
    first_row = span[0][0]
    spanned_rows = get_span_row_count(span)
    covered = row_heights[first_row:first_row + spanned_rows]
    # each merged row boundary contributes one additional line
    return sum(covered) + spanned_rows - 1
<SYSTEM_TASK:>
Convert an html table to a data table and spans.
<END_TASK>
<USER_TASK:>
Description:
def html2data(html_string):
    """
    Convert an html table to a data table and spans.

    Parameters
    ----------
    html_string : str
        The string containing the html table

    Returns
    -------
    table : list of lists of str
    spans : list of lists of lists of int
        A span is a list of [row, column] pairs that define what cells
        are merged in a table.
    use_headers : bool
        Whether header cells were detected in the html table.
    """
    spans = extract_spans(html_string)
    column_count = get_html_column_count(html_string)
    row_count = get_html_row_count(spans)
    # drop single-cell spans; only genuine multi-cell merges matter
    count = 0
    while count < len(spans):
        if len(spans[count]) == 1:
            spans.pop(count)
        else:
            count += 1
    table = extract_table(html_string, row_count, column_count)
    use_headers = headers_present(html_string)
    return table, spans, use_headers
<SYSTEM_TASK:>
Returns node values from tree object in node plot order. To modify
<END_TASK>
<USER_TASK:>
Description:
def get_node_values(
    self,
    feature=None,
    show_root=False,
    show_tips=False,
    ):
    """
    Returns node values from tree object in node plot order. To modify
    values you must modify the .treenode object directly by setting new
    'features'. For example::

        for node in ttree.treenode.traverse():
            node.add_feature("PP", 100)

    By default node and tip values are hidden (set to "") so that they
    are not shown on the tree plot. To include values for these nodes
    use the 'show_root'=True, or 'show_tips'=True arguments::

        tree.get_node_values("support", True, True)
    """
    # access nodes in the order they will be plotted
    ndict = self.get_node_dict(return_internal=True, return_nodes=True)
    nodes = [ndict[i] for i in range(self.nnodes)[::-1]]
    # get features; nodes lacking the feature yield an empty string
    if feature:
        vals = [i.__getattribute__(feature) if hasattr(i, feature)
                else "" for i in nodes]
    else:
        vals = [" " for i in nodes]
    # blank out root/tip values unless explicitly requested
    if not show_root:
        vals = [i if not j.is_root() else "" for i, j in zip(vals, nodes)]
    if not show_tips:
        vals = [i if not j.is_leaf() else "" for i, j in zip(vals, nodes)]
    # convert float to ints for prettier printing unless all floats
    # raise exception and skip if there are true strings (names)
    try:
        if all([Decimal(str(i)) % 1 == 0 for i in vals if i]):
            vals = [int(i) if isinstance(i, float) else i for i in vals]
    except Exception:
        pass
    return vals
<SYSTEM_TASK:>
Returns coordinates of the tip positions for a tree. If no argument
<END_TASK>
<USER_TASK:>
Description:
def get_tip_coordinates(self, axis=None):
    """
    Return coordinates of the tip positions for this tree.

    Without an *axis* argument a 2-d array is returned whose first
    column holds x coordinates and second column y coordinates; with
    axis='x' or axis='y' only that 1-d column is returned.
    """
    all_coords = self.get_node_coordinates()
    tips = all_coords[:self.ntips]
    if axis == 'x':
        return tips[:, 0]
    if axis == 'y':
        return tips[:, 1]
    return tips
<SYSTEM_TASK:>
Returns a ToyTree with the selected node rotated for plotting.
<END_TASK>
<USER_TASK:>
Description:
def rotate_node(
    self,
    names=None,
    wildcard=None,
    regex=None,
    idx=None,
    # modify_tree=False,
    ):
    """
    Returns a ToyTree with the selected node rotated for plotting.

    The node is selected by tip names, a wildcard, or a regex pattern.
    NOTE(review): tip colors do not align correctly if nodes are
    rotated — confirm before relying on per-tip styling.
    """
    # map current tip labels to their plot positions
    revd = {j: i for (i, j) in enumerate(self.get_tip_labels())}
    neworder = {}
    # locate the node whose children will be rotated
    treenode = fuzzy_match_tipnames(
        self, names, wildcard, regex, True, True)
    children = treenode.up.children
    names = [[j.name for j in i.get_leaves()] for i in children]
    nidxs = [[revd[i] for i in j] for j in names]
    # get size of the big clade; shift amount depends on which child
    # clade is larger
    move = max((len(i) for i in nidxs))
    if len(nidxs[0]) > len(nidxs[1]):
        move = min((len(i) for i in nidxs))
    # cycle the tip indices by 'move' steps to swap the clades
    cnames = list(itertools.chain(*names))
    tdict = {i: None for i in cnames}
    cycle = itertools.cycle(itertools.chain(*nidxs))
    for m in range(move):
        next(cycle)
    for t in cnames:
        tdict[t] = next(cycle)
    # untouched tips keep their original positions
    for key in revd:
        if key in tdict:
            neworder[key] = tdict[key]
        else:
            neworder[key] = revd[key]
    revd = {j: i for (i, j) in neworder.items()}
    neworder = [revd[i] for i in range(self.ntips)]
    # returns a new tree (i.e., copy) modified w/ a fixed order
    nself = ToyTree(self.newick, fixed_order=neworder)
    nself._coords.update()
    return nself
<SYSTEM_TASK:>
Returns a copy of the tree with all polytomies randomly resolved.
<END_TASK>
<USER_TASK:>
Description:
def resolve_polytomy(
    self,
    dist=1.0,
    support=100,
    recursive=True):
    """
    Return a copy of this tree with every polytomy randomly resolved.

    The original tree is left untouched; new edges receive *dist* as
    their length and *support* as their support value.
    """
    resolved = self.copy()
    resolved.treenode.resolve_polytomy(
        default_dist=dist,
        default_support=support,
        recursive=recursive)
    resolved._coords.update()
    return resolved
<SYSTEM_TASK:>
Returns a copy of the tree unrooted. Does not transform tree in-place.
<END_TASK>
<USER_TASK:>
Description:
def unroot(self):
    """
    Return an unrooted, ladderized copy of this tree.

    The copy is modified and returned; self is untouched.
    """
    copied = self.copy()
    copied.treenode.unroot()
    copied.treenode.ladderize()
    copied._coords.update()
    return copied
<SYSTEM_TASK:>
Determine the side of cell1 that can be merged with cell2.
<END_TASK>
<USER_TASK:>
Description:
def get_merge_direction(cell1, cell2):
    """
    Report which side of cell1 can absorb cell2.

    Two cells merge only when they are exactly adjacent in the table,
    their shared edge has the same extent on both cells, and cell1 has
    at least as many border sections on the shared edge as cell2.

    Parameters
    ----------
    cell1 : dashtable.data2rst.Cell
    cell2 : dashtable.data2rst.Cell

    Returns
    -------
    str
        The side onto which cell2 can be merged; one of
        ["LEFT", "RIGHT", "BOTTOM", "TOP", "NONE"].
    """
    # bounding edges of each cell in table coordinates
    left1 = cell1.column
    right1 = cell1.column + cell1.column_count
    top1 = cell1.row
    bottom1 = cell1.row + cell1.row_count
    left2 = cell2.column
    right2 = cell2.column + cell2.column_count
    top2 = cell2.row
    bottom2 = cell2.row + cell2.row_count

    # shared-edge extents must match exactly for a clean merge
    same_rows = top1 == top2 and bottom1 == bottom2
    same_cols = left1 == left2 and right1 == right2

    if (same_rows and right1 == left2
            and cell1.right_sections >= cell2.left_sections):
        return "RIGHT"
    if (same_cols and top1 == bottom2
            and cell1.top_sections >= cell2.bottom_sections):
        return "TOP"
    if (same_cols and bottom1 == top2
            and cell1.bottom_sections >= cell2.top_sections):
        return "BOTTOM"
    if (same_rows and left1 == right2
            and cell1.left_sections >= cell2.right_sections):
        return "LEFT"
    return "NONE"
<SYSTEM_TASK:>
if there are no conflicts, then write the new tag to the local release files and push it to git
<END_TASK>
<USER_TASK:>
Description:
def push_git_package(self):
    """
    If there are no conflicts, write the new tag to the local release
    files and push it to git.

    Pulls the branch from origin first to surface conflicts, appends
    the commit log to the release notes when deploying, then writes
    the tag into __init__ and meta.yaml and pushes; on any failure the
    init file is reverted and the process exits with status 2.
    """
    ## check for conflicts, then write to local files
    self._pull_branch_from_origin()
    ## log commits to releasenotes
    if self.deploy:
        self._write_commits_to_release_notes()
    ## write the tag to __init__ and meta.yaml, then push it to git
    try:
        self._write_new_tag_to_init()
        self._write_branch_and_tag_to_meta_yaml()
        self._push_new_tag_to_git()
    except Exception as inst:
        # roll back the init file so a failed push leaves no stale tag
        print("\n Error:\n", inst)
        self._revert_tag_in_init()
        sys.exit(2)
<SYSTEM_TASK:>
parses init.py to get previous version
<END_TASK>
<USER_TASK:>
Description:
def _get_init_release_tag(self):
"""
parses init.py to get previous version
""" |
self.init_version = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
open(self.init_file, "r").read(),
re.M).group(1) |
<SYSTEM_TASK:>
calls git log to compile a change list
<END_TASK>
<USER_TASK:>
Description:
def _get_log_commits(self):
    """
    Run ``git log`` to compile the change list since the last release.

    Stores a list of [hash, message] pairs on ``self.commits``, one per
    commit made after the tag in ``self.init_version``.
    """
    ## list one-line summaries of every commit since the release tag
    cmd = "git log --pretty=oneline {}..".format(self.init_version)
    cmdlist = shlex.split(cmd)
    commits = subprocess.check_output(cmdlist)
    # check_output returns bytes on Python 3; decode before splitting,
    # otherwise .split("\n") raises TypeError
    if isinstance(commits, bytes):
        commits = commits.decode("utf-8")
    ## Split off just the first element, we don't need commit tag
    self.commits = [x.split(" ", 1) for x in commits.split("\n")]
<SYSTEM_TASK:>
writes commits to the releasenotes file by appending to the end
<END_TASK>
<USER_TASK:>
Description:
def _write_commits_to_release_notes(self):
"""
writes commits to the releasenotes file by appending to the end
""" |
with open(self.release_file, 'a') as out:
out.write("==========\n{}\n".format(self.tag))
for commit in self.commits:
try:
msg = commit[1]
if msg != "cosmetic":
out.write("-" + msg + "\n")
except:
pass |
<SYSTEM_TASK:>
Write branch and tag to meta.yaml by editing in place
<END_TASK>
<USER_TASK:>
Description:
def _write_branch_and_tag_to_meta_yaml(self):
"""
Write branch and tag to meta.yaml by editing in place
""" |
## set the branch to pull source from
with open(self.meta_yaml.replace("meta", "template"), 'r') as infile:
dat = infile.read()
newdat = dat.format(**{'tag': self.tag, 'branch': self.branch})
with open(self.meta_yaml, 'w') as outfile:
outfile.write(newdat) |
<SYSTEM_TASK:>
Run the Linux build and use converter to build OSX
<END_TASK>
<USER_TASK:>
Description:
def build_conda_packages(self):
    """
    Run the Linux build and use converter to build OSX

    For Python 2.7 and 3 this builds the conda recipe, uploads the
    Linux package to anaconda.org (under the 'test' label unless
    deploying), converts it to an osx-64 package, uploads that as
    well, and finally removes the temporary build directory.
    """
    ## check if update is necessary
    #if self.nversion == self.pversion:
    #    raise SystemExit("Exited: new version == existing version")
    ## tmp dir for the converted osx packages
    bldir = "./tmp-bld"
    if not os.path.exists(bldir):
        os.makedirs(bldir)
    ## iterate over python versions to build
    for pybuild in ["2.7", "3"]:
        ## build and upload Linux to anaconda.org
        build = api.build(
            "conda-recipe/{}".format(self.package),
            python=pybuild)
        ## upload Linux build; non-deploy runs go to the 'test' label
        if not self.deploy:
            cmd = ["anaconda", "upload", build[0], "--label", "test", "--force"]
        else:
            cmd = ["anaconda", "upload", build[0]]
        err = subprocess.Popen(cmd).communicate()
        ## convert the Linux build into an OSX copy and upload it too
        api.convert(build[0], output_dir=bldir, platforms=["osx-64"])
        osxdir = os.path.join(bldir, "osx-64", os.path.basename(build[0]))
        if not self.deploy:
            cmd = ["anaconda", "upload", osxdir, "--label", "test", "--force"]
        else:
            cmd = ["anaconda", "upload", osxdir]
        err = subprocess.Popen(cmd).communicate()
    ## cleanup tmpdir
    shutil.rmtree(bldir)
<SYSTEM_TASK:>
Gets the number of rows included in a span
<END_TASK>
<USER_TASK:>
Description:
def get_span_row_count(span):
    """
    Count how many table rows a span covers.

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span

    Returns
    -------
    rows : int
        The number of rows included in the span

    Example
    -------
    A span covering rows 0-2 of a single column::

        >>> span = [[0, 1], [1, 1], [2, 1]]
        >>> print(get_span_row_count(span))
        3
    """
    count = 1
    last_row = span[0][0]
    # count each time the row index increases past the last seen row
    for pair in span[1:]:
        if pair[0] > last_row:
            count += 1
            last_row = pair[0]
    return count
<SYSTEM_TASK:>
Converts each multiline string in a table to single line.
<END_TASK>
<USER_TASK:>
Description:
def multis_2_mono(table):
    """
    Flatten every multiline cell in a table to a single line, in place.

    Parameters
    ----------
    table : list of list of str
        A list of rows containing strings

    Returns
    -------
    table : list of lists of str
        The same table object, with newlines replaced by spaces.
    """
    for current_row in table:
        for idx, cell in enumerate(current_row):
            current_row[idx] = cell.replace('\n', ' ')
    return table
<SYSTEM_TASK:>
Add leading & trailing space to text to center it within an allowed
<END_TASK>
<USER_TASK:>
Description:
def center_line(space, line):
    """
    Center text within an allowed width by padding it with spaces.

    Parameters
    ----------
    space : int
        The maximum character width allowed for the text. If the
        stripped text is longer than this, no padding is added.
    line : str
        The text that will be centered (surrounding whitespace is
        stripped first).

    Returns
    -------
    line : str
        The text with leading and trailing padding; any odd leftover
        space goes on the right side.
    """
    stripped = line.strip()
    padding = space - len(stripped)
    # integer floor/ceil split; negative padding collapses to no pad
    left = padding // 2
    right = padding - left
    return "".join([" " * left, stripped, " " * right])
<SYSTEM_TASK:>
Register a function in the function registry.
<END_TASK>
<USER_TASK:>
Description:
def register(self, function):
    """Register a function in the function registry.

    The function will be automatically instantiated if not already an
    instance, so the registry always stores instances.
    """
    # The previous ``isclass(f) and f() or f`` idiom silently
    # registered the *class* whenever the created instance evaluated
    # as falsy; an explicit conditional avoids that pitfall.
    if inspect.isclass(function):
        function = function()
    name = function.name
    self[name] = function
<SYSTEM_TASK:>
Determine if there are spans within a row
<END_TASK>
<USER_TASK:>
Description:
def row_includes_spans(table, row, spans):
    """
    Tell whether any cell of the given row belongs to a span.

    Parameters
    ----------
    table : list of lists of str
    row : int
    spans : list of lists of lists of int

    Returns
    -------
    bool
        True when at least one [row, column] pair of this row appears
        in some span.
    """
    return any(
        [row, column] in span
        for column in range(len(table[row]))
        for span in spans
    )
<SYSTEM_TASK:>
Create a StateList object from a 'states' Workflow attribute.
<END_TASK>
<USER_TASK:>
Description:
def _setup_states(state_definitions, prev=()):
    """Build a StateList from a workflow's 'states' declarations.

    Each definition must be a (name, title) two-tuple.  A definition
    whose name matches a state inherited from *prev* replaces that
    state in place; new names are appended at the end.
    """
    collected = list(prev)
    for definition in state_definitions:
        if len(definition) != 2:
            raise TypeError(
                "The 'state' attribute of a workflow should be "
                "a two-tuple of strings; got %r instead." % (definition,)
            )
        name, title = definition
        new_state = State(name, title)
        known_names = [st.name for st in collected]
        if name in known_names:
            # Replacing an existing state
            collected = [new_state if st.name == name else st
                         for st in collected]
        else:
            collected.append(new_state)
    return StateList(collected)
<SYSTEM_TASK:>
Create a TransitionList object from a 'transitions' Workflow attribute.
<END_TASK>
<USER_TASK:>
Description:
def _setup_transitions(tdef, states, prev=()):
    """Create a TransitionList object from a 'transitions' Workflow attribute.

    Args:
        tdef: list of transition definitions, each a
            (name, source, target) three-tuple where source may be a
            single state/name or a list of them.
        states (StateList): already parsed state definitions.
        prev (TransitionList): transition definitions from a parent.

    Returns:
        TransitionList: the list of transitions defined in the 'tdef' argument.
    """
    trs = list(prev)
    for transition in tdef:
        if len(transition) == 3:
            (name, source, target) = transition
            # allow a single source state or name as shorthand for a list
            if is_string(source) or isinstance(source, State):
                source = [source]
            # resolve names to State objects via the StateList
            source = [states[src] for src in source]
            target = states[target]
            tr = Transition(name, source, target)
        else:
            raise TypeError(
                "Elements of the 'transition' attribute of a "
                "workflow should be three-tuples; got %r instead." % (transition,)
            )
        if any(prev_tr.name == tr.name for prev_tr in trs):
            # Replacing an inherited transition with the same name
            trs = [tr if prev_tr.name == tr.name else prev_tr for prev_tr in trs]
        else:
            trs.append(tr)
    return TransitionList(trs)
<SYSTEM_TASK:>
Decorator to declare a function as a transition implementation.
<END_TASK>
<USER_TASK:>
Description:
def transition(trname='', field='', check=None, before=None, after=None):
    """Decorator to declare a function as a transition implementation.

    Args:
        trname (str): name of the transition the decorated function
            implements.
        field (str): optional name of the state field the transition
            is bound to.
        check, before, after (callable): deprecated hook arguments,
            kept for backwards compatibility.

    Returns:
        TransitionWrapper: wraps the decorated function so the
        workflow machinery can collect it.
    """
    # Guard against bare ``@transition`` usage: the decorator must be
    # called as a factory, otherwise trname would be the function itself.
    if is_callable(trname):
        raise ValueError(
            "The @transition decorator should be called as "
            "@transition(['transition_name'], **kwargs)")
    if check or before or after:
        warnings.warn(
            "The use of check=, before= and after= in @transition decorators is "
            "deprecated in favor of @transition_check, @before_transition and "
            "@after_transition decorators.",
            DeprecationWarning,
            stacklevel=2)
    return TransitionWrapper(trname, field=field, check=check, before=before, after=after)
<SYSTEM_TASK:>
Ensure the given function has a xworkflows_hook attribute.
<END_TASK>
<USER_TASK:>
Description:
def _make_hook_dict(fun):
    """Ensure *fun* carries an ``xworkflows_hook`` registry; return it.

    The registry has the following structure:

    >>> {
    ...     'before': [('state', <TransitionHook>), ...],
    ... }
    """
    try:
        return fun.xworkflows_hook
    except AttributeError:
        # first hook on this function: attach an empty registry with
        # one list per hook kind
        fun.xworkflows_hook = {
            kind: [] for kind in
            (HOOK_BEFORE, HOOK_AFTER, HOOK_CHECK, HOOK_ON_ENTER, HOOK_ON_LEAVE)
        }
        return fun.xworkflows_hook
<SYSTEM_TASK:>
Checks whether a given State matches self.names.
<END_TASK>
<USER_TASK:>
Description:
def _match_state(self, state):
"""Checks whether a given State matches self.names.""" |
return (self.names == '*'
or state in self.names
or state.name in self.names) |
<SYSTEM_TASK:>
Checks whether a given Transition matches self.names.
<END_TASK>
<USER_TASK:>
Description:
def _match_transition(self, transition):
"""Checks whether a given Transition matches self.names.""" |
return (self.names == '*'
or transition in self.names
or transition.name in self.names) |
<SYSTEM_TASK:>
Filter a list of hooks, keeping only applicable ones.
<END_TASK>
<USER_TASK:>
Description:
def _filter_hooks(self, *hook_kinds):
"""Filter a list of hooks, keeping only applicable ones.""" |
hooks = sum((self.hooks.get(kind, []) for kind in hook_kinds), [])
return sorted(hook for hook in hooks
if hook.applies_to(self.transition, self.current_state)) |
<SYSTEM_TASK:>
Performs post-transition actions.
<END_TASK>
<USER_TASK:>
Description:
def _post_transition(self, result, *args, **kwargs):
    """Run the after/on-enter hooks once the transition completed."""
    applicable = self._filter_hooks(HOOK_AFTER, HOOK_ON_ENTER)
    for hook in applicable:
        hook(self.instance, result, *args, **kwargs)
<SYSTEM_TASK:>
Import previously defined implementations.
<END_TASK>
<USER_TASK:>
Description:
def load_parent_implems(self, parent_implems):
    """Copy implementations declared on a parent class into this list.

    Args:
        parent_implems (ImplementationList): List of implementations defined
            in a parent class.
    """
    for entry in parent_implems.get_custom_implementations():
        trname, attribute, implementation = entry
        # store a copy so the parent's registry is never mutated
        self.implementations[trname] = implementation.copy()
        self.transitions_at[trname] = attribute
        self.custom_implems.add(trname)
<SYSTEM_TASK:>
Add an implementation.
<END_TASK>
<USER_TASK:>
Description:
def add_implem(self, transition, attribute, function, **kwargs):
    """Add an implementation.

    Args:
        transition (Transition): the transition for which the implementation
            is added
        attribute (str): the name of the attribute where the implementation
            will be available
        function (callable): the actual implementation function
        **kwargs: extra arguments for the related ImplementationProperty.

    Returns:
        ImplementationProperty: the property wrapping the implementation,
        also recorded in ``self.implementations``.
    """
    implem = ImplementationProperty(
        field_name=self.state_field,
        transition=transition,
        workflow=self.workflow,
        implementation=function,
        **kwargs)
    # index the new property by transition name for later lookup
    self.implementations[transition.name] = implem
    self.transitions_at[transition.name] = attribute
    return implem
<SYSTEM_TASK:>
Decide whether a given value should be collected.
<END_TASK>
<USER_TASK:>
Description:
def should_collect(self, value):
    """Decide whether a given value should be collected."""
    # only values decorated with @transition are candidates
    if not isinstance(value, TransitionWrapper):
        return False
    # it must relate to a transition of this workflow
    if value.trname not in self.workflow.transitions:
        return False
    # and be either unbound or bound to the current state field
    return not value.field or value.field == self.state_field
<SYSTEM_TASK:>
Collect the implementations from a given attributes dict.
<END_TASK>
<USER_TASK:>
Description:
def collect(self, attrs):
    """Collect the implementations from a given attributes dict.

    Registers an ImplementationProperty for every @transition-decorated
    attribute that relates to this workflow and state field, attaching
    any deprecated check/before/after hooks carried by the wrapper.

    Raises:
        ValueError: when two different attributes define custom
            implementations for the same transition.
    """
    for name, value in attrs.items():
        if self.should_collect(value):
            transition = self.workflow.transitions[value.trname]
            # refuse a second custom implementation registered under a
            # different attribute name for the same transition
            if (
                    value.trname in self.implementations
                    and value.trname in self.custom_implems
                    and name != self.transitions_at[value.trname]):
                # We already have an implementation registered.
                other_implem_at = self.transitions_at[value.trname]
                raise ValueError(
                    "Error for attribute %s: it defines implementation "
                    "%s for transition %s, which is already implemented "
                    "at %s." % (name, value, transition, other_implem_at))
            implem = self.add_implem(transition, name, value.func)
            self.custom_implems.add(transition.name)
            # migrate deprecated hook arguments onto the new property
            if value.check:
                implem.add_hook(Hook(HOOK_CHECK, value.check))
            if value.before:
                implem.add_hook(Hook(HOOK_BEFORE, value.before))
            if value.after:
                implem.add_hook(Hook(HOOK_AFTER, value.after))
<SYSTEM_TASK:>
Retrieve a list of custom implementations.
<END_TASK>
<USER_TASK:>
Description:
def get_custom_implementations(self):
    """Retrieve a list of custom implementations.

    Yields:
        (str, str, ImplementationProperty) tuples: The name of the attribute
        an implementation lives at, the name of the related transition,
        and the related implementation.
    """
    for trname in self.custom_implems:
        yield (trname,
               self.transitions_at[trname],
               self.implementations[trname])
<SYSTEM_TASK:>
Looks at an object method and registers it for relevent transitions.
<END_TASK>
<USER_TASK:>
Description:
def register_function_hooks(self, func):
    """Looks at an object method and registers it for relevant transitions.

    Reads the ``xworkflows_hook`` registry attached to *func* (a dict
    mapping hook kind to (field_name, hook) pairs) and attaches each
    applicable hook to the implementations of matching transitions.
    """
    for hook_kind, hooks in func.xworkflows_hook.items():
        for field_name, hook in hooks:
            # skip hooks explicitly bound to a different state field
            if field_name and field_name != self.state_field:
                continue
            for transition in self.workflow.transitions:
                if hook.applies_to(transition):
                    implem = self.implementations[transition.name]
                    implem.add_hook(hook)
<SYSTEM_TASK:>
Checks whether an ImplementationProperty may override an attribute.
<END_TASK>
<USER_TASK:>
Description:
def _may_override(self, implem, other):
    """Checks whether an ImplementationProperty may override an attribute.

    Args:
        implem (ImplementationProperty): the candidate implementation.
        other: the value currently occupying the target attribute name.

    Returns:
        bool: True when overriding *other* is legitimate.
    """
    if isinstance(other, ImplementationProperty):
        # Overriding another custom implementation for the same transition
        # and field
        return (other.transition == implem.transition and other.field_name == self.state_field)

    elif isinstance(other, TransitionWrapper):
        # Overriding the definition that led to adding the current
        # ImplementationProperty.
        return (
            other.trname == implem.transition.name
            and (not other.field or other.field == self.state_field)
            and other.func == implem.implementation)

    # any other attribute (plain value, unrelated descriptor) must not
    # be clobbered
    return False
<SYSTEM_TASK:>
Update the 'attrs' dict with generated ImplementationProperty.
<END_TASK>
<USER_TASK:>
Description:
def fill_attrs(self, attrs):
    """Update the 'attrs' dict with generated ImplementationProperty.

    For every registered transition, writes its ImplementationProperty
    at the recorded attribute name.  Raises ValueError when the name is
    already taken by something the property may not override.
    """
    for trname, attrname in self.transitions_at.items():
        implem = self.implementations[trname]
        if attrname in attrs:
            conflicting = attrs[attrname]
            # only overwrite compatible definitions (see _may_override)
            if not self._may_override(implem, conflicting):
                raise ValueError(
                    "Can't override transition implementation %s=%r with %r" %
                    (attrname, conflicting, implem))
        attrs[attrname] = implem
    return attrs
<SYSTEM_TASK:>
Perform all actions on a given attribute dict.
<END_TASK>
<USER_TASK:>
Description:
def transform(self, attrs):
    """Perform all actions on a given attribute dict.

    Collects transition implementations declared in *attrs*, generates
    implementations for transitions that have none, then writes the
    resulting ImplementationProperty objects back into *attrs*.
    """
    self.collect(attrs)
    self.add_missing_implementations()
    self.fill_attrs(attrs)
<SYSTEM_TASK:>
Log a transition.
<END_TASK>
<USER_TASK:>
Description:
def log_transition(self, transition, from_state, instance, *args, **kwargs):
    """Log a transition.

    Args:
        transition (Transition): the name of the performed transition
        from_state (State): the source state
        instance (object): the modified object

    Kwargs:
        Any passed when calling the transition
    """
    logger = logging.getLogger('xworkflows.transitions')
    # repr() of arbitrary instances may not be decodable text; fall
    # back to a placeholder instead of failing the log call
    try:
        instance_repr = u(repr(instance), 'ignore')
    except (UnicodeEncodeError, UnicodeDecodeError):
        instance_repr = u("<bad repr>")
    logger.info(
        u("%s performed transition %s.%s (%s -> %s)"), instance_repr,
        self.__class__.__name__, transition.name, from_state.name,
        transition.target.name)
<SYSTEM_TASK:>
Finds all occurrences of a workflow in the attributes definitions.
<END_TASK>
<USER_TASK:>
Description:
def _find_workflows(mcs, attrs):
    """Finds all occurrences of a workflow in the attributes definitions.

    Returns:
        dict(str => StateField): maps an attribute name to a StateField
        describing the related Workflow.
    """
    return dict(
        (attribute, StateField(value))
        for attribute, value in attrs.items()
        if isinstance(value, Workflow)
    )
<SYSTEM_TASK:>
Collect and enhance transition definitions to a workflow.
<END_TASK>
<USER_TASK:>
Description:
def _add_transitions(mcs, field_name, workflow, attrs, implems=None):
    """Collect and enhance transition definitions to a workflow.

    Modifies the 'attrs' dict in-place.

    Args:
        field_name (str): name of the field transitions should update
        workflow (Workflow): workflow we're working on
        attrs (dict): dictionary of attributes to be updated.
        implems (ImplementationList): Implementation list from parent
            classes (optional)

    Returns:
        ImplementationList: The new implementation list for this field.
    """
    impl_list = ImplementationList(field_name, workflow)
    # Inherit implementations declared on parent classes, if any.
    if implems:
        impl_list.load_parent_implems(implems)
    impl_list.transform(attrs)
    return impl_list
<SYSTEM_TASK:>
Modify coords to shift tree position for x,y baseline arguments. This
<END_TASK>
<USER_TASK:>
Description:
def set_baselines(self):
    """
    Shift tree coordinates by the style's x baseline. Useful for
    arranging trees onto a Canvas with other plots while sharing a
    common cartesian axes coordinate system.
    """
    shift = self.style.xbaseline
    if not shift:
        return
    # Vertical orientations ("up"/"down") shift the first (x) column of
    # the coordinate arrays; horizontal orientations shift the second.
    column = 0 if self.style.orient in ("up", "down") else 1
    self.coords.coords[:, column] += shift
    self.coords.verts[:, column] += shift
<SYSTEM_TASK:>
Creates a new marker for every node from idx indexes and lists of
<END_TASK>
<USER_TASK:>
Description:
def add_nodes_to_axes(self):
    """
    Creates a new marker for every node from idx indexes and lists of
    node_values, node_colors, node_sizes, node_style, node_labels_style.
    Pulls from node_color and adds to a copy of the style dict for each
    node to create marker.
    Node_colors has priority to overwrite node_style['fill']
    """
    # bail out if not any visible nodes (e.g., none w/ size>0)
    if all([i == "" for i in self.node_labels]):
        return
    # build markers for each node.
    marks = []
    for nidx in self.ttree.get_node_values('idx', 1, 1):
        # select node value from deconstructed lists
        nlabel = self.node_labels[nidx]
        nsize = self.node_sizes[nidx]
        nmarker = self.node_markers[nidx]
        # get styledict copies
        nstyle = deepcopy(self.style.node_style)
        nlstyle = deepcopy(self.style.node_labels_style)
        # and mod style dict copies from deconstructed lists
        nstyle["fill"] = self.node_colors[nidx]
        # create mark if text or node
        if (nlabel or nsize):
            mark = toyplot.marker.create(
                shape=nmarker,
                label=str(nlabel),
                size=nsize,
                mstyle=nstyle,
                lstyle=nlstyle,
            )
        else:
            # empty-string placeholder keeps marks aligned with node order
            mark = ""
        # store the nodes/marks
        marks.append(mark)
    # node_hover == True to show all features interactive
    if self.style.node_hover is True:
        title = self.get_hover()
    elif isinstance(self.style.node_hover, list):
        # todo: return advice if improperly formatted
        title = self.style.node_hover
    # if hover is false then no hover
    else:
        title = None
    # add nodes
    self.axes.scatterplot(
        self.coords.verts[:, 0],
        self.coords.verts[:, 1],
        marker=marks,
        title=title,
    )
<SYSTEM_TASK:>
Get starting position of tip labels text based on locations of the
<END_TASK>
<USER_TASK:>
Description:
def get_tip_label_coords(self):
    """
    Get starting position of tip labels text based on locations of the
    leaf nodes on the tree and style offset and align options. Node
    positions are found using the .verts attribute of coords and is
    already oriented for the tree face direction.

    Returns
    -------
    (xpos, ypos, align_edges, align_verts)
        align_edges/align_verts are None unless tip_labels_align is set.
    """
    ntips = self.ttree.ntips
    # leaf node coordinates (verts is already oriented for the face)
    xpos = self.coords.verts[:ntips, 0]
    ypos = self.coords.verts[:ntips, 1]
    align_edges = None
    align_verts = None
    if self.style.tip_labels_align:
        zeros = np.zeros(ntips)
        # one dashed edge per tip, from its aligned copy back to the tip
        align_edges = np.array([
            (ntips + i, i) for i in range(ntips)
        ])
        if self.style.orient in (0, 'down'):
            # down-facing: labels align along y == 0
            align_verts = np.array(
                list(zip(xpos, ypos)) + list(zip(xpos, zeros))
            )
            ypos = zeros
        else:
            # other orientations: labels align along x == 0
            align_verts = np.array(
                list(zip(xpos, ypos)) + list(zip(zeros, ypos))
            )
            xpos = zeros
    return xpos, ypos, align_edges, align_verts
<SYSTEM_TASK:>
Get the length longest line in a paragraph
<END_TASK>
<USER_TASK:>
Description:
def get_longest_line_length(text):
    """Get the length of the longest line in a paragraph.

    Parameters
    ----------
    text : str

    Returns
    -------
    int
        Length of the longest line; 0 for an empty string.
    """
    # str.split always yields at least one element (possibly ''),
    # so max() never sees an empty sequence.
    return max(len(line) for line in text.split("\n"))
<SYSTEM_TASK:>
Format a number according to a given number of significant figures.
<END_TASK>
<USER_TASK:>
Description:
def significant_format(number, decimal_sep='.', thousand_sep=',', n=3):
    """Format a number according to a given number of significant figures.

    Renders significant(number, n) with the given decimal separator and
    optional thousands grouping; the sign is taken from the original value.
    """
    digits = significant(number, n)
    sign = '-' if float(number) < 0 else ''
    if digits.startswith('-'):
        digits = digits[1:]
    int_part, _dot, dec_part = digits.partition('.')
    if dec_part:
        dec_part = decimal_sep + dec_part
    if thousand_sep:
        # Group digits in threes from the right: reverse, chunk, join,
        # then reverse the whole string back.
        reversed_digits = int_part[::-1]
        chunks = [reversed_digits[i:i + 3]
                  for i in range(0, len(reversed_digits), 3)]
        int_part = thousand_sep.join(chunks)[::-1]
    return sign + int_part + dec_part
<SYSTEM_TASK:>
Create a QColor from specified string
<END_TASK>
<USER_TASK:>
Description:
def text_to_qcolor(text):
    """
    Create a QColor from specified string
    Avoid warning from Qt when an invalid QColor is instantiated
    """
    # A default-constructed QColor is invalid; it is returned unchanged
    # for any input we reject below.
    color = QColor()
    if not is_string(text): # testing for QString (PyQt API#1)
        text = str(text)
    if not is_text_string(text):
        return color
    if text.startswith('#') and len(text)==7:
        # Validate '#rrggbb' before handing it to Qt.
        # NOTE(review): the allowed set includes '#', so a stray '#'
        # after position 0 would pass this check — confirm intentional.
        correct = '#0123456789abcdef'
        for char in text:
            if char.lower() not in correct:
                return color
    elif text not in list(QColor.colorNames()):
        # Not a hex triplet: only Qt's predefined color names are accepted.
        return color
    color.setNamedColor(text)
    return color
<SYSTEM_TASK:>
Converts the table to a list of spans, for consistency.
<END_TASK>
<USER_TASK:>
Description:
def table_cells_2_spans(table, spans):
    """
    Converts the table to a list of spans, for consistency.

    This method combines the table data with the span data into a
    single, more consistent type. Any normal cell will become a span
    of just 1 column and 1 row.

    Parameters
    ----------
    table : list of lists of str
    spans : list of lists of int

    Returns
    -------
    table : list of lists of lists of int
        As you can imagine, this is pretty confusing for a human which
        is why data2rst accepts table data and span data separately.
    """
    # Every cell not already covered by an explicit span becomes a
    # single-cell span of its own.
    singles = [
        [[row, column]]
        for row in range(len(table))
        for column in range(len(table[row]))
        if not get_span(spans, row, column)
    ]
    return sorted(singles + spans)
<SYSTEM_TASK:>
Gets the number of columns in an html table.
<END_TASK>
<USER_TASK:>
Description:
def get_html_column_count(html_string):
    """
    Gets the number of columns in an html table.

    Parameters
    ----------
    html_string : str

    Returns
    -------
    int
        The number of columns in the table (0 if no table/rows found),
        or None when BeautifulSoup is unavailable.
    """
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        print("ERROR: You must have BeautifulSoup to use html2data")
        return
    soup = BeautifulSoup(html_string, 'html.parser')
    table = soup.find('table')
    if not table:
        return 0
    rows = table.findAll('tr')
    if len(rows) == 0:
        return 0
    column_counts = []
    for index, row in enumerate(rows):
        # The header row may use <th>; fall back to <td> if it doesn't.
        cells = row.findAll('th') if index == 0 else []
        if not cells:
            cells = row.findAll('td')
        # A cell with a colspan occupies several columns.
        count = sum(int(cell['colspan']) if cell.has_attr('colspan') else 1
                    for cell in cells)
        column_counts.append(count)
    return max(column_counts)
<SYSTEM_TASK:>
Add space to start and end of each string in a list of lists
<END_TASK>
<USER_TASK:>
Description:
def add_cushions(table):
    """
    Add space to start and end of each string in a list of lists

    Parameters
    ----------
    table : list of lists of str
        A table of rows of strings.

    Returns
    -------
    table : list of lists of str
        The same table object, padded in place.

    Note
    ----
    Each cell in an rst grid table should have a cushion of at least
    one space on each side of the string it contains, so that
    "| foo |" is produced rather than "|foo|". Empty lines inside a
    cell are left empty.
    """
    for row in table:
        for index, cell in enumerate(row):
            padded = [
                "" if line == "" else " " + line.rstrip() + " "
                for line in cell.split("\n")
            ]
            row[index] = "\n".join(padded)
    return table
<SYSTEM_TASK:>
Reads a newick tree from either a string or a file, and returns
<END_TASK>
<USER_TASK:>
Description:
def read_newick(newick, root_node=None, format=0):
    """
    Reads a newick tree from either a string or a file, and returns
    an ETE tree structure.

    A previously existent node object can be passed as the root of the
    tree, which means that all its new children will belong to the same
    class as the root (This allows to work with custom TreeNode objects).
    You can also take advantage from this behaviour to concatenate
    several tree structures.
    """
    ## check newick type as a string or filepath, Toytree parses urls to str's
    if isinstance(newick, six.string_types):
        if os.path.exists(newick):
            if newick.endswith('.gz'):
                import gzip
                # BUGFIX: open in text mode ('rt'); gzip.open() defaults
                # to binary, and a bytes payload breaks the str tests below.
                nw = gzip.open(newick, 'rt').read()
            else:
                # BUGFIX: 'rU' was removed in Python 3.11; plain 'r'
                # already enables universal newlines on Python 3.
                nw = open(newick, 'r').read()
        else:
            nw = newick
        ## get re matcher for testing newick formats
        matcher = compile_matchers(formatcode=format)
        nw = nw.strip()
        if not nw.startswith('(') and nw.endswith(';'):
            # single node without parentheses, e.g. "A:1.0;"
            return _read_node_data(nw[:-1], root_node, "single", matcher, format)
        elif not nw.startswith('(') or not nw.endswith(';'):
            raise NewickError('Unexisting tree file or Malformed newick tree structure.')
        else:
            return _read_newick_from_string(nw, root_node, matcher, format)
    else:
        raise NewickError("'newick' argument must be either a filename or a newick string.")
<SYSTEM_TASK:>
Reads a leaf node from a subpart of the original newicktree
<END_TASK>
<USER_TASK:>
Description:
def _read_node_data(subnw, current_node, node_type, matcher, formatcode):
    """
    Reads a leaf node from a subpart of the original newicktree

    Parses the fragment *subnw* with the regex selected by *node_type*
    and stores the extracted values as features on the target node.

    Parameters
    ----------
    subnw : str
        Newick fragment describing a single node.
    current_node : TreeNode
        For "leaf" a new child of this node is created; otherwise the
        data is attached to current_node itself.
    node_type : str
        Key into *matcher* ("leaf", "single", or internal type).
    matcher : dict
        Maps node_type to a (container1, container2, converterFn1,
        converterFn2, compiled_matcher) tuple.
    formatcode : int
        Newick format number; format 100 tolerates empty leaf nodes.

    Raises
    ------
    NewickError
        If the fragment does not match the expected newick format.
    """
    if node_type == "leaf" or node_type == "single":
        if node_type == "leaf":
            node = current_node.add_child()
        else:
            node = current_node
    else:
        node = current_node
    subnw = subnw.strip()
    if not subnw and node_type == 'leaf' and formatcode != 100:
        raise NewickError('Empty leaf node found')
    elif not subnw:
        return
    container1, container2, converterFn1, converterFn2, compiled_matcher = matcher[node_type]
    data = re.match(compiled_matcher, subnw)
    if data:
        data = data.groups()
        # This prevents ignoring errors even in flexible nodes:
        if subnw and data[0] is None and data[1] is None and data[2] is None:
            raise NewickError("Unexpected newick format '%s'" %subnw)
        if data[0] is not None and data[0] != '':
            node.add_feature(container1, converterFn1(data[0].strip()))
        if data[1] is not None and data[1] != '':
            # data[1] begins with the ':' separator; drop it before converting.
            node.add_feature(container2, converterFn2(data[1][1:].strip()))
        if data[2] is not None \
            and data[2].startswith("[&&NHX"):
            # Extended NHX comment block: parse key=value pairs as features.
            _parse_extra_features(node, data[2])
    else:
        raise NewickError("Unexpected newick format '%s' " %subnw[0:50])
    return
<SYSTEM_TASK:>
Iteratively export a tree structure and returns its NHX
<END_TASK>
<USER_TASK:>
Description:
def write_newick(rootnode,
                 features=None,
                 format=1,
                 format_root_node=True,
                 is_leaf_fn=None,
                 dist_formatter=None,
                 support_formatter=None,
                 name_formatter=None):
    """
    Iteratively export a tree structure and returns its NHX
    representation.

    Walks the tree in pre/post order, emitting "(" when descending into
    an internal node, "," between siblings, the formatted data for each
    leaf, and (on the postorder pass) ")" plus the internal node's data.

    Args:
        rootnode: root of the (sub)tree to export.
        features: feature names to embed as [&&NHX:...] blocks.
        format: newick format code forwarded to format_node().
        format_root_node (bool): whether to write data for the root itself.
        is_leaf_fn: optional predicate overriding the default leaf test.
        dist_formatter/support_formatter/name_formatter: optional
            formatters forwarded to format_node().

    Returns:
        str: the newick/NHX string, terminated by ';'.
    """
    newick = []
    leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children)
    for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn):
        if postorder:
            newick.append(")")
            if node.up is not None or format_root_node:
                newick.append(format_node(node, "internal", format,
                                          dist_formatter=dist_formatter,
                                          support_formatter=support_formatter,
                                          name_formatter=name_formatter))
                newick.append(_get_features_string(node, features))
        else:
            if node is not rootnode and node != node.up.children[0]:
                newick.append(",")
            if leaf(node):
                # CLEANUP: removed a dead 'safe_name' local that was
                # computed here but never used.
                newick.append(format_node(node, "leaf", format,
                                          dist_formatter=dist_formatter,
                                          support_formatter=support_formatter,
                                          name_formatter=name_formatter))
                newick.append(_get_features_string(node, features))
            else:
                newick.append("(")
    newick.append(";")
    return ''.join(newick)
<SYSTEM_TASK:>
Generates the extended newick string NHX with extra data about a node.
<END_TASK>
<USER_TASK:>
Description:
def _get_features_string(self, features=None):
    """ Generates the extended newick string NHX with extra data about a node.

    Args:
        features (list or None): feature names to serialize. None means
            no features; an empty list means all of self.features.

    Returns:
        str: "[&&NHX:key=value:...]" or "" when nothing is written.
    """
    string = ""
    if features is None:
        features = []
    elif features == []:
        features = self.features
    for pr in features:
        if hasattr(self, pr):
            raw = getattr(self, pr)
            if type(raw) in ITERABLE_TYPES:
                raw = '|'.join([str(i) for i in raw])
            elif type(raw) == dict:
                # BUGFIX: the previous map(lambda x, y: ...) over dict
                # items raised TypeError — each item is a single (k, v)
                # tuple, not two separate arguments.
                raw = '|'.join("%s-%s" % (k, v)
                               for k, v in six.iteritems(raw))
            elif type(raw) == str:
                pass
            else:
                raw = str(raw)
            # Replace characters that would corrupt the newick syntax.
            value = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", raw)
            if string != "":
                string +=":"
            string +="%s=%s" %(pr, str(value))
    if string != "":
        string = "[&&NHX:"+string+"]"
    return string
<SYSTEM_TASK:>
Get the character width of a column in a table
<END_TASK>
<USER_TASK:>
Description:
def get_column_width(column, table):
    """
    Get the character width of a column in a table

    Parameters
    ----------
    column : int
        The column index analyze
    table : list of lists of str
        The table of rows of strings. For this to be accurate, each
        string must only be 1 line long.

    Returns
    -------
    width : int
        The widest cell in the column, with a minimum of 3.
    """
    # 3 is the minimum width; seeding max() with it also makes an
    # empty table safe.
    return max([3] + [len(row[column]) for row in table])
<SYSTEM_TASK:>
Horizontally center the text within a cell's grid
<END_TASK>
<USER_TASK:>
Description:
def center_cell_text(cell):
    """
    Horizontally center the text within a cell's grid

    Like this::

        +---------+     +---------+
        | foo     | --> |   foo   |
        +---------+     +---------+

    Parameters
    ----------
    cell : dashtable.data2rst.Cell

    Returns
    -------
    cell : dashtable.data2rst.Cell
    """
    lines = cell.text.split('\n')
    # interior width: first line is a border like "+-----+", minus the
    # two corner characters
    cell_width = len(lines[0]) - 2
    # strip the border character and cushion space from each interior
    # line; keep empty sentinels for the top and bottom border rows
    truncated_lines = ['']
    for i in range(1, len(lines) - 1):
        truncated = lines[i][2:len(lines[i]) - 2].rstrip()
        truncated_lines.append(truncated)
    truncated_lines.append('')
    max_line_length = get_longest_line_length('\n'.join(truncated_lines))
    # split the leftover horizontal space, rounding the left half down
    remainder = cell_width - max_line_length
    left_width = math.floor(remainder / 2)
    left_space = left_width * ' '
    for i in range(len(truncated_lines)):
        truncated_lines[i] = left_space + truncated_lines[i]
        right_width = cell_width - len(truncated_lines[i])
        truncated_lines[i] += right_width * ' '
    # reassemble interior lines between their original border characters
    for i in range(1, len(lines) - 1):
        lines[i] = ''.join([
            lines[i][0], truncated_lines[i], lines[i][-1]
        ])
    cell.text = '\n'.join(lines)
    return cell
<SYSTEM_TASK:>
Force each cell in the table to be a string
<END_TASK>
<USER_TASK:>
Description:
def ensure_table_strings(table):
    """
    Force each cell in the table to be a string

    Cells are converted in place; the same table object is returned.

    Parameters
    ----------
    table : list of lists

    Returns
    -------
    table : list of lists of str
    """
    for row in table:
        for index, cell in enumerate(row):
            row[index] = str(cell)
    return table
<SYSTEM_TASK:>
The number of sections that touch the left side.
<END_TASK>
<USER_TASK:>
Description:
def left_sections(self):
    """
    The number of sections that touch the left side.

    During merging, the cell's text will grow to include other
    cells. This property keeps track of the number of sections that
    are touching the left side. For example::

        +-----+-----+
        | foo | dog |   <-- 2 sections on the left
        +-----+-----+
        | cat |
        +-----+

    Returns
    -------
    sections : int
        The number of sections on the left
    """
    # Every border line on the left edge starts with '+'; n border
    # lines delimit n - 1 sections.
    return sum(line.startswith('+')
               for line in self.text.split('\n')) - 1
<SYSTEM_TASK:>
The number of sections that touch the right side.
<END_TASK>
<USER_TASK:>
Description:
def right_sections(self):
    """
    The number of sections that touch the right side.

    Returns
    -------
    sections : int
        The number of sections on the right
    """
    # Every border line on the right edge ends with '+'; n border
    # lines delimit n - 1 sections.
    return sum(line.endswith('+')
               for line in self.text.split('\n')) - 1
<SYSTEM_TASK:>
The number of sections that touch the top side.
<END_TASK>
<USER_TASK:>
Description:
def top_sections(self):
    """
    The number of sections that touch the top side.

    Returns
    -------
    sections : int
        The number of sections on the top
    """
    first_line = self.text.split('\n')[0]
    # n '+' corner characters on a border delimit n - 1 sections
    return first_line.count('+') - 1
<SYSTEM_TASK:>
The number of cells that touch the bottom side.
<END_TASK>
<USER_TASK:>
Description:
def bottom_sections(self):
    """
    The number of cells that touch the bottom side.

    Returns
    -------
    sections : int
        The number of sections on the bottom
    """
    last_line = self.text.split('\n')[-1]
    # n '+' corner characters on a border delimit n - 1 sections
    return last_line.count('+') - 1
<SYSTEM_TASK:>
Whether or not the cell is a header
<END_TASK>
<USER_TASK:>
Description:
def is_header(self):
    """
    Whether or not the cell is a header

    Any header cell will have "=" instead of "-" on its border.
    For example, this is a header cell::

        +-----+
        | foo |
        +=====+

    while this cell is not::

        +-----+
        | foo |
        +-----+

    Returns
    -------
    bool
        Whether or not the cell is a header
    """
    border = self.text.split('\n')[-1]
    # Header borders are made exclusively of '+' and '=' characters.
    return bool(is_only(border, ['+', '=']))
<SYSTEM_TASK:>
Returns a numeric identifier of the latest git changeset.
<END_TASK>
<USER_TASK:>
Description:
def get_git_changeset(filename=None):
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    """
    repo_dir = os.path.dirname(filename or __file__)
    # First line of the output is the commit's unix timestamp.
    git_log = sh('git show --pretty=format:%ct --quiet HEAD',
                 cwd=repo_dir)
    raw_stamp = git_log.partition('\n')[0]
    try:
        changeset = datetime.datetime.utcfromtimestamp(int(raw_stamp))
    except ValueError:
        # Not a git checkout (or git failed): no changeset identifier.
        return None
    return changeset.strftime('%Y%m%d%H%M%S')
<SYSTEM_TASK:>
Recursively go through a tag's children, converting them, then
<END_TASK>
<USER_TASK:>
Description:
def process_tag(node):
    """
    Recursively go through a tag's children, converting them, then
    convert the tag itself.
    """
    text = ''
    # Child tags of these elements are not recursed into here.
    exceptions = ['table']
    for element in node.children:
        if isinstance(element, NavigableString):
            text += element
        elif not node.name in exceptions:
            # NOTE(review): this checks node.name (the parent), not
            # element.name, so ALL children of a 'table' tag are skipped
            # — confirm tables are converted elsewhere.
            text += process_tag(element)
    try:
        # Dispatch to a module-level convert_<tagname>() if one exists.
        convert_fn = globals()["convert_%s" % node.name.lower()]
        text = convert_fn(node, text)
    except KeyError:
        # No converter registered for this tag: keep the child text as-is.
        pass
    return text
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.