text_prompt (string, 100–17.7k chars, nullable) | code_prompt (string, 7–9.86k chars, nullable)
---|---|
<SYSTEM_TASK:>
Finds all the biconnected components in a graph.
<END_TASK>
<USER_TASK:>
Description:
def find_biconnected_components(graph):
"""Finds all the biconnected components in a graph.
Returns a list of lists, each containing the edges that form a biconnected component.
Returns an empty list for an empty graph.
""" |
list_of_components = []
# Run the algorithm on each of the connected components of the graph
components = get_connected_components_as_subgraphs(graph)
for component in components:
# --Call the internal biconnected components function to find
# --the edge lists for this particular connected component
edge_list = _internal_get_biconnected_components_edge_lists(component)
list_of_components.extend(edge_list)
return list_of_components |
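A minimal usage sketch (not from the original source), assuming the UndirectedGraph class and its new_node()/new_edge() methods defined later in this file:

# Illustrative only: a triangle plus a pendant edge gives two biconnected components.
g = UndirectedGraph()
a, b, c, d = g.new_node(), g.new_node(), g.new_node(), g.new_node()
g.new_edge(a, b)
g.new_edge(b, c)
g.new_edge(c, a)   # the triangle a-b-c forms one biconnected component
g.new_edge(c, d)   # the bridge c-d forms a second, single-edge component
print(find_biconnected_components(g))  # two edge-id lists are expected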
<SYSTEM_TASK:>
Finds the biconnected components and returns them as subgraphs.
<END_TASK>
<USER_TASK:>
Description:
def find_biconnected_components_as_subgraphs(graph):
"""Finds the biconnected components and returns them as subgraphs.""" |
list_of_graphs = []
list_of_components = find_biconnected_components(graph)
for edge_list in list_of_components:
subgraph = get_subgraph_from_edge_list(graph, edge_list)
list_of_graphs.append(subgraph)
return list_of_graphs |
<SYSTEM_TASK:>
Finds all of the articulation vertices within a graph.
<END_TASK>
<USER_TASK:>
Description:
def find_articulation_vertices(graph):
"""Finds all of the articulation vertices within a graph.
Returns a list of all articulation vertices within the graph.
Returns an empty list for an empty graph.
""" |
articulation_vertices = []
all_nodes = graph.get_all_node_ids()
if len(all_nodes) == 0:
return articulation_vertices
# Run the algorithm on each of the connected components of the graph
components = get_connected_components_as_subgraphs(graph)
for component in components:
# --Call the internal articulation vertices function to find
# --the node list for this particular connected component
vertex_list = _internal_get_cut_vertex_list(component)
articulation_vertices.extend(vertex_list)
return articulation_vertices |
<SYSTEM_TASK:>
Helper function to pop edges off the stack and produce a list of them.
<END_TASK>
<USER_TASK:>
Description:
def output_component(graph, edge_stack, u, v):
"""Helper function to pop edges off the stack and produce a list of them.""" |
edge_list = []
while len(edge_stack) > 0:
edge_id = edge_stack.popleft()
edge_list.append(edge_id)
edge = graph.get_edge(edge_id)
tpl_a = (u, v)
tpl_b = (v, u)
if tpl_a == edge['vertices'] or tpl_b == edge['vertices']:
break
return edge_list |
<SYSTEM_TASK:>
Performs a depth-first search with visiting order of nodes determined by provided adjacency lists,
<END_TASK>
<USER_TASK:>
Description:
def depth_first_search_with_parent_data(graph, root_node = None, adjacency_lists = None):
"""Performs a depth-first search with visiting order of nodes determined by provided adjacency lists,
and also returns a parent lookup dict and a children lookup dict.""" |
ordering = []
parent_lookup = {}
children_lookup = defaultdict(lambda: [])
all_nodes = graph.get_all_node_ids()
if not all_nodes:
return ordering, parent_lookup, children_lookup
stack = deque()
discovered = defaultdict(lambda: False)
unvisited_nodes = set(all_nodes)
if root_node is None:
root_node = all_nodes[0]
if adjacency_lists is None:
adj = lambda v: graph.neighbors(v)
else:
adj = lambda v: adjacency_lists[v]
# --Initialize the stack, simulating the DFS call on the root node
stack.appendleft(root_node)
parent_lookup[root_node] = root_node
# We're using a non-recursive implementation of DFS, since Python isn't great for deep recursion
while True:
# Main DFS Loop
while len(stack) > 0:
u = stack.popleft()
if not discovered[u]:
discovered[u] = True
if u in unvisited_nodes:
unvisited_nodes.remove(u)
ordering.append(u)
neighbors = adj(u)
# When adding the new nodes to the stack, we want to add them in reverse order so that
# the order the nodes are visited is the same as with a recursive DFS implementation
for n in neighbors[::-1]:
if discovered[n]:
# If the node already exists in the discovered nodes list
# we don't want to re-add it to the stack
continue
stack.appendleft(n)
parent_lookup[n] = u
children_lookup[u].append(n)
# While there are still nodes that need visiting, repopulate the stack
if len(unvisited_nodes) > 0:
u = unvisited_nodes.pop()
stack.appendleft(u)
else:
break
return ordering, parent_lookup, children_lookup |
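A short usage sketch (assumed, not from the source) showing the three return values, reusing the UndirectedGraph API from this module:

g = UndirectedGraph()
r, a, b = g.new_node(), g.new_node(), g.new_node()
g.new_edge(r, a)
g.new_edge(r, b)
ordering, parents, children = depth_first_search_with_parent_data(g, root_node=r)
print(ordering)      # a DFS visit order starting at r, e.g. [r, a, b]
print(parents[a])    # r; note that parents[r] == r by construction
print(children[r])   # the children discovered under r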
<SYSTEM_TASK:>
Produces a DOT specification string from the provided graph.
<END_TASK>
<USER_TASK:>
Description:
def graph_to_dot(graph, node_renderer=None, edge_renderer=None):
"""Produces a DOT specification string from the provided graph.""" |
node_pairs = list(graph.nodes.items())
edge_pairs = list(graph.edges.items())
if node_renderer is None:
node_renderer_wrapper = lambda nid: ''
else:
node_renderer_wrapper = lambda nid: ' [%s]' % ','.join(
['%s=%s' % tpl for tpl in list(node_renderer(graph, nid).items())])
# Start the graph
graph_string = 'digraph G {\n'
graph_string += 'overlap=scale;\n'
# Print the nodes (placeholder)
for node_id, node in node_pairs:
graph_string += '%i%s;\n' % (node_id, node_renderer_wrapper(node_id))
# Print the edges
for edge_id, edge in edge_pairs:
node_a = edge['vertices'][0]
node_b = edge['vertices'][1]
graph_string += '%i -> %i;\n' % (node_a, node_b)
# Finish the graph
graph_string += '}'
return graph_string |
<SYSTEM_TASK:>
Finds all connected components of the graph.
<END_TASK>
<USER_TASK:>
Description:
def get_connected_components(graph):
"""Finds all connected components of the graph.
Returns a list of lists, each containing the nodes that form a connected component.
Returns an empty list for an empty graph.
""" |
list_of_components = []
component = [] # Not strictly necessary due to the while loop structure, but it helps the automated analysis tools
# Store a list of all unreached vertices
unreached = set(graph.get_all_node_ids())
to_explore = deque()
while len(unreached) > 0:
# This happens when we reach the end of a connected component and still have more vertices to search through
if len(to_explore) == 0:
n = unreached.pop()
unreached.add(n)
to_explore.append(n)
component = []
list_of_components.append(component)
# This is the BFS that searches for connected vertices
while len(to_explore) > 0:
n = to_explore.pop()
if n in unreached:
component.append(n)
unreached.remove(n)
neighbors = graph.neighbors(n)
for neighbor in neighbors:
if neighbor in unreached:
to_explore.append(neighbor)
return list_of_components |
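A hypothetical example (assuming the UndirectedGraph API from this module) with one edge and one isolated node, yielding two components:

g = UndirectedGraph()
a, b, c = g.new_node(), g.new_node(), g.new_node()
g.new_edge(a, b)
print(get_connected_components(g))  # e.g. [[a, b], [c]] -- ordering may vary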
<SYSTEM_TASK:>
Finds all connected components of the graph.
<END_TASK>
<USER_TASK:>
Description:
def get_connected_components_as_subgraphs(graph):
"""Finds all connected components of the graph.
Returns a list of graph objects, each representing a connected component.
Returns an empty list for an empty graph.
""" |
components = get_connected_components(graph)
list_of_graphs = []
for c in components:
edge_ids = set()
nodes = [graph.get_node(node) for node in c]
for n in nodes:
# --Loop through the edges in each node, to determine if it should be included
for e in n['edges']:
# --Only add the edge to the subgraph if both ends are in the subgraph
edge = graph.get_edge(e)
a, b = edge['vertices']
if a in c and b in c:
edge_ids.add(e)
# --Build the subgraph and add it to the list
list_of_edges = list(edge_ids)
subgraph = make_subgraph(graph, c, list_of_edges)
list_of_graphs.append(subgraph)
return list_of_graphs |
<SYSTEM_TASK:>
Adds a new, undirected edge between node_a and node_b with a cost.
<END_TASK>
<USER_TASK:>
Description:
def new_edge(self, node_a, node_b, cost=1):
"""Adds a new, undirected edge between node_a and node_b with a cost.
Returns the edge id of the new edge.""" |
edge_id = super(UndirectedGraph, self).new_edge(node_a, node_b, cost)
self.nodes[node_b]['edges'].append(edge_id)
return edge_id |
<SYSTEM_TASK:>
Removes the edge identified by "edge_id" from the graph.
<END_TASK>
<USER_TASK:>
Description:
def delete_edge_by_id(self, edge_id):
"""Removes the edge identified by "edge_id" from the graph.""" |
edge = self.get_edge(edge_id)
# Remove the edge from the "from node"
# --Determine the from node
from_node_id = edge['vertices'][0]
from_node = self.get_node(from_node_id)
# --Remove the edge from it
from_node['edges'].remove(edge_id)
# Remove the edge from the "to node"
to_node_id = edge['vertices'][1]
to_node = self.get_node(to_node_id)
# --Remove the edge from it
to_node['edges'].remove(edge_id)
# Remove the edge from the edge list
del self.edges[edge_id]
self._num_edges -= 1 |
<SYSTEM_TASK:>
Calculates a minimum spanning tree for a graph.
<END_TASK>
<USER_TASK:>
Description:
def find_minimum_spanning_tree(graph):
"""Calculates a minimum spanning tree for a graph.
Returns a list of edges that define the tree.
Returns an empty list for an empty graph.
""" |
mst = []
if graph.num_nodes() == 0:
return mst
if graph.num_edges() == 0:
return mst
connected_components = get_connected_components(graph)
if len(connected_components) > 1:
raise DisconnectedGraphError
edge_list = kruskal_mst(graph)
return edge_list |
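A usage sketch (an assumption, not source code) relying on the weighted new_edge(node_a, node_b, cost) method shown later in this file:

g = UndirectedGraph()
a, b, c = g.new_node(), g.new_node(), g.new_node()
e_ab = g.new_edge(a, b, cost=1)
e_bc = g.new_edge(b, c, cost=2)
e_ca = g.new_edge(c, a, cost=5)
print(find_minimum_spanning_tree(g))  # the two cheapest edges, e.g. [e_ab, e_bc]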
<SYSTEM_TASK:>
Calculates a minimum spanning tree and returns a graph representation.
<END_TASK>
<USER_TASK:>
Description:
def find_minimum_spanning_tree_as_subgraph(graph):
"""Calculates a minimum spanning tree and returns a graph representation.""" |
edge_list = find_minimum_spanning_tree(graph)
subgraph = get_subgraph_from_edge_list(graph, edge_list)
return subgraph |
<SYSTEM_TASK:>
Calculates the minimum spanning forest of a disconnected graph.
<END_TASK>
<USER_TASK:>
Description:
def find_minimum_spanning_forest(graph):
"""Calculates the minimum spanning forest of a disconnected graph.
Returns a list of lists, each containing the edges that define that tree.
Returns an empty list for an empty graph.
""" |
msf = []
if graph.num_nodes() == 0:
return msf
if graph.num_edges() == 0:
return msf
connected_components = get_connected_components_as_subgraphs(graph)
for subgraph in connected_components:
edge_list = kruskal_mst(subgraph)
msf.append(edge_list)
return msf |
<SYSTEM_TASK:>
Calculates the minimum spanning forest and returns a list of trees as subgraphs.
<END_TASK>
<USER_TASK:>
Description:
def find_minimum_spanning_forest_as_subgraphs(graph):
"""Calculates the minimum spanning forest and returns a list of trees as subgraphs.""" |
forest = find_minimum_spanning_forest(graph)
list_of_subgraphs = [get_subgraph_from_edge_list(graph, edge_list) for edge_list in forest]
return list_of_subgraphs |
<SYSTEM_TASK:>
Implements Kruskal's Algorithm for finding minimum spanning trees.
<END_TASK>
<USER_TASK:>
Description:
def kruskal_mst(graph):
"""Implements Kruskal's Algorithm for finding minimum spanning trees.
Assumes a non-empty, connected graph.
""" |
edges_accepted = 0
ds = DisjointSet()
pq = PriorityQueue()
accepted_edges = []
label_lookup = {}
nodes = graph.get_all_node_ids()
num_vertices = len(nodes)
for n in nodes:
label = ds.add_set()
label_lookup[n] = label
edges = graph.get_all_edge_objects()
for e in edges:
pq.put(e['id'], e['cost'])
while edges_accepted < (num_vertices - 1):
edge_id = pq.get()
edge = graph.get_edge(edge_id)
node_a, node_b = edge['vertices']
label_a = label_lookup[node_a]
label_b = label_lookup[node_b]
a_set = ds.find(label_a)
b_set = ds.find(label_b)
if a_set != b_set:
edges_accepted += 1
accepted_edges.append(edge_id)
ds.union(a_set, b_set)
return accepted_edges |
<SYSTEM_TASK:>
Gets the main cycle of the dfs tree.
<END_TASK>
<USER_TASK:>
Description:
def __get_cycle(graph, ordering, parent_lookup):
"""Gets the main cycle of the dfs tree.""" |
root_node = ordering[0]
for i in range(2, len(ordering)):
current_node = ordering[i]
if graph.adjacent(current_node, root_node):
path = []
while current_node != root_node:
path.append(current_node)
current_node = parent_lookup[current_node]
path.append(root_node)
path.reverse()
return path |
<SYSTEM_TASK:>
Calculates the segments that can emanate from a particular node on the main cycle.
<END_TASK>
<USER_TASK:>
Description:
def __get_segments_from_node(node, graph):
"""Calculates the segments that can emanate from a particular node on the main cycle.""" |
list_of_segments = []
node_object = graph.get_node(node)
for e in node_object['edges']:
list_of_segments.append(e)
return list_of_segments |
<SYSTEM_TASK:>
Calculates the segments that emanate from the main cycle.
<END_TASK>
<USER_TASK:>
Description:
def __get_segments_from_cycle(graph, cycle_path):
"""Calculates the segments that emanate from the main cycle.""" |
list_of_segments = []
# We work through the cycle in a bottom-up fashion
for n in cycle_path[::-1]:
segments = __get_segments_from_node(n, graph)
if segments:
list_of_segments.append(segments)
return list_of_segments |
<SYSTEM_TASK:>
Converts a subgraph given by a list of vertices and edges into a graph object.
<END_TASK>
<USER_TASK:>
Description:
def make_subgraph(graph, vertices, edges):
"""Converts a subgraph given by a list of vertices and edges into a graph object.""" |
# Copy the entire graph
local_graph = copy.deepcopy(graph)
# Remove all the edges that aren't in the list
edges_to_delete = [x for x in local_graph.get_all_edge_ids() if x not in edges]
for e in edges_to_delete:
local_graph.delete_edge_by_id(e)
# Remove all the vertices that aren't in the list
nodes_to_delete = [x for x in local_graph.get_all_node_ids() if x not in vertices]
for n in nodes_to_delete:
local_graph.delete_node(n)
return local_graph |
<SYSTEM_TASK:>
Converts a directed graph into an undirected graph. Directed edges are made undirected.
<END_TASK>
<USER_TASK:>
Description:
def convert_graph_directed_to_undirected(dg):
"""Converts a directed graph into an undirected graph. Directed edges are made undirected.""" |
udg = UndirectedGraph()
# Copy the graph
# --Copy nodes
# --Copy edges
udg.nodes = copy.deepcopy(dg.nodes)
udg.edges = copy.deepcopy(dg.edges)
udg.next_node_id = dg.next_node_id
udg.next_edge_id = dg.next_edge_id
# Convert the directed edges into undirected edges
for edge_id in udg.get_all_edge_ids():
edge = udg.get_edge(edge_id)
target_node_id = edge['vertices'][1]
target_node = udg.get_node(target_node_id)
target_node['edges'].append(edge_id)
return udg |
<SYSTEM_TASK:>
Removes duplicate edges from a directed graph.
<END_TASK>
<USER_TASK:>
Description:
def remove_duplicate_edges_directed(dg):
"""Removes duplicate edges from a directed graph.""" |
# With directed edges, we can just hash the (from, to) node id tuples and if
# an edge happens to conflict with one that already exists, we delete it
# --For aesthetics, we sort the edge ids so that lower edge ids are kept
lookup = {}
edges = sorted(dg.get_all_edge_ids())
for edge_id in edges:
e = dg.get_edge(edge_id)
tpl = e['vertices']
if tpl in lookup:
dg.delete_edge_by_id(edge_id)
else:
lookup[tpl] = edge_id |
<SYSTEM_TASK:>
Removes duplicate edges from an undirected graph.
<END_TASK>
<USER_TASK:>
Description:
def remove_duplicate_edges_undirected(udg):
"""Removes duplicate edges from an undirected graph.""" |
# With undirected edges, we need to hash both orderings of the node id pair, since a-b and b-a are equivalent
# --For aesthetics, we sort the edge ids so that lower edge ids are kept
lookup = {}
edges = sorted(udg.get_all_edge_ids())
for edge_id in edges:
e = udg.get_edge(edge_id)
tpl_a = e['vertices']
tpl_b = (tpl_a[1], tpl_a[0])
if tpl_a in lookup or tpl_b in lookup:
udg.delete_edge_by_id(edge_id)
else:
lookup[tpl_a] = edge_id
lookup[tpl_b] = edge_id |
<SYSTEM_TASK:>
Transforms a list of edges into a list of the nodes those edges connect.
<END_TASK>
<USER_TASK:>
Description:
def get_vertices_from_edge_list(graph, edge_list):
"""Transforms a list of edges into a list of the nodes those edges connect.
Returns a list of nodes, or an empty list if given an empty list.
""" |
node_set = set()
for edge_id in edge_list:
edge = graph.get_edge(edge_id)
a, b = edge['vertices']
node_set.add(a)
node_set.add(b)
return list(node_set) |
<SYSTEM_TASK:>
Transforms a list of edges into a subgraph.
<END_TASK>
<USER_TASK:>
Description:
def get_subgraph_from_edge_list(graph, edge_list):
"""Transforms a list of edges into a subgraph.""" |
node_list = get_vertices_from_edge_list(graph, edge_list)
subgraph = make_subgraph(graph, node_list, edge_list)
return subgraph |
<SYSTEM_TASK:>
Merges an ''addition_graph'' into the ''main_graph''.
<END_TASK>
<USER_TASK:>
Description:
def merge_graphs(main_graph, addition_graph):
"""Merges an ''addition_graph'' into the ''main_graph''.
Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids.
""" |
node_mapping = {}
edge_mapping = {}
for node in addition_graph.get_all_node_objects():
node_id = node['id']
new_id = main_graph.new_node()
node_mapping[node_id] = new_id
for edge in addition_graph.get_all_edge_objects():
edge_id = edge['id']
old_vertex_a_id, old_vertex_b_id = edge['vertices']
new_vertex_a_id = node_mapping[old_vertex_a_id]
new_vertex_b_id = node_mapping[old_vertex_b_id]
new_edge_id = main_graph.new_edge(new_vertex_a_id, new_vertex_b_id)
edge_mapping[edge_id] = new_edge_id
return node_mapping, edge_mapping |
<SYSTEM_TASK:>
Generates a graph from an adjacency matrix specification.
<END_TASK>
<USER_TASK:>
Description:
def create_graph_from_adjacency_matrix(adjacency_matrix):
"""Generates a graph from an adjacency matrix specification.
Returns a tuple containing the graph and a list mapping matrix column indices to node ids.
The graph will be an UndirectedGraph if the provided adjacency matrix is symmetric.
The graph will be a DirectedGraph if the provided adjacency matrix is not symmetric.
Ref: http://mathworld.wolfram.com/AdjacencyMatrix.html""" |
if is_adjacency_matrix_symmetric(adjacency_matrix):
graph = UndirectedGraph()
else:
graph = DirectedGraph()
node_column_mapping = []
num_columns = len(adjacency_matrix)
for _ in range(num_columns):
node_id = graph.new_node()
node_column_mapping.append(node_id)
for j in range(num_columns):
for i in range(num_columns):
if adjacency_matrix[j][i]:
jnode_id = node_column_mapping[j]
inode_id = node_column_mapping[i]
# Because of our adjacency matrix encoding, [j][i] in our code corresponds to [i][j] in a traditional matrix interpretation
# Thus, we need to put an edge from node i to node j if [j][i] in our code is non-zero
graph.new_edge(inode_id, jnode_id)
return (graph, node_column_mapping) |
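A small illustrative call (assumed), showing that a symmetric matrix produces an UndirectedGraph, along with the returned column-to-node mapping:

adjacency = [[0, 1, 0],
             [1, 0, 1],
             [0, 1, 0]]
graph, node_column_mapping = create_graph_from_adjacency_matrix(adjacency)
print(type(graph).__name__)   # UndirectedGraph, since the matrix is symmetric
print(node_column_mapping)    # node id assigned to each matrix column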
<SYSTEM_TASK:>
Adds a new set to the forest.
<END_TASK>
<USER_TASK:>
Description:
def add_set(self):
"""Adds a new set to the forest.
Returns a label by which the new set can be referenced
""" |
self.__label_counter += 1
new_label = self.__label_counter
self.__forest[new_label] = -1 # A negative value marks this label as the root of its own tree (rank is encoded as a negative number)
self.__set_counter += 1
return new_label |
<SYSTEM_TASK:>
Finds the set containing the node_label.
<END_TASK>
<USER_TASK:>
Description:
def find(self, node_label):
"""Finds the set containing the node_label.
Returns the set label.
""" |
queue = []
current_node = node_label
while self.__forest[current_node] >= 0:
queue.append(current_node)
current_node = self.__forest[current_node]
root_node = current_node
# Path compression
for n in queue:
self.__forest[n] = root_node
return root_node |
<SYSTEM_TASK:>
Joins two sets into a single new set.
<END_TASK>
<USER_TASK:>
Description:
def union(self, label_a, label_b):
"""Joins two sets into a single new set.
label_a, label_b can be any nodes within the sets
""" |
# Base case to avoid work
if label_a == label_b:
return
# Find the tree root of each node
root_a = self.find(label_a)
root_b = self.find(label_b)
# Avoid merging a tree to itself
if root_a == root_b:
return
self.__internal_union(root_a, root_b)
self.__set_counter -= 1 |
<SYSTEM_TASK:>
Internal function to join two set trees specified by root_a and root_b.
<END_TASK>
<USER_TASK:>
Description:
def __internal_union(self, root_a, root_b):
"""Internal function to join two set trees specified by root_a and root_b.
Assumes root_a and root_b are distinct.
""" |
# Merge the trees, smaller to larger
update_rank = False
# --Determine the larger tree
rank_a = self.__forest[root_a]
rank_b = self.__forest[root_b]
# Ranks are stored as negative numbers, so a smaller stored value means a larger tree
if rank_a < rank_b:
larger = root_a
smaller = root_b
else:
larger = root_b
smaller = root_a
if rank_a == rank_b:
update_rank = True
# --Make the smaller tree a subtree of the larger tree
self.__forest[smaller] = larger
# --Update the rank of the new tree (if necessary)
if update_rank:
self.__forest[larger] -= 1 |
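The DisjointSet class above is only exercised indirectly by kruskal_mst, so here is a standalone usage sketch (illustrative, not from the source):

ds = DisjointSet()
x, y, z = ds.add_set(), ds.add_set(), ds.add_set()
ds.union(x, y)
print(ds.find(x) == ds.find(y))  # True: x and y now share a root
print(ds.find(x) == ds.find(z))  # False: z is still in its own set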
<SYSTEM_TASK:>
Determines whether a graph is planar or not.
<END_TASK>
<USER_TASK:>
Description:
def is_planar(graph):
"""Determines whether a graph is planar or not.""" |
# Determine connected components as subgraphs; their planarity is independent of each other
connected_components = get_connected_components_as_subgraphs(graph)
for component in connected_components:
# Biconnected components likewise have independent planarity
biconnected_components = find_biconnected_components_as_subgraphs(component)
for bi_component in biconnected_components:
planarity = __is_subgraph_planar(bi_component)
if not planarity:
return False
return True |
<SYSTEM_TASK:>
Internal function to determine if a subgraph is planar.
<END_TASK>
<USER_TASK:>
Description:
def __is_subgraph_planar(graph):
"""Internal function to determine if a subgraph is planar.""" |
# --First pass: Determine edge and vertex counts and validate them against Euler's Formula
num_nodes = graph.num_nodes()
num_edges = graph.num_edges()
# --We can guarantee that if there are 4 or fewer nodes, then the graph is planar
# --A 4-node simple graph has a maximum of 6 possible edges (K4); this will always satisfy Euler's Formula:
# -- 6 <= 3(4 - 2)
if num_nodes < 5:
return True
if num_edges > 3*(num_nodes - 2):
return False
# --At this point, we have no choice but to run the calculation the hard way
return kocay_planarity_test(graph) |
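As a worked check of the edge-count bound above: K5 has 5 nodes and 10 edges, and 10 > 3 * (5 - 2) = 9, so it is rejected as non-planar without running the full Kocay test.

num_nodes, num_edges = 5, 10
print(num_edges > 3 * (num_nodes - 2))  # True -> cannot be planar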
<SYSTEM_TASK:>
Sets up the dfs_data object, for consistency.
<END_TASK>
<USER_TASK:>
Description:
def __setup_dfs_data(graph, adj):
"""Sets up the dfs_data object, for consistency.""" |
dfs_data = __get_dfs_data(graph, adj)
dfs_data['graph'] = graph
dfs_data['adj'] = adj
L1, L2 = __low_point_dfs(dfs_data)
dfs_data['lowpoint_1_lookup'] = L1
dfs_data['lowpoint_2_lookup'] = L2
edge_weights = __calculate_edge_weights(dfs_data)
dfs_data['edge_weights'] = edge_weights
return dfs_data |
<SYSTEM_TASK:>
Calculates the weight of each edge, for embedding-order sorting.
<END_TASK>
<USER_TASK:>
Description:
def __calculate_edge_weights(dfs_data):
"""Calculates the weight of each edge, for embedding-order sorting.""" |
graph = dfs_data['graph']
weights = {}
for edge_id in graph.get_all_edge_ids():
edge_weight = __edge_weight(edge_id, dfs_data)
weights[edge_id] = edge_weight
return weights |
<SYSTEM_TASK:>
Sorts the adjacency list representation by the edge weights.
<END_TASK>
<USER_TASK:>
Description:
def __sort_adjacency_lists(dfs_data):
"""Sorts the adjacency list representation by the edge weights.""" |
new_adjacency_lists = {}
adjacency_lists = dfs_data['adj']
edge_weights = dfs_data['edge_weights']
edge_lookup = dfs_data['edge_lookup']
for node_id, adj_list in list(adjacency_lists.items()):
node_weight_lookup = {}
frond_lookup = {}
for node_b in adj_list:
edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(node_id, node_b)
node_weight_lookup[node_b] = edge_weights[edge_id]
frond_lookup[node_b] = 1 if edge_lookup[edge_id] == 'backedge' else 2
# Fronds should be before branches if the weights are equal
new_list = sorted(adj_list, key=lambda n: frond_lookup[n])
# Sort by weights
new_list.sort(key=lambda n: node_weight_lookup[n])
# Add the new sorted list to the new adjacency list lookup table
new_adjacency_lists[node_id] = new_list
return new_adjacency_lists |
<SYSTEM_TASK:>
A recursive implementation of the BranchPtDFS function, as defined on page 14 of the paper.
<END_TASK>
<USER_TASK:>
Description:
def __branch_point_dfs_recursive(u, large_n, b, stem, dfs_data):
"""A recursive implementation of the BranchPtDFS function, as defined on page 14 of the paper.""" |
first_vertex = dfs_data['adj'][u][0]
large_w = wt(u, first_vertex, dfs_data)
if large_w % 2 == 0:
large_w += 1
v_I = 0
v_II = 0
for v in [v for v in dfs_data['adj'][u] if wt(u, v, dfs_data) <= large_w]:
stem[u] = v # not in the original paper, but a logical extension based on page 13
if a(v, dfs_data) == u: # uv is a tree edge
large_n[v] = 0
if wt(u, v, dfs_data) % 2 == 0:
v_I = v
else:
b_u = b[u]
l2_v = L2(v, dfs_data)
#if l2_v > b_u:
# If this is true, then we're not on a branch at all
# continue
if l2_v < b_u:
large_n[v] = 1
elif b_u != 1:
#print stem
#print dfs_data['lowpoint_2_lookup']
#print b
xnode = stem[l2_v]
if large_n[xnode] != 0:
large_n[v] = large_n[xnode] + 1
elif dfs_data['graph'].adjacent(u, L1(v, dfs_data)):
large_n[v] = 2
else:
large_n[v] = large_n[u]
if large_n[v] % 2 == 0:
v_II = v
break # Goto 1
if v_II != 0:
# Move v_II to head of Adj[u]
dfs_data['adj'][u].remove(v_II)
dfs_data['adj'][u].insert(0, v_II)
elif v_I != 0:
# Move v_I to head of Adj[u]
dfs_data['adj'][u].remove(v_I)
dfs_data['adj'][u].insert(0, v_I)
first_time = True
for v in dfs_data['adj'][u]:
if a(v, dfs_data) == u:
b[v] = u
if first_time:
b[v] = b[u]
elif wt(u, v, dfs_data) % 2 == 0:
large_n[v] = 0
else:
large_n[v] = 1
stem[u] = v
__branch_point_dfs_recursive(v, large_n, b, stem, dfs_data)
first_time = False
return |
<SYSTEM_TASK:>
Builds the combinatorial embedding of the graph. Returns whether the graph is planar.
<END_TASK>
<USER_TASK:>
Description:
def __embed_branch(dfs_data):
"""Builds the combinatorial embedding of the graph. Returns whether the graph is planar.""" |
u = dfs_data['ordering'][0]
dfs_data['LF'] = []
dfs_data['RF'] = []
dfs_data['FG'] = {}
n = dfs_data['graph'].num_nodes()
f0 = (0, n)
g0 = (0, n)
L0 = {'u': 0, 'v': n}
R0 = {'x': 0, 'y': n}
dfs_data['LF'].append(f0)
dfs_data['RF'].append(g0)
dfs_data['FG'][0] = [L0, R0]
dfs_data['FG']['m'] = 0
dfs_data['FG']['l'] = 0
dfs_data['FG']['r'] = 0
#print 'DFS Ordering: {}'.format(dfs_data['ordering'])
#for node in dfs_data['ordering']:
#print '{}: {}'.format(node, dfs_data['adj'][node])
nonplanar = __embed_branch_recursive(u, dfs_data)
#print "Nonplanar:", nonplanar
return not nonplanar |
<SYSTEM_TASK:>
A recursive implementation of the EmbedBranch function, as defined on pages 8 and 22 of the paper.
<END_TASK>
<USER_TASK:>
Description:
def __embed_branch_recursive(u, dfs_data):
"""A recursive implementation of the EmbedBranch function, as defined on pages 8 and 22 of the paper.""" |
#print "\nu: {}\nadj: {}".format(u, dfs_data['adj'][u])
#print 'Pre-inserts'
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
for v in dfs_data['adj'][u]:
#print "\nu, v: {}, {}".format(u, v)
#print "dfs_u, dfs_v: {}, {}".format(D(u, dfs_data), D(v, dfs_data))
nonplanar = True
if a(v, dfs_data) == u:
#print 'Ancestor block entered:', u, v
if b(v, dfs_data) == u:
successful = __insert_branch(u, v, dfs_data)
if not successful:
#print 'InsertBranch({}, {}) Failed'.format(u, v)
nonplanar = True
return nonplanar
nonplanar = __embed_branch_recursive(v, dfs_data)
if nonplanar:
return nonplanar
elif is_frond(u, v, dfs_data):
#print 'Frond block entered:', u, v
successful = __embed_frond(u, v, dfs_data)
if not successful:
#print 'EmbedFrond({}, {}) Failed'.format(u, v)
nonplanar = True
return nonplanar
#print 'Post EmbedFrond'
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
else:
# This block is totally valid, and there will be multiple cases when it gets hit.
# We only want to do things if an edge is a tree edge (parent to child along the spine of the DFS tree),
# or if it's a frond edge (an edge moving up the tree from lower along the spine).
# Every non-tree edge will eventually get handled by the frond edge code as we recurse up the spine.
pass
#print "{}: Should be planar".format(u)
#print 'Post-inserts'
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
nonplanar = False
return nonplanar |
<SYSTEM_TASK:>
Embeds a frond uw into either LF or RF. Returns whether the embedding was successful.
<END_TASK>
<USER_TASK:>
Description:
def __embed_frond(node_u, node_w, dfs_data, as_branch_marker=False):
"""Embeds a frond uw into either LF or RF. Returns whether the embedding was successful.""" |
d_u = D(node_u, dfs_data)
d_w = D(node_w, dfs_data)
comp_d_w = abs(d_w)
if as_branch_marker:
d_w *= -1
if dfs_data['last_inserted_side'] == 'LF':
__insert_frond_RF(d_w, d_u, dfs_data)
else:
# We default to inserting a branch marker on the left side, unless we know otherwise
__insert_frond_LF(d_w, d_u, dfs_data)
return True
LF = dfs_data['LF']
m = dfs_data['FG']['m']
l_w = lw(dfs_data)
r_w = rw(dfs_data)
u_m = u(m, dfs_data)
x_m = fn_x(m, dfs_data)
# There are multiple cases for both u and w
# --Detect the case for u and store it for handling once the case for w is determined
case_1 = False
case_2 = False
case_3 = False
if d_u > u_m and d_u > x_m:
case_1 = True
elif d_u <= u_m and d_u > x_m:
case_2 = True
elif d_u > u_m and d_u <= x_m:
case_3 = True
else:
# We should never get here, return false because there's no way we can embed this frond
#print "Invalid u-case detected: (d_u, u_m, x_m): ({}, {}, {})".format(d_u, u_m, x_m)
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
return False
# --Detect the case for w and process the edge appropriately
if comp_d_w >= l_w and comp_d_w >= r_w:
# Case 4
#print "w-case 4 reached"
# --We do the same thing for all three u-cases: Add the frond to the left side
__insert_frond_LF(d_w, d_u, dfs_data)
dfs_data['FG']['m'] += 1
m = dfs_data['FG']['m']
n = dfs_data['graph'].num_nodes()
Lm = {'u': d_w, 'v': d_u}
Rm = {'x': n, 'y': 0} # See page 17 for how we deal with Ri being empty
#Rm = {}
dfs_data['FG'][m] = [Lm, Rm]
return True
elif comp_d_w >= l_w and comp_d_w < r_w:
# Case 5
#print "w-case 5 reached"
return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
elif comp_d_w < l_w and comp_d_w >= r_w:
# Case 6
#print "w-case 6 reached"
return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
elif comp_d_w < l_w and comp_d_w < r_w:
# Case 7
#print "w-case 7 reached"
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
#print "(d_w, l_w, r_w): ({}, {}, {})".format(d_w, l_w, r_w)
#print "(d_u, u_m, x_m, m): ({}, {}, {}, {})".format(d_u, u_m, x_m, m)
while comp_d_w < l_w and comp_d_w < r_w:
if d_u > u_m and d_u > x_m:
#print "Nonplanar case reached: u-case 1, w-case 7"
#print "FG: {}".format(dfs_data['FG'])
#print "LF: {}".format(dfs_data['LF'])
#print "RF: {}".format(dfs_data['RF'])
#print "(d_w, l_w, r_w): ({}, {}, {})".format(d_w, l_w, r_w)
#print "(d_u, u_m, x_m, m): ({}, {}, {}, {})".format(d_u, u_m, x_m, m)
return False
switch_sides(d_u, dfs_data)
# --Update the local variables for the next loop iteration
l_w = lw(dfs_data)
r_w = rw(dfs_data)
m = dfs_data['FG']['m']
u_m = u(m, dfs_data)
x_m = fn_x(m, dfs_data)
case_1 = False
case_2 = False
case_3 = False
if d_u <= u_m and d_u > x_m:
case_2 = True
elif d_u > u_m and d_u <= x_m:
case_3 = True
if comp_d_w >= l_w and comp_d_w < r_w:
# Case 5 redux
#print "w-case 5 redux reached"
return __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
if comp_d_w < l_w and comp_d_w >= r_w:
# Case 6 redux
#print "w-case 6 redux reached"
return __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data)
else:
# We should never get here, return false because there's no way we can embed this frond
#print "Invalid w-case detected"
return False
# We really shouldn't get to this point, but this is a catch-all just in case
#print "Failure catchall reached"
return False |
<SYSTEM_TASK:>
Encapsulates the process of inserting a frond uw into the right side frond group.
<END_TASK>
<USER_TASK:>
Description:
def __insert_frond_RF(d_w, d_u, dfs_data):
"""Encapsulates the process of inserting a frond uw into the right side frond group.""" |
# --Add the frond to the right side
dfs_data['RF'].append( (d_w, d_u) )
dfs_data['FG']['r'] += 1
dfs_data['last_inserted_side'] = 'RF' |
<SYSTEM_TASK:>
Encapsulates the process of inserting a frond uw into the left side frond group.
<END_TASK>
<USER_TASK:>
Description:
def __insert_frond_LF(d_w, d_u, dfs_data):
"""Encapsulates the process of inserting a frond uw into the left side frond group.""" |
# --Add the frond to the left side
dfs_data['LF'].append( (d_w, d_u) )
dfs_data['FG']['l'] += 1
dfs_data['last_inserted_side'] = 'LF' |
<SYSTEM_TASK:>
Merges Fm-1 and Fm, as defined on page 19 of the paper.
<END_TASK>
<USER_TASK:>
Description:
def merge_Fm(dfs_data):
"""Merges Fm-1 and Fm, as defined on page 19 of the paper.""" |
FG = dfs_data['FG']
m = FG['m']
FGm = FG[m]
FGm1 = FG[m-1]
if FGm[0]['u'] < FGm1[0]['u']:
FGm1[0]['u'] = FGm[0]['u']
if FGm[0]['v'] > FGm1[0]['v']:
FGm1[0]['v'] = FGm[0]['v']
if FGm[1]['x'] < FGm1[1]['x']:
FGm1[1]['x'] = FGm[1]['x']
if FGm[1]['y'] > FGm1[1]['y']:
FGm1[1]['y'] = FGm[1]['y']
del FG[m]
FG['m'] -= 1 |
<SYSTEM_TASK:>
Checks to see if the frond xy will conflict with a frond on the left side of the embedding.
<END_TASK>
<USER_TASK:>
Description:
def __check_left_side_conflict(x, y, dfs_data):
"""Checks to see if the frond xy will conflict with a frond on the left side of the embedding.""" |
l = dfs_data['FG']['l']
w, z = dfs_data['LF'][l]
return __check_conflict_fronds(x, y, w, z, dfs_data) |
<SYSTEM_TASK:>
Checks to see if the frond xy will conflict with a frond on the right side of the embedding.
<END_TASK>
<USER_TASK:>
Description:
def __check_right_side_conflict(x, y, dfs_data):
"""Checks to see if the frond xy will conflict with a frond on the right side of the embedding.""" |
r = dfs_data['FG']['r']
w, z = dfs_data['RF'][r]
return __check_conflict_fronds(x, y, w, z, dfs_data) |
<SYSTEM_TASK:>
Checks a pair of fronds to see if they conflict. Returns True if a conflict was found, False otherwise.
<END_TASK>
<USER_TASK:>
Description:
def __check_conflict_fronds(x, y, w, z, dfs_data):
"""Checks a pair of fronds to see if they conflict. Returns True if a conflict was found, False otherwise.""" |
# Case 1: False frond and corresponding branch marker
# --x and w should both be negative, and either xy or wz should be the same value uu
if x < 0 and w < 0 and (x == y or w == z):
# --Determine if the marker and frond correspond (have the same low-value)
if x == w:
return True
return False
# Case 2: Fronds with an overlap
if b(x, dfs_data) == b(w, dfs_data) and x > w and w > y and y > z:
return False
# Case 3: Branch marker and a frond on that branch
if x < 0 or w < 0:
# --Determine which one is the branch marker
if x < 0:
u = abs(x)
t = y
x = w
y = z
else:
u = abs(w)
t = z
# --Run the rest of the tests
if b(x, dfs_data) == u and y < u and \
(x, y) in __dfsify_branch_uv(u, t, dfs_data):
return True
return False
# If none of the conflict conditions were met, then there are no conflicts
return False |
<SYSTEM_TASK:>
Builds an adjacency list representation for the graph, since we can't guarantee that the
<END_TASK>
<USER_TASK:>
Description:
def __calculate_adjacency_lists(graph):
"""Builds an adjacency list representation for the graph, since we can't guarantee that the
internal representation of the graph is stored that way.""" |
adj = {}
for node in graph.get_all_node_ids():
neighbors = graph.neighbors(node)
adj[node] = neighbors
return adj |
<SYSTEM_TASK:>
Calculates the lowpoints for each node in a graph.
<END_TASK>
<USER_TASK:>
Description:
def __get_all_lowpoints(dfs_data):
"""Calculates the lowpoints for each node in a graph.""" |
lowpoint_1_lookup = {}
lowpoint_2_lookup = {}
ordering = dfs_data['ordering']
for node in ordering:
low_1, low_2 = __get_lowpoints(node, dfs_data)
lowpoint_1_lookup[node] = low_1
lowpoint_2_lookup[node] = low_2
return lowpoint_1_lookup, lowpoint_2_lookup |
<SYSTEM_TASK:>
Calculates the lowpoints for a single node in a graph.
<END_TASK>
<USER_TASK:>
Description:
def __get_lowpoints(node, dfs_data):
"""Calculates the lowpoints for a single node in a graph.""" |
ordering_lookup = dfs_data['ordering_lookup']
t_u = T(node, dfs_data)
sorted_t_u = sorted(t_u, key=lambda a: ordering_lookup[a])
lowpoint_1 = sorted_t_u[0]
lowpoint_2 = sorted_t_u[1]
return lowpoint_1, lowpoint_2 |
<SYSTEM_TASK:>
Determines whether a branch uv is a type I branch.
<END_TASK>
<USER_TASK:>
Description:
def is_type_I_branch(u, v, dfs_data):
"""Determines whether a branch uv is a type I branch.""" |
if u != a(v, dfs_data):
return False
if u == L2(v, dfs_data):
return True
return False |
<SYSTEM_TASK:>
Determines whether a branch uv is a type II branch.
<END_TASK>
<USER_TASK:>
Description:
def is_type_II_branch(u, v, dfs_data):
"""Determines whether a branch uv is a type II branch.""" |
if u != a(v, dfs_data):
return False
if u < L2(v, dfs_data):
return True
return False |
<SYSTEM_TASK:>
Gets the descendants of a node.
<END_TASK>
<USER_TASK:>
Description:
def __get_descendants(node, dfs_data):
"""Gets the descendants of a node.""" |
list_of_descendants = []
stack = deque()
children_lookup = dfs_data['children_lookup']
current_node = node
children = children_lookup[current_node]
dfs_current_node = D(current_node, dfs_data)
for n in children:
dfs_child = D(n, dfs_data)
# Validate that the child node is actually a descendant and not an ancestor
if dfs_child > dfs_current_node:
stack.append(n)
while len(stack) > 0:
current_node = stack.pop()
list_of_descendants.append(current_node)
children = children_lookup[current_node]
dfs_current_node = D(current_node, dfs_data)
for n in children:
dfs_child = D(n, dfs_data)
# Validate that the child node is actually a descendant and not an ancestor
if dfs_child > dfs_current_node:
stack.append(n)
return list_of_descendants |
<SYSTEM_TASK:>
The set of all descendants of u, with u added.
<END_TASK>
<USER_TASK:>
Description:
def S_star(u, dfs_data):
"""The set of all descendants of u, with u added.""" |
s_u = S(u, dfs_data)
if u not in s_u:
s_u.append(u)
return s_u |
<SYSTEM_TASK:>
Use this function if you are sure you have a single symbol.
<END_TASK>
<USER_TASK:>
Description:
def classify_segmented_recording(recording, result_format=None):
"""Use this function if you are sure you have a single symbol.
Parameters
----------
recording : string
The recording in JSON format
result_format : string, optional
If it is 'LaTeX', then only the latex code will be returned
Returns
-------
list of dictionaries
Each dictionary contains the keys 'symbol' and 'probability'. The list
is sorted descending by probability.
""" |
global single_symbol_classifier
if single_symbol_classifier is None:
single_symbol_classifier = SingleClassificer()
return single_symbol_classifier.predict(recording, result_format) |
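A hypothetical call sketch; the JSON recording format (a list of strokes, each a list of {'x', 'y', 'time'} points) is an assumption based on the preprocessing code later in this file:

import json
stroke = [{'x': 12, 'y': 40, 'time': 0}, {'x': 14, 'y': 36, 'time': 21}]
recording = json.dumps([stroke])
top_results = classify_segmented_recording(recording)
print(top_results[0])  # e.g. {'symbol': ..., 'probability': ...}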
<SYSTEM_TASK:>
Predict the class of the given recording.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, recording, result_format=None):
"""Predict the class of the given recording.
Parameters
----------
recording : string
Recording of a single handwritten dataset in JSON format.
result_format : string, optional
If it is 'LaTeX', then only the latex code will be returned
Returns
-------
list
""" |
evaluate = utils.evaluate_model_single_recording_preloaded
results = evaluate(self.preprocessing_queue,
self.feature_list,
self.model,
self.output_semantics,
recording)
if result_format == 'LaTeX':
for i in range(len(results)):
results[i]['semantics'] = results[i]['semantics'].split(";")[1]
for i in range(len(results)):
splitted = results[i]['semantics'].split(";")
results[i]['complete_latex'] = splitted[1]
return results |
<SYSTEM_TASK:>
Get a list of ids which describe which class they get mapped to.
<END_TASK>
<USER_TASK:>
Description:
def get_symbol_ids(symbol_yml_file, metadata):
"""
Get a list of ids which describe which class they get mapped to.
Parameters
----------
symbol_yml_file : string
Path to a YAML file.
metadata : dict
Metainformation of symbols, like the id on write-math.com.
Has keys 'symbols', 'tags', 'tags2symbols'.
Returns
-------
list of dictionaries : Each dictionary represents one output class and has
to have the keys 'id' (which is an id on write-math.com) and
'mappings' (which is a list of ids on write-math.com). The mappings
list should at least contain the id itself, but can contain more.
Examples
--------
>>> get_symbol_ids('symbols.yml')
[{'id': 42, 'mappings': [1, 42, 456, 1337]}, {'id': 2, 'mappings': [2]}]
The yml file has to be of the structure
```
- {latex: 'A'}
- {latex: 'B'}
- {latex: 'O',
mappings: ['0', 'o']}
- {latex: 'C'}
- {latex: '::REJECT::',
mappings: ['::ALL_FREE::']}
- {latex: '::ARROW::',
mappings: ['::TAG/arrow::'],
exclude: ['\rightarrow']}
```
""" |
with open(symbol_yml_file, 'r') as stream:
symbol_cfg = yaml.load(stream)
symbol_ids = []
symbol_ids_set = set()
for symbol in symbol_cfg:
if 'latex' not in symbol:
logging.error("Key 'latex' not found for a symbol in %s (%s)",
symbol_yml_file,
symbol)
sys.exit(-1)
results = [el for el in metadata['symbols']
if el['formula_in_latex'] == symbol['latex']]
if len(results) != 1:
logging.warning("Found %i results for %s: %s",
len(results),
symbol['latex'],
results)
if len(results) > 1:
results = sorted(results, key=lambda n: n['id'])
else:
sys.exit(-1)
mapping_ids = [results[0]['id']]
if 'mappings' in symbol:
for msymbol in symbol['mappings']:
filtered = [el for el in metadata['symbols']
if el['formula_in_latex'] == msymbol['latex']]
if len(filtered) != 1:
logging.error("Found %i results for %s: %s",
len(filtered),
msymbol,
filtered)
if len(filtered) > 1:
filtered = natsorted(filtered, key=lambda n: n['id'])
else:
sys.exit(-1)
mapping_ids.append(filtered[0]['id'])
symbol_ids.append({'id': int(results[0]['id']),
'formula_in_latex': results[0]['formula_in_latex'],
'mappings': mapping_ids})
for id_tmp in mapping_ids:
if id_tmp not in symbol_ids_set:
symbol_ids_set.add(id_tmp)
else:
for symbol_tmp in symbol_ids:
if id_tmp in symbol_tmp['mappings']:
break
logging.error('Symbol id %s is already used: %s',
id_tmp,
symbol_tmp)
sys.exit(-1)
# print(metadata.keys())
# for el in metadata:
# print(metadata[el][0].keys())
# TODO: assert no double mappings
# TODO: Support for
# - ::ALL_FREE:: - meaning the rest of all ids which are not assigned to
# any other class get assigned to this class
# - ::TAG/arrow:: - meaning all ids of the tag arrow get assigned here
# - exclude
logging.info('%i base classes and %i write-math ids.',
len(symbol_ids),
len(symbol_ids_set))
return symbol_ids |
<SYSTEM_TASK:>
Read a CSV into a list of dictionaries. The first line of the CSV determines
<END_TASK>
<USER_TASK:>
Description:
def read_csv(filepath):
"""
Read a CSV into a list of dictionaries. The first line of the CSV determines
the keys of the dictionary.
Parameters
----------
filepath : string
Returns
-------
list of dictionaries
""" |
symbols = []
with open(filepath, 'r', newline='') as csvfile:  # text mode, as csv.DictReader expects under Python 3
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
symbols.append(row)
return symbols |
<SYSTEM_TASK:>
Load a pickle file of raw recordings.
<END_TASK>
<USER_TASK:>
Description:
def load_raw(raw_pickle_file):
"""
Load a pickle file of raw recordings.
Parameters
----------
raw_pickle_file : str
Path to a pickle file which contains raw recordings.
Returns
-------
dict
The loaded pickle file.
""" |
with open(raw_pickle_file, 'rb') as f:
raw = pickle.load(f)
logging.info("Loaded %i recordings.", len(raw['handwriting_datasets']))
return raw |
<SYSTEM_TASK:>
Get metrics from a list of dictionaries.
<END_TASK>
<USER_TASK:>
Description:
def get_metrics(metrics_description):
"""Get metrics from a list of dictionaries. """ |
return utils.get_objectlist(metrics_description,
config_key='data_analyzation_plugins',
module=sys.modules[__name__]) |
<SYSTEM_TASK:>
Truncate the file and return the filename.
<END_TASK>
<USER_TASK:>
Description:
def prepare_file(filename):
"""Truncate the file and return the filename.""" |
directory = os.path.join(utils.get_project_root(), "analyzation/")
if not os.path.exists(directory):
os.makedirs(directory)
workfilename = os.path.join(directory, filename)
open(workfilename, 'w').close() # Truncate the file
return workfilename |
<SYSTEM_TASK:>
Sort a list of formulas by `id`, where `id` represents the accepted
<END_TASK>
<USER_TASK:>
Description:
def sort_by_formula_id(raw_datasets):
"""
Sort a list of formulas by `id`, where `id` represents the accepted
formula id.
Parameters
----------
raw_datasets : list of dictionaries
A list of raw datasets.
Examples
--------
The parameter `raw_datasets` has to be of the format
>>> rd = [{'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=2953),
... 'formula_in_latex': 'A',
... 'id': 2953},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4037),
... 'formula_in_latex': 'A',
... 'id': 4037},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4056),
... 'formula_in_latex': 'A',
... 'id': 4056}]
>>> sort_by_formula_id(rd)
""" |
by_formula_id = defaultdict(list)
for el in raw_datasets:
by_formula_id[el['handwriting'].formula_id].append(el['handwriting'])
return by_formula_id |
<SYSTEM_TASK:>
Write all obtained data to a file.
<END_TASK>
<USER_TASK:>
Description:
def _write_data(self, symbols, err_recs, nr_recordings,
total_error_count, percentages, time_max_list):
"""Write all obtained data to a file.
Parameters
----------
symbols : dict
Maps each symbol (string) to its count of recordings (non-negative int)
err_recs : dictionary
count of recordings by error type
nr_recordings : non-negative int
number of recordings
total_error_count : dictionary
Count of all error that have happened by type
percentages : list
List of all recordings where removing the dots changed the size of
the bounding box.
time_max_list : list
List of all recordings where the recording time is above a
threshold.
""" |
write_file = open(self.filename, "a")
s = ""
for symbol, count in sorted(symbols.items(), key=lambda n: n[0]):
if symbol in ['a', '0', 'A']:
s += "\n%s (%i), " % (symbol, count)
elif symbol in ['z', '9', 'Z']:
s += "%s (%i) \n" % (symbol, count)
else:
s += "%s (%i), " % (symbol, count)
print("## Data", file=write_file)
print("Symbols: %i" % len(symbols), file=write_file)
print("Recordings: %i" % sum(symbols.values()), file=write_file)
print("```", file=write_file)
print(s[:-1], file=write_file)
print("```", file=write_file)
# Show errors
print("Recordings with wild points: %i (%0.2f%%)" %
(err_recs['wild_points'],
float(err_recs['wild_points']) / nr_recordings * 100),
file=write_file)
print("wild points: %i" % total_error_count['wild_points'],
file=write_file)
print("Recordings with missing stroke: %i (%0.2f%%)" %
(err_recs['missing_stroke'],
float(err_recs['missing_stroke']) / nr_recordings * 100),
file=write_file)
print("Recordings with errors: %i (%0.2f%%)" %
(err_recs['total'],
float(err_recs['total']) / nr_recordings * 100),
file=write_file)
print("Recordings with dots: %i (%0.2f%%)" %
(err_recs['single_dots'],
float(err_recs['single_dots']) / nr_recordings * 100),
file=write_file)
print("dots: %i" % total_error_count['single_dots'], file=write_file)
print("size changing removal: %i (%0.2f%%)" %
(len(percentages),
float(len(percentages)) / nr_recordings * 100),
file=write_file)
print("%i recordings took more than %i ms. That were: " %
(len(time_max_list), self.time_max_threshold),
file=write_file)
for recording in time_max_list:
print("* %ims: %s: %s" %
(recording.get_time(),
utils.get_readable_time(recording.get_time()),
recording),
file=write_file)
write_file.close() |
<SYSTEM_TASK:>
Print the feature_list in a human-readable form.
<END_TASK>
<USER_TASK:>
Description:
def print_featurelist(feature_list):
"""
Print the feature_list in a human-readable form.
Parameters
----------
feature_list : list
feature objects
""" |
input_features = sum(map(lambda n: n.get_dimension(), feature_list))
print("## Features (%i)" % input_features)
print("```")
for algorithm in feature_list:
print("* %s" % str(algorithm))
print("```") |
<SYSTEM_TASK:>
The Douglas-Peucker line simplification takes a list of points as an
<END_TASK>
<USER_TASK:>
Description:
def _stroke_simplification(self, pointlist):
"""The Douglas-Peucker line simplification takes a list of points as an
argument. It tries to simplify this list by removing as many points
as possible while still maintaining the overall shape of the stroke.
It does so by taking the first and the last point, connecting them
by a straight line and searching for the point with the highest
distance. If that distance is bigger than 'epsilon', the point is
important and the algorithm continues recursively.""" |
# Find the point with the biggest distance
dmax = 0
index = 0
for i in range(1, len(pointlist)):
d = geometry.perpendicular_distance(pointlist[i],
pointlist[0],
pointlist[-1])
if d > dmax:
index = i
dmax = d
# If the maximum distance is bigger than the threshold 'epsilon', then
# simplify the pointlist recursively
if dmax >= self.epsilon:
# Recursive call
rec_results1 = self._stroke_simplification(pointlist[0:index])
rec_results2 = self._stroke_simplification(pointlist[index:])
result_list = rec_results1[:-1] + rec_results2
else:
result_list = [pointlist[0], pointlist[-1]]
return result_list |
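The recursion above delegates the point-to-segment measurement to geometry.perpendicular_distance. A self-contained sketch of such a helper (an assumption -- the real implementation may differ) for points stored as {'x': ..., 'y': ...} dicts:

import math

def perpendicular_distance(point, start, end):
    """Distance from `point` to the line through `start` and `end`."""
    dx = end['x'] - start['x']
    dy = end['y'] - start['y']
    norm = math.hypot(dx, dy)
    if norm == 0:
        # Degenerate segment: fall back to the distance to `start`
        return math.hypot(point['x'] - start['x'], point['y'] - start['y'])
    return abs(dy * point['x'] - dx * point['y']
               + end['x'] * start['y'] - end['y'] * start['x']) / norm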
<SYSTEM_TASK:>
Get preprocessing queue from a list of dictionaries
<END_TASK>
<USER_TASK:>
Description:
def get_preprocessing_queue(preprocessing_list):
"""Get preprocessing queue from a list of dictionaries
>>> l = [{'RemoveDuplicateTime': None},
{'ScaleAndShift': [{'center': True}]}
]
>>> get_preprocessing_queue(l)
[RemoveDuplicateTime, ScaleAndShift
- center: True
- max_width: 1
- max_height: 1
]
""" |
return utils.get_objectlist(preprocessing_list,
config_key='preprocessing',
module=sys.modules[__name__]) |
<SYSTEM_TASK:>
Print the ``preproc_list`` in a human-readable form.
<END_TASK>
<USER_TASK:>
Description:
def print_preprocessing_list(preprocessing_queue):
"""
Print the ``preproc_list`` in a human-readable form.
Parameters
----------
preprocessing_queue : list of preprocessing objects
Algorithms that get applied for preprocessing.
""" |
print("## Preprocessing")
print("```")
for algorithm in preprocessing_queue:
print("* " + str(algorithm))
print("```") |
<SYSTEM_TASK:>
Take a list of points and calculate the factors for scaling and
<END_TASK>
<USER_TASK:>
Description:
def _get_parameters(self, hwr_obj):
""" Take a list of points and calculate the factors for scaling and
moving it so that it's in the unit square. Keept the aspect
ratio.
Optionally center the points inside of the unit square.
""" |
a = hwr_obj.get_bounding_box()
width = a['maxx'] - a['minx'] + self.width_add
height = a['maxy'] - a['miny'] + self.height_add
factor_x, factor_y = 1, 1
if width != 0:
factor_x = self.max_width / width
if height != 0:
factor_y = self.max_height / height
factor = min(factor_x, factor_y)
addx, addy = 0.0, 0.0
if self.center:
# Only one dimension (x or y) has to be centered (the smaller one)
add = -(factor / (2.0 * max(factor_x, factor_y)))
if factor == factor_x:
addy = add
if self.center_other:
addx = -(width * factor / 2.0)
else:
addx = add
if self.center_other:
addy = -(height * factor / 2.0)
assert factor > 0, "factor > 0 is False. factor = %s" % str(factor)
assert isinstance(addx, float), "addx is %s" % str(addx)
assert isinstance(addy, float), "addy is %s" % str(addy)
assert isinstance(a['minx'], (int, float)), "minx is %s" % str(a['minx'])
assert isinstance(a['miny'], (int, float)), "miny is %s" % str(a['miny'])
assert isinstance(a['mint'], (int, float)), "mint is %s" % str(a['mint'])
return {"factor": factor, "addx": addx, "addy": addy,
"minx": a['minx'], "miny": a['miny'], "mint": a['mint']} |
<SYSTEM_TASK:>
Calculate the interval borders 'times' that contain the information
<END_TASK>
<USER_TASK:>
Description:
def _calculate_pen_down_strokes(self, pointlist, times=None):
"""Calculate the intervall borders 'times' that contain the information
when a stroke started, when it ended and how it should be
interpolated.""" |
if times is None:
times = []
for stroke in pointlist:
stroke_info = {"start": stroke[0]['time'],
"end": stroke[-1]['time'],
"pen_down": True}
# set up variables for interpolation
x, y, t = [], [], []
for point in stroke:
if point['time'] not in t:
x.append(point['x'])
y.append(point['y'])
t.append(point['time'])
x, y = numpy.array(x), numpy.array(y)
if len(t) == 1:
# constant interpolation
fx, fy = lambda x: float(x), lambda y: float(y)
elif len(t) == 2:
# linear interpolation
fx, fy = interp1d(t, x, 'linear'), interp1d(t, y, 'linear')
elif len(t) == 3:
# quadratic interpolation
fx = interp1d(t, x, 'quadratic')
fy = interp1d(t, y, 'quadratic')
else:
fx, fy = interp1d(t, x, self.kind), interp1d(t, y, self.kind)
stroke_info['fx'] = fx
stroke_info['fy'] = fy
times.append(stroke_info)
return times |
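A minimal scipy.interpolate.interp1d illustration of the interpolation pattern used above (illustrative values only):

import numpy
from scipy.interpolate import interp1d

t = numpy.array([0, 10, 20, 30])
x = numpy.array([0.0, 1.0, 4.0, 9.0])
fx = interp1d(t, x, kind='cubic')  # a cubic spline needs at least 4 points
print(float(fx(15)))               # interpolated x-position between samples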
<SYSTEM_TASK:>
'Pen-up' strokes are virtual strokes that were not drawn. They
<END_TASK>
<USER_TASK:>
Description:
def _calculate_pen_up_strokes(self, pointlist, times=None):
""" 'Pen-up' strokes are virtual strokes that were not drawn. It
models the time when the user moved from one stroke to the next.
""" |
if times is None:
times = []
for i in range(len(pointlist) - 1):
stroke_info = {"start": pointlist[i][-1]['time'],
"end": pointlist[i + 1][0]['time'],
"pen_down": False}
x, y, t = [], [], []
for point in [pointlist[i][-1], pointlist[i + 1][0]]:
if point['time'] not in t:
x.append(point['x'])
y.append(point['y'])
t.append(point['time'])
if len(x) == 1:
# constant interpolation
fx, fy = lambda x: float(x), lambda y: float(y)
else:
# linear interpolation
x, y = numpy.array(x), numpy.array(y)
fx = interp1d(t, x, kind='linear')
fy = interp1d(t, y, kind='linear')
stroke_info['fx'] = fx
stroke_info['fy'] = fy
times.append(stroke_info)
return times |
<SYSTEM_TASK:>
Do the interpolation of 'kind' for 'stroke
<END_TASK>
<USER_TASK:>
Description:
def _space(self, hwr_obj, stroke, kind):
"""Do the interpolation of 'kind' for 'stroke'""" |
new_stroke = []
stroke = sorted(stroke, key=lambda p: p['time'])
x, y, t = [], [], []
for point in stroke:
x.append(point['x'])
y.append(point['y'])
t.append(point['time'])
x, y = numpy.array(x), numpy.array(y)
failed = False
try:
fx = interp1d(t, x, kind=kind)
fy = interp1d(t, y, kind=kind)
except Exception as e: # pylint: disable=W0703
if hwr_obj.raw_data_id is not None:
logging.debug("spline failed for raw_data_id %i",
hwr_obj.raw_data_id)
else:
logging.debug("spline failed")
logging.debug(e)
failed = True
tnew = numpy.linspace(t[0], t[-1], self.number)
# linear interpolation fallback due to
# https://github.com/scipy/scipy/issues/3868
if failed:
try:
fx = interp1d(t, x, kind='linear')
fy = interp1d(t, y, kind='linear')
failed = False
except Exception as e:
logging.debug("len(stroke) = %i", len(stroke))
logging.debug("len(x) = %i", len(x))
logging.debug("len(y) = %i", len(y))
logging.debug("stroke=%s", stroke)
raise e
for x, y, t in zip(fx(tnew), fy(tnew), tnew):
new_stroke.append({'x': x, 'y': y, 'time': t})
return new_stroke |
<SYSTEM_TASK:>
Calculate the arithmetic mean of the points' x and y coordinates
<END_TASK>
<USER_TASK:>
Description:
def _calculate_average(self, points):
"""Calculate the arithmetic mean of the points x and y coordinates
seperately.
""" |
assert len(self.theta) == len(points), \
"points has length %i, but should have length %i" % \
(len(points), len(self.theta))
new_point = {'x': 0, 'y': 0, 'time': 0}
for key in new_point:
new_point[key] = self.theta[0] * points[0][key] + \
self.theta[1] * points[1][key] + \
self.theta[2] * points[2][key]
return new_point |
<SYSTEM_TASK:>
Create a model if it doesn't exist already.
<END_TASK>
<USER_TASK:>
Description:
def create_model(model_folder, model_type, topology, override):
"""
Create a model if it doesn't exist already.
Parameters
----------
model_folder :
The path to the folder where the model is described with an `info.yml`
model_type :
MLP
topology :
Something like 160:500:369 - that means the first layer has 160
neurons, the second layer has 500 neurons and the last layer has 369
neurons.
override : boolean
If a model exists, override it.
""" |
latest_model = utils.get_latest_in_folder(model_folder, ".json")
if (latest_model == "") or override:
logging.info("Create a base model...")
model_src = os.path.join(model_folder, "model-0.json")
command = "%s make %s %s > %s" % (utils.get_nntoolkit(),
model_type,
topology,
model_src)
logging.info(command)
os.system(command)
else:
logging.info("Model file already existed.") |
<SYSTEM_TASK:>
Parse the info.yml from ``model_folder`` and create the model file.
<END_TASK>
<USER_TASK:>
Description:
def main(model_folder, override=False):
"""Parse the info.yml from ``model_folder`` and create the model file.""" |
model_description_file = os.path.join(model_folder, "info.yml")
# Read the model description file
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.safe_load(ymlfile)
project_root = utils.get_project_root()
# Read the feature description file
feature_folder = os.path.join(project_root,
model_description['data-source'])
with open(os.path.join(feature_folder, "info.yml"), 'r') as ymlfile:
feature_description = yaml.safe_load(ymlfile)
# Get a list of all used features
feature_list = features.get_features(feature_description['features'])
# Get the dimension of the feature vector
input_features = sum(map(lambda n: n.get_dimension(), feature_list))
logging.info("Number of features: %i", input_features)
# Analyze model
logging.info(model_description['model'])
if model_description['model']['type'] != 'mlp':
return
create_model(model_folder,
model_description['model']['type'],
model_description['model']['topology'],
override)
utils.create_run_logfile(model_folder) |
<SYSTEM_TASK:>
Get some strokes of pointlist
<END_TASK>
<USER_TASK:>
Description:
def _get_part(pointlist, strokes):
"""Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts
""" |
result = []
strokes = sorted(strokes)
for stroke_index in strokes:
result.append(pointlist[stroke_index])
return result |
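A small usage sketch with made-up recording data; note that the requested stroke indices are sorted before the strokes are collected:
pointlist = [[{'x': 0, 'y': 0, 'time': 0}],
             [{'x': 5, 'y': 5, 'time': 1}],
             [{'x': 9, 'y': 2, 'time': 2}]]
_get_part(pointlist, [2, 0])
# -> [[{'x': 0, 'y': 0, 'time': 0}], [{'x': 9, 'y': 2, 'time': 2}]]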
<SYSTEM_TASK:>
Get a dictionary which translates from a neural network output to semantics.
<END_TASK>
<USER_TASK:>
Description:
def _get_translate():
"""
Get a dictionary which translates from a neural network output to
semantics.
""" |
translate = {}
model_path = pkg_resources.resource_filename('hwrt', 'misc/')
translation_csv = os.path.join(model_path, 'latex2writemathindex.csv')
arguments = {'newline': '', 'encoding': 'utf8'}
with open(translation_csv, 'rt', **arguments) as csvfile:
contents = csvfile.read()
lines = contents.split("\n")
for csvrow in lines:
csvrow = csvrow.split(',')
if len(csvrow) == 1:
writemathid = csvrow[0]
latex = ""
else:
writemathid, latex = csvrow[0], csvrow[1:]
latex = ','.join(latex)
translate[latex] = writemathid
return translate |
<SYSTEM_TASK:>
Generate a string that contains a command with all necessary parameters to train the model.
<END_TASK>
<USER_TASK:>
Description:
def generate_training_command(model_folder):
"""Generate a string that contains a command with all necessary
parameters to train the model.""" |
update_if_outdated(model_folder)
model_description_file = os.path.join(model_folder, "info.yml")
# Read the model description file
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.safe_load(ymlfile)
# Get the data paths (hdf5 files)
project_root = utils.get_project_root()
data = {}
data['training'] = os.path.join(project_root,
model_description["data-source"],
"traindata.hdf5")
data['testing'] = os.path.join(project_root,
model_description["data-source"],
"testdata.hdf5")
data['validating'] = os.path.join(project_root,
model_description["data-source"],
"validdata.hdf5")
# Get latest model file
basename = "model"
latest_model = utils.get_latest_working_model(model_folder)
if latest_model == "":
logging.error("There is no model with basename '%s'.", basename)
return None
else:
logging.info("Model '%s' found.", latest_model)
i = int(latest_model.split("-")[-1].split(".")[0])
model_src = os.path.join(model_folder, "%s-%i.json" % (basename, i))
model_target = os.path.join(model_folder,
"%s-%i.json" % (basename, i+1))
# generate the training command
training = model_description['training']
training = training.replace("{{testing}}", data['testing'])
training = training.replace("{{training}}", data['training'])
training = training.replace("{{validation}}", data['validating'])
training = training.replace("{{src_model}}", model_src)
training = training.replace("{{target_model}}", model_target)
training = training.replace("{{nntoolkit}}", utils.get_nntoolkit())
return training |
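The placeholder substitution works on a plain template string from info.yml; the template below is invented for illustration, but the placeholders are the ones replaced above:
training = ("{{nntoolkit}} train {{src_model}} {{target_model}} "
            "--train {{training}} --test {{testing}}")
training = training.replace("{{nntoolkit}}", "nntoolkit")
training = training.replace("{{src_model}}", "model-3.json")
training = training.replace("{{target_model}}", "model-4.json")
training = training.replace("{{training}}", "traindata.hdf5")
training = training.replace("{{testing}}", "testdata.hdf5")
# -> 'nntoolkit train model-3.json model-4.json --train traindata.hdf5 --test testdata.hdf5'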
<SYSTEM_TASK:>
Main part of the training script.
<END_TASK>
<USER_TASK:>
Description:
def main(model_folder):
"""Main part of the training script.""" |
model_description_file = os.path.join(model_folder, "info.yml")
# Read the model description file
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.safe_load(ymlfile)
# Analyze model
logging.info(model_description['model'])
data = {}
data['training'] = os.path.join(model_folder, "traindata.hdf5")
data['testing'] = os.path.join(model_folder, "testdata.hdf5")
data['validating'] = os.path.join(model_folder, "validdata.hdf5")
train_model(model_folder) |
<SYSTEM_TASK:>
Get the bounding box of a list of points.
<END_TASK>
<USER_TASK:>
Description:
def get_bounding_box(points):
"""Get the bounding box of a list of points.
Parameters
----------
points : list of points
Returns
-------
BoundingBox
""" |
assert len(points) > 0, "At least one point has to be given."
min_x, max_x = points[0]['x'], points[0]['x']
min_y, max_y = points[0]['y'], points[0]['y']
for point in points:
min_x, max_x = min(min_x, point['x']), max(max_x, point['x'])
min_y, max_y = min(min_y, point['y']), max(max_y, point['y'])
p1 = Point(min_x, min_y)
p2 = Point(max_x, max_y)
return BoundingBox(p1, p2) |
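A quick usage sketch, assuming the Point and BoundingBox helpers of this module:
bb = get_bounding_box([{'x': 1, 'y': 7}, {'x': 4, 'y': 2}, {'x': 3, 'y': 9}])
# bb.p1 is Point(1, 2) (lower-left corner), bb.p2 is Point(4, 9) (upper-right corner)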
<SYSTEM_TASK:>
Check if BoundingBox a intersects with BoundingBox b.
<END_TASK>
<USER_TASK:>
Description:
def do_bb_intersect(a, b):
"""Check if BoundingBox a intersects with BoundingBox b.""" |
return a.p1.x <= b.p2.x \
and a.p2.x >= b.p1.x \
and a.p1.y <= b.p2.y \
and a.p2.y >= b.p1.y |
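The same axis-aligned overlap test, written as a self-contained sketch over plain coordinate tuples so it can be tried without the BoundingBox class:
def boxes_intersect(a, b):
    # a, b: ((min_x, min_y), (max_x, max_y))
    (a_min_x, a_min_y), (a_max_x, a_max_y) = a
    (b_min_x, b_min_y), (b_max_x, b_max_y) = b
    return (a_min_x <= b_max_x and a_max_x >= b_min_x and
            a_min_y <= b_max_y and a_max_y >= b_min_y)

boxes_intersect(((0, 0), (2, 2)), ((1, 1), (3, 3)))  # True
boxes_intersect(((0, 0), (2, 2)), ((3, 3), (4, 4)))  # False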
<SYSTEM_TASK:>
Calculate the distance from p3 to the stroke defined by p1 and p2.
<END_TASK>
<USER_TASK:>
Description:
def perpendicular_distance(p3, p1, p2):
"""
Calculate the distance from p3 to the stroke defined by p1 and p2.
The distance is the length of the perpendicular from p3 onto the segment from p1 to p2, clamped to the segment's end points.
Parameters
----------
p1 : dictionary with "x" and "y"
start of stroke
p2 : dictionary with "x" and "y"
end of stroke
p3 : dictionary with "x" and "y"
point
""" |
px = p2['x']-p1['x']
py = p2['y']-p1['y']
squared_distance = px*px + py*py
if squared_distance == 0:
# The line is in fact only a single dot.
# In this case the distance of two points has to be
# calculated
line_point = Point(p1['x'], p1['y'])
point = Point(p3['x'], p3['y'])
return line_point.dist_to(point)
u = ((p3['x'] - p1['x'])*px + (p3['y'] - p1['y'])*py) / squared_distance
if u > 1:
u = 1
elif u < 0:
u = 0
x = p1['x'] + u * px
y = p1['y'] + u * py
dx = x - p3['x']
dy = y - p3['y']
# Note: If the actual distance does not matter and you only
# want to compare what this function
# returns to other results of this function, you
# can just return the squared distance instead
# (i.e. remove the sqrt) to gain a little performance
dist = math.sqrt(dx*dx + dy*dy)
return dist |
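Two usage sketches with a horizontal segment from (0, 0) to (10, 0); the first point projects onto the segment, the second is clamped to the end point:
perpendicular_distance({'x': 3, 'y': 4}, {'x': 0, 'y': 0}, {'x': 10, 'y': 0})
# -> 4.0 (foot of the perpendicular is (3, 0))
perpendicular_distance({'x': 13, 'y': 4}, {'x': 0, 'y': 0}, {'x': 10, 'y': 0})
# -> 5.0 (clamped to the end point (10, 0))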
<SYSTEM_TASK:>
Measure the distance to another point.
<END_TASK>
<USER_TASK:>
Description:
def dist_to(self, p2):
"""Measure the distance to another point.""" |
return math.hypot(self.x - p2.x, self.y - p2.y) |
<SYSTEM_TASK:>
Return the slope m of this line segment.
<END_TASK>
<USER_TASK:>
Description:
def get_slope(self):
"""Return the slope m of this line segment.""" |
# y1 = m*x1 + t
# y2 = m*x2 + t => y1-y2 = m*(x1-x2) <=> m = (y1-y2)/(x1-x2)
# Note: this raises a ZeroDivisionError for vertical segments (x1 == x2).
return (self.p1.y-self.p2.y) / (self.p1.x-self.p2.x)
<SYSTEM_TASK:>
Get the offset t of this line segment.
<END_TASK>
<USER_TASK:>
Description:
def get_offset(self):
"""Get the offset t of this line segment.""" |
return self.p1.y-self.get_slope()*self.p1.x |
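A hand check of the two formulas for the segment from (1, 2) to (3, 6), written out as a tiny sketch:
m = (2 - 6) / (1 - 3)  # slope m = 2.0
t = 2 - m * 1          # offset t = 0.0, i.e. the supporting line is y = 2*x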
<SYSTEM_TASK:>
Get the number of self-intersections of this polygonal chain.
<END_TASK>
<USER_TASK:>
Description:
def count_selfintersections(self):
""" Get the number of self-intersections of this polygonal chain.""" |
# This can be solved more efficiently with sweep line
counter = 0
for i, j in itertools.combinations(range(len(self.lineSegments)), 2):
inters = get_segments_intersections(self.lineSegments[i],
self.lineSegments[j])
if abs(i-j) > 1 and len(inters) > 0:
counter += 1
return counter |
<SYSTEM_TASK:>
Count the intersections of two strokes with each other.
<END_TASK>
<USER_TASK:>
Description:
def count_intersections(self, line_segments_b):
"""
Count the intersections of two strokes with each other.
Parameters
----------
line_segments_b : list
A list of line segments
Returns
-------
int
The number of intersections between A and B.
""" |
line_segments_a = self.lineSegments
# Calculate intersections
intersection_points = []
for line1, line2 in itertools.product(line_segments_a,
line_segments_b):
intersection_points += get_segments_intersections(line1, line2)
return len(set(intersection_points)) |
<SYSTEM_TASK:>
Calculate area of bounding box.
<END_TASK>
<USER_TASK:>
Description:
def get_area(self):
"""Calculate area of bounding box.""" |
return (self.p2.x-self.p1.x)*(self.p2.y-self.p1.y) |
<SYSTEM_TASK:>
Get the center point of this bounding box.
<END_TASK>
<USER_TASK:>
Description:
def get_center(self):
"""
Get the center point of this bounding box.
""" |
return Point((self.p1.x+self.p2.x)/2.0, (self.p1.y+self.p2.y)/2.0) |
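Usage sketch for the two bounding-box helpers above, assuming the Point and BoundingBox constructors used earlier:
bb = BoundingBox(Point(0, 0), Point(4, 2))
bb.get_area()    # -> 8
bb.get_center()  # -> Point(2.0, 1.0)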
<SYSTEM_TASK:>
List raw data IDs grouped by symbol ID from a pickle file
<END_TASK>
<USER_TASK:>
Description:
def _list_ids(path_to_data):
"""List raw data IDs grouped by symbol ID from a pickle file
``path_to_data``.""" |
loaded = pickle.load(open(path_to_data, "rb"))
raw_datasets = loaded['handwriting_datasets']
raw_ids = {}
for raw_dataset in raw_datasets:
raw_data_id = raw_dataset['handwriting'].raw_data_id
if raw_dataset['formula_id'] not in raw_ids:
raw_ids[raw_dataset['formula_id']] = [raw_data_id]
else:
raw_ids[raw_dataset['formula_id']].append(raw_data_id)
for symbol_id in sorted(raw_ids):
print("%i: %s" % (symbol_id, sorted(raw_ids[symbol_id]))) |
<SYSTEM_TASK:>
Print ``raw_data_id`` with the content ``raw_data_string`` after applying the preprocessing of ``model_folder`` to it.
<END_TASK>
<USER_TASK:>
Description:
def display_data(raw_data_string, raw_data_id, model_folder, show_raw):
"""Print ``raw_data_id`` with the content ``raw_data_string`` after
applying the preprocessing of ``model_folder`` to it.""" |
print("## Raw Data (ID: %i)" % raw_data_id)
print("```")
print(raw_data_string)
print("```")
preprocessing_desc, feature_desc, _ = _get_system(model_folder)
# Print model
print("## Model")
print("%s\n" % model_folder)
# Get the preprocessing queue
tmp = preprocessing_desc['queue']
preprocessing_queue = preprocessing.get_preprocessing_queue(tmp)
# Get feature values as list of floats, rounded to 3 decimal places
tmp = feature_desc['features']
feature_list = features.get_features(tmp)
# Print preprocessing queue
preprocessing.print_preprocessing_list(preprocessing_queue)
features.print_featurelist(feature_list)
# Get Handwriting
recording = handwritten_data.HandwrittenData(raw_data_string,
raw_data_id=raw_data_id)
if show_raw:
recording.show()
recording.preprocessing(preprocessing_queue)
feature_values = recording.feature_extraction(feature_list)
feature_values = [round(el, 3) for el in feature_values]
print("Features:")
print(feature_values)
# Get the list of data multiplication algorithms
mult_queue = data_multiplication.get_data_multiplication_queue(
feature_desc['data-multiplication'])
# Multiply training_set
training_set = [{'id': 42,
'formula_id': 42,
'formula_in_latex': 'None',
'handwriting': recording}]
training_set = create_ffiles.training_set_multiplication(training_set,
mult_queue)
# Display it
logging.info("Show %i recordings...", len(training_set))
for recording in training_set:
recording['handwriting'].show() |
<SYSTEM_TASK:>
Get the parameters of the preprocessing done within `folder`.
<END_TASK>
<USER_TASK:>
Description:
def get_parameters(folder):
"""Get the parameters of the preprocessing done within `folder`.
Parameters
----------
folder : string
Returns
-------
tuple : (path of raw data,
path where preprocessed data gets stored,
list of preprocessing algorithms)
""" |
# Read the model description file
with open(os.path.join(folder, "info.yml"), 'r') as ymlfile:
preprocessing_description = yaml.safe_load(ymlfile)
# Get the path of the raw data
raw_datapath = os.path.join(utils.get_project_root(),
preprocessing_description['data-source'])
# Get the path were the preprocessed file should be put
outputpath = os.path.join(folder, "data.pickle")
# Get the preprocessing queue
tmp = preprocessing_description['queue']
preprocessing_queue = preprocessing.get_preprocessing_queue(tmp)
return (raw_datapath, outputpath, preprocessing_queue) |
<SYSTEM_TASK:>
Create a preprocessed dataset file by applying `preprocessing_queue` to `path_to_data`.
<END_TASK>
<USER_TASK:>
Description:
def create_preprocessed_dataset(path_to_data, outputpath, preprocessing_queue):
"""Create a preprocessed dataset file by applying `preprocessing_queue`
to `path_to_data`. The result will be stored in `outputpath`.""" |
# Log everything
logging.info("Data soure %s", path_to_data)
logging.info("Output will be stored in %s", outputpath)
tmp = "Preprocessing Queue:\n"
for preprocessing_class in preprocessing_queue:
tmp += str(preprocessing_class) + "\n"
logging.info(tmp)
# Load from pickled file
if not os.path.isfile(path_to_data):
logging.info(("'%s' does not exist. Please either abort this script "
"or update the data location."), path_to_data)
raw_dataset_path = utils.choose_raw_dataset()
# Get project-relative path
raw_dataset_path = "raw-datasets" + \
raw_dataset_path.split("raw-datasets")[1]
print(raw_dataset_path)
sys.exit() # TODO: Update model!
logging.info("Start loading data...")
loaded = pickle.load(open(path_to_data, "rb"))
raw_datasets = loaded['handwriting_datasets']
logging.info("Start applying preprocessing methods")
start_time = time.time()
for i, raw_dataset in enumerate(raw_datasets):
if i % 10 == 0 and i > 0:
utils.print_status(len(raw_datasets), i, start_time)
# Do the work
raw_dataset['handwriting'].preprocessing(preprocessing_queue)
sys.stdout.write("\r%0.2f%% (done)\033[K\n" % (100))
print("")
pickle.dump({'handwriting_datasets': raw_datasets,
'formula_id2latex': loaded['formula_id2latex'],
'preprocessing_queue': preprocessing_queue},
open(outputpath, "wb"),
2) |
<SYSTEM_TASK:>
Main part of preprocess_dataset that glues things together.
<END_TASK>
<USER_TASK:>
Description:
def main(folder):
"""Main part of preprocess_dataset that glues things togeter.""" |
raw_datapath, outputpath, p_queue = get_parameters(folder)
create_preprocessed_dataset(raw_datapath, outputpath, p_queue)
utils.create_run_logfile(folder) |
<SYSTEM_TASK:>
Create a lookup file where the index is mapped to the formula id and the LaTeX command.
<END_TASK>
<USER_TASK:>
Description:
def _create_index_formula_lookup(formula_id2index,
feature_folder,
index2latex):
"""
Create a lookup file where the index is mapped to the formula id and the
LaTeX command.
Parameters
----------
formula_id2index : dict
feature_folder : str
Path to a folder in which a feature file as well as an
index2formula_id.csv is.
index2latex : dict
Maps an integer index to a LaTeX command
""" |
index2formula_id = sorted(formula_id2index.items(), key=lambda n: n[1])
index2formula_file = os.path.join(feature_folder, "index2formula_id.csv")
with open(index2formula_file, "w") as f:
f.write("index,formula_id,latex\n")
for formula_id, index in index2formula_id:
f.write("%i,%i,%s\n" % (index, formula_id, index2latex[index])) |
<SYSTEM_TASK:>
Multiply the training set by all methods listed in mult_queue.
<END_TASK>
<USER_TASK:>
Description:
def training_set_multiplication(training_set, mult_queue):
"""
Multiply the training set by all methods listed in mult_queue.
Parameters
----------
training_set :
set of all recordings that will be used for training
mult_queue :
list of all algorithms that will take one recording and generate more
than one.
Returns
-------
multiple recordings (the multiplied training set)
""" |
logging.info("Multiply data...")
for algorithm in mult_queue:
new_trning_set = []
for recording in training_set:
samples = algorithm(recording['handwriting'])
for sample in samples:
new_trning_set.append({'id': recording['id'],
'is_in_testset': 0,
'formula_id': recording['formula_id'],
'handwriting': sample,
'formula_in_latex':
recording['formula_in_latex']})
training_set = new_trning_set
return new_trning_set |
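A usage sketch with a hypothetical multiplication algorithm that simply duplicates each recording; real algorithms (e.g. small rotations) return several modified copies:
def duplicate(handwriting):
    # Stand-in for a real data-multiplication algorithm.
    return [handwriting, handwriting]

training_set = [{'id': 1, 'formula_id': 42, 'formula_in_latex': 'x',
                 'handwriting': 'hw-object'}]
result = training_set_multiplication(training_set, [duplicate])
# len(result) == 2, both entries keep formula_id 42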
<SYSTEM_TASK:>
Calculate min, max and mean for each feature. Store it in object.
<END_TASK>
<USER_TASK:>
Description:
def _calculate_feature_stats(feature_list, prepared, serialization_file): # pylint: disable=R0914
"""Calculate min, max and mean for each feature. Store it in object.""" |
# Create feature only list
feats = [x for x, _ in prepared] # Label is not necessary
# Calculate all means / mins / maxs
means = numpy.mean(feats, 0)
mins = numpy.min(feats, 0)
maxs = numpy.max(feats, 0)
# Calculate, min, max and mean vector for each feature with
# normalization
start = 0
mode = 'w'
arguments = {'newline': ''}
if sys.version_info.major < 3:
mode += 'b'
arguments = {}
with open(serialization_file, mode, **arguments) as csvfile:
spamwriter = csv.writer(csvfile,
delimiter=str(';'),
quotechar=str('"'),
quoting=csv.QUOTE_MINIMAL)
for feature in feature_list:
end = start + feature.get_dimension()
# append the data to the feature class
feature.mean = numpy.array(means[start:end])
feature.min = numpy.array(mins[start:end])
feature.max = numpy.array(maxs[start:end])
start = end
for mean, fmax, fmin in zip(feature.mean, feature.max,
feature.min):
spamwriter.writerow([mean, fmax - fmin]) |
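The per-feature statistics are plain column-wise reductions over the feature matrix; a small numpy sketch with made-up feature rows:
import numpy
feats = [[1.0, 10.0], [3.0, 30.0], [5.0, 20.0]]
numpy.mean(feats, 0)  # array([ 3., 20.])
numpy.min(feats, 0)   # array([ 1., 10.])
numpy.max(feats, 0)   # array([ 5., 30.])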
<SYSTEM_TASK:>
Create the hdf5 file.
<END_TASK>
<USER_TASK:>
Description:
def make_hdf5(dataset_name, feature_count, data,
output_filename, create_learning_curve):
"""
Create the hdf5 file.
Parameters
----------
dataset_name : string
name of the dataset, e.g. 'traindata'
feature_count : integer
number of features
data : list of tuples
data format ('feature_string', 'label')
output_filename : string
name of the file that hdf5_create will use to create the hdf5 file
create_learning_curve : boolean
whether to additionally write capped training sets for a learning curve
""" |
# create raw data file for hdf5_create
if dataset_name == "traindata" and create_learning_curve:
max_trainingexamples = 501
output_filename_save = output_filename
steps = 10
for trainingexamples in range(100, max_trainingexamples, steps):
# adjust output_filename
tmp = output_filename_save.split(".")
tmp[-2] += "-%i-examples" % trainingexamples
output_filename = ".".join(map(str, tmp))
# Make sure the data has no more than ``trainingexamples``
# examples per symbol
seen_symbols = defaultdict(int)
new_data = []
for feature_string, label in data:
if seen_symbols[label] < trainingexamples:
seen_symbols[label] += 1
new_data.append((feature_string, label))
# Create the hdf5 file
utils.create_hdf5(output_filename, feature_count, new_data)
else:
utils.create_hdf5(output_filename, feature_count, data) |
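The learning-curve branch caps the number of examples per label; a self-contained sketch of that capping loop with made-up data:
from collections import defaultdict

data = [('f1', 'a'), ('f2', 'a'), ('f3', 'b'), ('f4', 'a')]
seen, capped = defaultdict(int), []
for feature_string, label in data:
    if seen[label] < 2:  # keep at most 2 examples per label
        seen[label] += 1
        capped.append((feature_string, label))
# capped == [('f1', 'a'), ('f2', 'a'), ('f3', 'b')]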
<SYSTEM_TASK:>
Create a dataset for machine learning of segmentations.
<END_TASK>
<USER_TASK:>
Description:
def get_dataset():
"""Create a dataset for machine learning of segmentations.
Returns
-------
tuple :
(X, y) where X is a list of tuples. Each tuple is a feature. y
is a list of labels (0 for 'not in one symbol' and 1 for 'in symbol')
""" |
seg_data = "segmentation-X.npy"
seg_labels = "segmentation-y.npy"
# seg_ids = "segmentation-ids.npy"
if os.path.isfile(seg_data) and os.path.isfile(seg_labels):
X = numpy.load(seg_data)
y = numpy.load(seg_labels)
with open('datasets.pickle', 'rb') as f:
datasets = pickle.load(f)
return (X, y, datasets)
datasets = get_segmented_raw_data()
X, y = [], []
for i, data in enumerate(datasets):
if i % 10 == 0:
logging.info("[Create Dataset] i=%i/%i", i, len(datasets))
segmentation = json.loads(data['segmentation'])
recording = json.loads(data['data'])
X_symbol = [get_median_stroke_distance(recording)]
if len([p for s in recording for p in s if p['time'] is None]) > 0:
continue
combis = itertools.combinations(list(range(len(recording))), 2)
for strokeid1, strokeid2 in combis:
stroke1 = recording[strokeid1]
stroke2 = recording[strokeid2]
if len(stroke1) == 0 or len(stroke2) == 0:
logging.debug("stroke len 0. Skip.")
continue
X.append(get_stroke_features(recording, strokeid1, strokeid2) +
X_symbol)
same_symbol = (_get_symbol_index(strokeid1, segmentation) ==
_get_symbol_index(strokeid2, segmentation))
y.append(int(same_symbol))
X = numpy.array(X, dtype=numpy.float32)
y = numpy.array(y, dtype=numpy.int32)
numpy.save(seg_data, X)
numpy.save(seg_labels, y)
datasets = filter_recordings(datasets)
with open('datasets.pickle', 'wb') as f:
pickle.dump(datasets, f, protocol=pickle.HIGHEST_PROTOCOL)
return (X, y, datasets) |
<SYSTEM_TASK:>
Fetch data from the server.
<END_TASK>
<USER_TASK:>
Description:
def get_segmented_raw_data(top_n=10000):
"""Fetch data from the server.
Parameters
----------
top_n : int
Number of data sets which get fetched from the server.
""" |
cfg = utils.get_database_configuration()
mysql = cfg['mysql_online']
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
sql = ("SELECT `id`, `data`, `segmentation` "
"FROM `wm_raw_draw_data` WHERE "
"(`segmentation` IS NOT NULL OR `accepted_formula_id` IS NOT NULL) "
"AND `wild_point_count` = 0 "
"AND `stroke_segmentable` = 1 "
"ORDER BY `id` LIMIT 0, %i") % top_n
logging.info(sql)
cursor.execute(sql)
datasets = cursor.fetchall()
logging.info("Fetched %i recordings. Add missing segmentations.",
len(datasets))
for i in range(len(datasets)):
if datasets[i]['segmentation'] is None:
stroke_count = len(json.loads(datasets[i]['data']))
if stroke_count > 10:
print("Massive stroke count! %i" % stroke_count)
datasets[i]['segmentation'] = str([[s for s in
range(stroke_count)]])
return datasets |
<SYSTEM_TASK:>
Get the features used to decide if two strokes belong to the same symbol or not.
<END_TASK>
<USER_TASK:>
Description:
def get_stroke_features(recording, strokeid1, strokeid2):
"""Get the features used to decide if two strokes belong to the same symbol
or not.
Parameters
----------
recording : list
A list of strokes
strokeid1 : int
strokeid2 : int
Returns
-------
list :
A list of features which could be useful to decide if stroke1 and
stroke2 belong to the same symbol.
""" |
stroke1 = recording[strokeid1]
stroke2 = recording[strokeid2]
assert isinstance(stroke1, list), "stroke1 is a %s" % type(stroke1)
X_i = []
for s in [stroke1, stroke2]:
hw = HandwrittenData(json.dumps([s]))
feat1 = features.ConstantPointCoordinates(strokes=1,
points_per_stroke=20,
fill_empty_with=0)
feat2 = features.ReCurvature(strokes=1)
feat3 = features.Ink()
X_i += hw.feature_extraction([feat1, feat2, feat3])
X_i += [get_strokes_distance(stroke1, stroke2)] # Distance of strokes
X_i += [get_time_distance(stroke1, stroke2)] # Time in between
X_i += [abs(strokeid2-strokeid1)] # Strokes in between
# X_i += [get_black_percentage()]
return X_i |
<SYSTEM_TASK:>
Get a list of segmentations of recording with the probability of the segmentation being correct.
<END_TASK>
<USER_TASK:>
Description:
def get_segmentation(recording,
single_clf,
single_stroke_clf,
stroke_segmented_classifier):
"""
Get a list of segmentations of recording with the probability of the
segmentation being correct.
Parameters
----------
recording : A list of lists
Each sublist represents a stroke
single_clf : object
A classifier for single symbols
single_stroke_clf : object
A classifier which decides if a single stroke is a complete symbol
stroke_segmented_classifier : object
Classifier which decides if two strokes belong to one symbol or not
Returns
-------
list of tuples :
Segmentations together with their probabilities. Each probability
has to be positive and the sum may not be bigger than 1.0.
Examples
--------
>>> stroke1 = [{'x': 0, 'y': 0, 'time': 0}, {'x': 12, 'y': 12, 'time': 1}]
>>> stroke2 = [{'x': 0, 'y': 10, 'time': 2}, {'x': 12, 'y': 0, 'time': 3}]
>>> stroke3 = [{'x': 14, 'y': 0, 'time': 5}, {'x': 14, 'y': 12, 'time': 6}]
>>> #get_segmentation([stroke1, stroke2, stroke3], single_clf)
[
([[0, 1], [2]], 0.8),
([[0], [1,2]], 0.1),
([[0,2], [1]], 0.05)
]
""" |
mst_wood = get_mst_wood(recording, single_clf)
return [(normalize_segmentation([mst['strokes'] for mst in mst_wood]),
1.0)]
# NOTE: the early return above makes everything below unreachable;
# it appears to be kept as work in progress (see the TODOs).
# HandwrittenData(json.dumps(recording)).show()
# return [([[i for i in range(len(recording))]], 1.0)]
# #mst_wood = break_mst(mst, recording) # TODO
# for i in range(0, 2**len(points)):
# segmentation = get_segmentation_from_mst(mst, i)
# TODO
X_symbol = [get_median_stroke_distance(recording)]
# Pre-segment to 8 strokes
# TODO: Take first 4 strokes and add strokes within their bounding box
# TODO: What if that is more then 8 strokes?
# -> Geometry
# Build tree structure. A stroke `c` is the child of another stroke `p`,
# if the bounding box of `c` is within the bounding box of `p`.
# Problem: B <-> 13
g_top_segmentations = [([], 1.0)] # g_top_segmentations
# range(int(math.ceil(float(len(recording))/8))):
for chunk_part in mst_wood:
# chunk = recording[8*chunk_part:8*(chunk_part+1)]
chunk = [recording[stroke] for stroke in chunk_part['strokes']]
# Segment after pre-segmentation
prob = [[1.0 for _ in chunk] for _ in chunk]
for strokeid1, strokeid2 in itertools.product(range(len(chunk)),
range(len(chunk))):
if strokeid1 == strokeid2:
continue
X = get_stroke_features(chunk, strokeid1, strokeid2)
X += X_symbol
X = numpy.array([X], dtype=numpy.float32)
prob[strokeid1][strokeid2] = stroke_segmented_classifier(X)
# Top segmentations
ts = list(partitions.get_top_segmentations(prob, 500))
for i, segmentation in enumerate(ts):
symbols = apply_segmentation(chunk, segmentation)
min_top2 = partitions.TopFinder(1, find_min=True)
# Use a separate loop variable so ``i`` (the segmentation index)
# is not clobbered before ``ts[i][1]`` is updated.
for symbol_index, symbol in enumerate(symbols):
predictions = single_clf.predict(symbol)
min_top2.push("value-%i" % symbol_index,
predictions[0]['probability'] +
predictions[1]['probability'])
ts[i][1] *= list(min_top2)[0][1]
# for i, segmentation in enumerate(ts):
# ts[i][0] = update_segmentation_data(ts[i][0], 8*chunk_part)
g_top_segmentations = merge_segmentations(g_top_segmentations,
ts,
chunk_part['strokes'])
return [(normalize_segmentation(seg), probability)
for seg, probability in g_top_segmentations] |
<SYSTEM_TASK:>
Break mst into multiple MSTs by removing one node i.
<END_TASK>
<USER_TASK:>
Description:
def break_mst(mst, i):
"""
Break mst into multiple MSTs by removing one node i.
Parameters
----------
mst : symmetrical square matrix
i : index of the mst where to break
Returns
-------
list of dictionarys ('mst' and 'strokes' are the keys)
""" |
for j in range(len(mst['mst'])):
mst['mst'][i][j] = 0
mst['mst'][j][i] = 0
_, components = scipy.sparse.csgraph.connected_components(mst['mst'])
comp_indices = {}
for el in set(components):
comp_indices[el] = {'strokes': [], 'strokes_i': []}
# Use loop variables that do not shadow the parameter ``i``; otherwise
# the assertion messages below would report the wrong break index.
for idx, comp_nr in enumerate(components):
comp_indices[comp_nr]['strokes'].append(mst['strokes'][idx])
comp_indices[comp_nr]['strokes_i'].append(idx)
mst_wood = []
for key in comp_indices:
matrix = []
for row, line in enumerate(mst['mst']):
line_add = []
if row not in comp_indices[key]['strokes_i']:
continue
for col, el in enumerate(line):
if col in comp_indices[key]['strokes_i']:
line_add.append(el)
matrix.append(line_add)
assert len(matrix) > 0, \
("len(matrix) == 0 (strokes: %s, mst=%s, i=%i)" %
(comp_indices[key]['strokes'], mst, i))
assert len(matrix) == len(matrix[0]), \
("matrix was %i x %i, but should be square" %
(len(matrix), len(matrix[0])))
assert len(matrix) == len(comp_indices[key]['strokes']), \
(("stroke length was not equal to matrix length "
"(strokes=%s, len(matrix)=%i)") %
(comp_indices[key]['strokes'], len(matrix)))
mst_wood.append({'mst': matrix,
'strokes': comp_indices[key]['strokes']})
return mst_wood |
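A usage sketch for a path-shaped MST over three strokes; removing the middle node splits it into three single-stroke components. Note that break_mst modifies mst['mst'] in place, needs scipy, and the order of the returned components may vary:
mst = {'mst': [[0, 1, 0],
               [1, 0, 1],
               [0, 1, 0]],
       'strokes': [4, 5, 6]}
break_mst(mst, 1)
# -> [{'mst': [[0]], 'strokes': [4]},
#     {'mst': [[0]], 'strokes': [5]},
#     {'mst': [[0]], 'strokes': [6]}]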