code (string, 52–7.75k chars) | docs (string, 1–5.85k chars) |
---|---|
def create_graph_from_adjacency_matrix(adjacency_matrix):
if is_adjacency_matrix_symmetric(adjacency_matrix):
graph = UndirectedGraph()
else:
graph = DirectedGraph()
node_column_mapping = []
num_columns = len(adjacency_matrix)
for _ in range(num_columns):
node_id = graph.new_node()
node_column_mapping.append(node_id)
for j in range(num_columns):
for i in range(num_columns):
if adjacency_matrix[j][i]:
jnode_id = node_column_mapping[j]
inode_id = node_column_mapping[i]
# Because of our adjacency matrix encoding, [j][i] in our code corresponds to [i][j] in a traditional matrix interpretation
# Thus, we need to put an edge from node i to node j if [j][i] in our code is non-zero
graph.new_edge(inode_id, jnode_id)
return (graph, node_column_mapping) | Generates a graph from an adjacency matrix specification.
Returns a tuple containing the graph and a list-mapping of node ids to matrix column indices.
The graph will be an UndirectedGraph if the provided adjacency matrix is symmetric.
The graph will be a DirectedGraph if the provided adjacency matrix is not symmetric.
Ref: http://mathworld.wolfram.com/AdjacencyMatrix.html |
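A minimal usage sketch (hedged: the matrix values are illustrative, and the graph classes are assumed to be the same `UndirectedGraph`/`DirectedGraph` used by the function above):
```python
adjacency_matrix = [
    [0, 1, 1],
    [1, 0, 0],
    [1, 0, 0],
]  # symmetric, so an UndirectedGraph is produced
graph, node_column_mapping = create_graph_from_adjacency_matrix(adjacency_matrix)
# node_column_mapping[i] is the node id created for matrix column i
```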
def is_adjacency_matrix_symmetric(adjacency_matrix):
# Verify that the matrix is square
num_columns = len(adjacency_matrix)
for column in adjacency_matrix:
# In a square matrix, every row should be the same length as the number of columns
if len(column) != num_columns:
return False
# Loop through the bottom half of the matrix and compare it to the top half
# --We do the bottom half because of how we construct adjacency matrices
max_i = 0
for j in range(num_columns):
for i in range(max_i):
# If i == j, we can skip ahead so we don't compare with ourself
if i == j:
continue
# Compare the value in the bottom half with the mirrored value in the top half
# If they aren't the same, the matrix isn't symmetric
if adjacency_matrix[j][i] != adjacency_matrix[i][j]:
return False
max_i += 1
# If we reach this far without returning false, then we know that everything matched,
# which makes this a symmetric matrix
return True | Determines if an adjacency matrix is symmetric.
Ref: http://mathworld.wolfram.com/SymmetricMatrix.html |
def a_star_search(graph, start, goal):
all_nodes = graph.get_all_node_ids()
if start not in all_nodes:
raise NonexistentNodeError(start)
if goal not in all_nodes:
raise NonexistentNodeError(goal)
came_from, cost_so_far, goal_reached = _a_star_search_internal(graph, start, goal)
if goal_reached:
path = reconstruct_path(came_from, start, goal)
path.reverse()
return path
else:
return [] | Runs an A* search on the specified graph to find a path from the ''start'' node to the ''goal'' node.
Returns a list of nodes specifying a minimal path between the two nodes.
If no path exists (disconnected components), returns an empty list. |
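A short, hedged usage sketch; the node ids 1 and 7 are placeholders for ids that actually exist in `graph`:
```python
path = a_star_search(graph, start=1, goal=7)
if path:
    print("Minimal path:", path)   # e.g. [1, 3, 5, 7]
else:
    print("The two nodes lie in disconnected components.")
```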
def _a_star_search_internal(graph, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {start: None}
cost_so_far = {start: 0}
goal_reached = False
while not frontier.empty():
current = frontier.get()
if current == goal:
goal_reached = True
break
for next_node in graph.neighbors(current):
new_cost = cost_so_far[current] + graph.edge_cost(current, next_node)
if next_node not in cost_so_far or new_cost < cost_so_far[next_node]:
cost_so_far[next_node] = new_cost
priority = new_cost + heuristic(goal, next_node)
frontier.put(next_node, priority)
came_from[next_node] = current
return came_from, cost_so_far, goal_reached | Performs an A* search, returning information about whether the goal node was reached
and path cost information that can be used to reconstruct the path. |
def add_set(self):
self.__label_counter += 1
new_label = self.__label_counter
self.__forest[new_label] = -1 # A negative value marks a root; every new set starts as its own tree
self.__set_counter += 1
return new_label | Adds a new set to the forest.
Returns a label by which the new set can be referenced |
def find(self, node_label):
queue = []
current_node = node_label
while self.__forest[current_node] >= 0:
queue.append(current_node)
current_node = self.__forest[current_node]
root_node = current_node
# Path compression
for n in queue:
self.__forest[n] = root_node
return root_node | Finds the set containing the node_label.
Returns the set label. |
def union(self, label_a, label_b):
# Base case to avoid work
if label_a == label_b:
return
# Find the tree root of each node
root_a = self.find(label_a)
root_b = self.find(label_b)
# Avoid merging a tree to itself
if root_a == root_b:
return
self.__internal_union(root_a, root_b)
self.__set_counter -= 1 | Joins two sets into a single new set.
label_a, label_b can be any nodes within the sets |
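A sketch of how the disjoint-set methods above are typically driven. The class name `UnionFind` is an assumption made purely for illustration; only the methods shown in this section are used:
```python
uf = UnionFind()          # hypothetical constructor for the class holding __forest
a = uf.add_set()          # each call creates a new singleton set and returns its label
b = uf.add_set()
c = uf.add_set()
uf.union(a, b)            # now {a, b} and {c}
assert uf.find(a) == uf.find(b)
assert uf.find(a) != uf.find(c)
```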
def __internal_union(self, root_a, root_b):
# Merge the trees, smaller to larger
update_rank = False
# --Determine the larger tree
rank_a = self.__forest[root_a]
rank_b = self.__forest[root_b]
if rank_a < rank_b:
larger = root_b
smaller = root_a
else:
larger = root_a
smaller = root_b
if rank_a == rank_b:
update_rank = True
# --Make the smaller tree a subtree of the larger tree
self.__forest[smaller] = larger
# --Update the rank of the new tree (if necessary)
if update_rank:
self.__forest[larger] -= 1 | Internal function to join two set trees specified by root_a and root_b.
Assumes root_a and root_b are distinct. |
def is_planar(graph):
# Determine connected components as subgraphs; their planarity is independent of each other
connected_components = get_connected_components_as_subgraphs(graph)
for component in connected_components:
# Biconnected components likewise have independent planarity
biconnected_components = find_biconnected_components_as_subgraphs(component)
for bi_component in biconnected_components:
planarity = __is_subgraph_planar(bi_component)
if not planarity:
return False
return True | Determines whether a graph is planar or not. |
def __is_subgraph_planar(graph):
# --First pass: Determine edge and vertex counts to validate Euler's Formula
num_nodes = graph.num_nodes()
num_edges = graph.num_edges()
# --We can guarantee that if there are 4 or fewer nodes, then the graph is planar
# --A 4-node simple graph has a maximum of 6 possible edges (K4); this will always satisfy Euler's Formula:
# -- 6 <= 3(4 - 2)
if num_nodes < 5:
return True
if num_edges > 3*(num_nodes - 2):
return False
# --At this point, we have no choice but to run the calculation the hard way
return kocay_planarity_test(graph) | Internal function to determine if a subgraph is planar. |
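The cheap edge-count bound can be restated as a standalone helper for clarity (the helper name is ours, not part of the library):
```python
def passes_euler_edge_bound(num_nodes, num_edges):
    # Mirrors the quick check in __is_subgraph_planar above.
    return num_nodes < 5 or num_edges <= 3 * (num_nodes - 2)

assert passes_euler_edge_bound(5, 10) is False   # K5: 10 > 3*(5-2) = 9, rejected immediately
assert passes_euler_edge_bound(6, 9) is True     # K3,3: 9 <= 3*(6-2) = 12, needs the full test
```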
def __setup_dfs_data(graph, adj):
dfs_data = __get_dfs_data(graph, adj)
dfs_data['graph'] = graph
dfs_data['adj'] = adj
L1, L2 = __low_point_dfs(dfs_data)
dfs_data['lowpoint_1_lookup'] = L1
dfs_data['lowpoint_2_lookup'] = L2
edge_weights = __calculate_edge_weights(dfs_data)
dfs_data['edge_weights'] = edge_weights
return dfs_data | Sets up the dfs_data object, for consistency. |
def __calculate_edge_weights(dfs_data):
graph = dfs_data['graph']
weights = {}
for edge_id in graph.get_all_edge_ids():
edge_weight = __edge_weight(edge_id, dfs_data)
weights[edge_id] = edge_weight
return weights | Calculates the weight of each edge, for embedding-order sorting. |
def __sort_adjacency_lists(dfs_data):
new_adjacency_lists = {}
adjacency_lists = dfs_data['adj']
edge_weights = dfs_data['edge_weights']
edge_lookup = dfs_data['edge_lookup']
for node_id, adj_list in list(adjacency_lists.items()):
node_weight_lookup = {}
frond_lookup = {}
for node_b in adj_list:
edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(node_id, node_b)
node_weight_lookup[node_b] = edge_weights[edge_id]
frond_lookup[node_b] = 1 if edge_lookup[edge_id] == 'backedge' else 2
# Fronds should be before branches if the weights are equal
new_list = sorted(adj_list, key=lambda n: frond_lookup[n])
# Sort by weights
new_list.sort(key=lambda n: node_weight_lookup[n])
# Add the new sorted list to the new adjacency list lookup table
new_adjacency_lists[node_id] = new_list
return new_adjacency_lists | Sorts the adjacency list representation by the edge weights. |
def __branch_point_dfs(dfs_data):
u = dfs_data['ordering'][0]
large_n = {}
large_n[u] = 0
stem = {}
stem[u] = u
b = {}
b[u] = 1
__branch_point_dfs_recursive(u, large_n, b, stem, dfs_data)
dfs_data['N_u_lookup'] = large_n
dfs_data['b_u_lookup'] = b
return | DFS that calculates the b(u) and N(u) lookups, and also reorders the adjacency lists. |
def __embed_branch(dfs_data):
u = dfs_data['ordering'][0]
dfs_data['LF'] = []
dfs_data['RF'] = []
dfs_data['FG'] = {}
n = dfs_data['graph'].num_nodes()
f0 = (0, n)
g0 = (0, n)
L0 = {'u': 0, 'v': n}
R0 = {'x': 0, 'y': n}
dfs_data['LF'].append(f0)
dfs_data['RF'].append(g0)
dfs_data['FG'][0] = [L0, R0]
dfs_data['FG']['m'] = 0
dfs_data['FG']['l'] = 0
dfs_data['FG']['r'] = 0
#print 'DFS Ordering: {}'.format(dfs_data['ordering'])
#for node in dfs_data['ordering']:
#print '{}: {}'.format(node, dfs_data['adj'][node])
nonplanar = __embed_branch_recursive(u, dfs_data)
#print "Nonplanar:", nonplanar
return not nonplanar | Builds the combinatorial embedding of the graph. Returns whether the graph is planar. |
def __insert_branch(u, v, dfs_data):
w = L1(v, dfs_data)
d_u = D(u, dfs_data)
d_w = D(w, dfs_data)
# Embed uw
successful = __embed_frond(u, w, dfs_data)
if not successful:
return False
# Embed a branch marker uu on the side opposite to uw, in the same frond block
#successful = __embed_frond(u, v, dfs_data, as_branch_marker=True)
successful = __embed_frond(u, u, dfs_data, as_branch_marker=True)
if not successful:
return False
return True | Embeds a branch Bu(v) (as described on page 22 of the paper). Returns whether the embedding was successful. |
def __do_case_5_work(d_w, d_u, case_1, case_2, case_3, dfs_data):
# --We should only ever see u-cases 1 and 2
if case_3:
# --We should never get here
return False
comp_d_w = abs(d_w)
#if case_1:
# --Add the frond to the left side
__insert_frond_LF(d_w, d_u, dfs_data)
# --Add uw to Lm
m = dfs_data['FG']['m']
Lm = L(m, dfs_data)
if comp_d_w < Lm['u']:
Lm['u'] = d_w
if d_u > Lm['v']:
Lm['v'] = d_u
# --Case 2 requires a bit of extra work
if case_2:
Lm['u'] = d_w
x_m1 = fn_x(m-1, dfs_data)
while comp_d_w < x_m1:
merge_Fm(dfs_data)
m = dfs_data['FG']['m']
x_m1 = fn_x(m-1, dfs_data)
#else:
#print "Case 5 work, u-case 1"
return True | Encapsulates the work that will be done for case 5 of __embed_frond,
since it gets used in more than one place. |
def __do_case_6_work(d_w, d_u, case_1, case_2, case_3, dfs_data):
# --We should only ever see u-cases 1 and 3
if case_2:
# --We should never get here
return False
comp_d_w = abs(d_w)
# --Add the frond to the right side
__insert_frond_RF(d_w, d_u, dfs_data)
# --Add uw to Rm
m = dfs_data['FG']['m']
Rm = R(m, dfs_data)
if comp_d_w < Rm['x']:
Rm['x'] = d_w
if d_u > Rm['y']:
Rm['y'] = d_u
# --Case 3 requires a bit of extra work
if case_3:
Rm['x'] = d_w
u_m1 = u(m-1, dfs_data)
while comp_d_w < u_m1:
merge_Fm(dfs_data)
m = dfs_data['FG']['m']
u_m1 = u(m-1, dfs_data)
#else:
#print "Case 6 work, u-case 1"
return True | Encapsulates the work that will be done for case 6 of __embed_frond,
since it gets used in more than one place. |
def __insert_frond_RF(d_w, d_u, dfs_data):
# --Add the frond to the right side
dfs_data['RF'].append( (d_w, d_u) )
dfs_data['FG']['r'] += 1
dfs_data['last_inserted_side'] = 'RF' | Encapsulates the process of inserting a frond uw into the right side frond group. |
def __insert_frond_LF(d_w, d_u, dfs_data):
# --Add the frond to the left side
dfs_data['LF'].append( (d_w, d_u) )
dfs_data['FG']['l'] += 1
dfs_data['last_inserted_side'] = 'LF' | Encapsulates the process of inserting a frond uw into the left side frond group. |
def merge_Fm(dfs_data):
FG = dfs_data['FG']
m = FG['m']
FGm = FG[m]
FGm1 = FG[m-1]
if FGm[0]['u'] < FGm1[0]['u']:
FGm1[0]['u'] = FGm[0]['u']
if FGm[0]['v'] > FGm1[0]['v']:
FGm1[0]['v'] = FGm[0]['v']
if FGm[1]['x'] < FGm1[1]['x']:
FGm1[1]['x'] = FGm[1]['x']
if FGm[1]['y'] > FGm1[1]['y']:
FGm1[1]['y'] = FGm[1]['y']
del FG[m]
FG['m'] -= 1 | Merges Fm-1 and Fm, as defined on page 19 of the paper. |
def __check_left_side_conflict(x, y, dfs_data):
l = dfs_data['FG']['l']
w, z = dfs_data['LF'][l]
return __check_conflict_fronds(x, y, w, z, dfs_data) | Checks to see if the frond xy will conflict with a frond on the left side of the embedding. |
def __check_right_side_conflict(x, y, dfs_data):
r = dfs_data['FG']['r']
w, z = dfs_data['RF'][r]
return __check_conflict_fronds(x, y, w, z, dfs_data) | Checks to see if the frond xy will conflict with a frond on the right side of the embedding. |
def __check_conflict_fronds(x, y, w, z, dfs_data):
# Case 1: False frond and corresponding branch marker
# --x and w should both be negative, and either xy or wz should be the same value uu
if x < 0 and w < 0 and (x == y or w == z):
# --Determine if the marker and frond correspond (have the same low-value)
if x == w:
return True
return False
# Case 2: Fronds with an overlap
if b(x, dfs_data) == b(w, dfs_data) and x > w and w > y and y > z:
return False
# Case 3: Branch marker and a frond on that branch
if x < 0 or w < 0:
# --Determine which one is the branch marker
if x < 0:
u = abs(x)
t = y
x = w
y = z
else:
u = abs(w)
t = z
# --Run the rest of the tests
if b(x, dfs_data) == u and y < u and \
(x, y) in __dfsify_branch_uv(u, t, dfs_data):
return True
return False
# If none of the conflict conditions were met, then there are no conflicts
return False | Checks a pair of fronds to see if they conflict. Returns True if a conflict was found, False otherwise. |
def __dfsify_branch_uv(u, v, dfs_data):
buv = B(u, v, dfs_data)
new_list = []
for edge_id in buv:
edge = dfs_data['graph'].get_edge(edge_id)
j, k = edge['vertices']
d_x = D(j, dfs_data)
d_y = D(k, dfs_data)
if d_x < d_y:
smaller = d_x
larger = d_y
else:
smaller = d_y
larger = d_x
frond = (smaller, larger)
new_list.append(frond)
return new_list | Helper function to convert the output of Bu(v) from edge ids to dfs-ordered fronds. |
def __get_dfs_data(graph, adj=None):
ordering, parent_lookup, children_lookup = depth_first_search_with_parent_data(graph, adjacency_lists=adj)
ordering_lookup = dict(list(zip(ordering, list(range(1, len(ordering) + 1)))))
node_lookup = dict(list(zip(list(range(1, len(ordering) + 1)), ordering)))
edge_lookup = {}
for edge_id in graph.get_all_edge_ids():
edge = graph.get_edge(edge_id)
node_a, node_b = edge['vertices']
parent_a = parent_lookup[node_a]
parent_b = parent_lookup[node_b]
if parent_a == node_b or parent_b == node_a:
edge_lookup[edge_id] = 'tree-edge'
else:
edge_lookup[edge_id] = 'backedge'
dfs_data = {}
dfs_data['ordering'] = ordering
dfs_data['ordering_lookup'] = ordering_lookup
dfs_data['node_lookup'] = node_lookup
dfs_data['edge_lookup'] = edge_lookup
dfs_data['parent_lookup'] = parent_lookup
dfs_data['children_lookup'] = children_lookup
return dfs_data | Internal function that calculates the depth-first search of the graph.
Returns a dictionary with the following data:
* 'ordering': A dfs-ordering list of nodes
* 'ordering_lookup': A lookup dict mapping nodes to dfs-ordering
* 'node_lookup': A lookup dict mapping dfs-ordering to nodes
* 'edge_lookup': A lookup dict mapping edges as tree-edges or back-edges
* 'parent_lookup': A lookup dict mapping nodes to their parent node
* 'children_lookup': A lookup dict mapping nodes to their children |
def __calculate_adjacency_lists(graph):
adj = {}
for node in graph.get_all_node_ids():
neighbors = graph.neighbors(node)
adj[node] = neighbors
return adj | Builds an adjacency list representation for the graph, since we can't guarantee that the
internal representation of the graph is stored that way. |
def __get_all_lowpoints(dfs_data):
lowpoint_1_lookup = {}
lowpoint_2_lookup = {}
ordering = dfs_data['ordering']
for node in ordering:
low_1, low_2 = __get_lowpoints(node, dfs_data)
lowpoint_1_lookup[node] = low_1
lowpoint_2_lookup[node] = low_2
return lowpoint_1_lookup, lowpoint_2_lookup | Calculates the lowpoints for each node in a graph. |
def __get_lowpoints(node, dfs_data):
ordering_lookup = dfs_data['ordering_lookup']
t_u = T(node, dfs_data)
sorted_t_u = sorted(t_u, key=lambda a: ordering_lookup[a])
lowpoint_1 = sorted_t_u[0]
lowpoint_2 = sorted_t_u[1]
return lowpoint_1, lowpoint_2 | Calculates the lowpoints for a single node in a graph. |
def __edge_weight(edge_id, dfs_data):
graph = dfs_data['graph']
edge_lookup = dfs_data['edge_lookup']
edge = graph.get_edge(edge_id)
u, v = edge['vertices']
d_u = D(u, dfs_data)
d_v = D(v, dfs_data)
lp_1 = L1(v, dfs_data)
d_lp_1 = D(lp_1, dfs_data)
if edge_lookup[edge_id] == 'backedge' and d_v < d_u:
return 2*d_v
elif is_type_I_branch(u, v, dfs_data):
return 2*d_lp_1
elif is_type_II_branch(u, v, dfs_data):
return 2*d_lp_1 + 1
else:
return 2*graph.num_nodes() + 1 | Calculates the edge weight used to sort edges. |
def __calculate_bu_dfs(dfs_data):
u = dfs_data['ordering'][0]
b = {}
b[u] = D(u, dfs_data)
__calculate_bu_dfs_recursively(u, b, dfs_data)
return b | Calculates the b(u) lookup table. |
def __calculate_bu_dfs_recursively(u, b, dfs_data):
first_time = True
for v in dfs_data['adj'][u]:
if a(v, dfs_data) == u:
if first_time:
b[v] = b[u]
else:
b[v] = D(u, dfs_data)
__calculate_bu_dfs_recursively(v, b, dfs_data)
first_time = False | Calculates the b(u) lookup table with a recursive DFS. |
def is_type_I_branch(u, v, dfs_data):
if u != a(v, dfs_data):
return False
if u == L2(v, dfs_data):
return True
return False | Determines whether a branch uv is a type I branch. |
def is_type_II_branch(u, v, dfs_data):
if u != a(v, dfs_data):
return False
if u < L2(v, dfs_data):
return True
return False | Determines whether a branch uv is a type II branch. |
def is_frond(u, v, dfs_data):
d_u = D(u, dfs_data)
d_v = D(v, dfs_data)
edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(u, v)
return True if dfs_data['edge_lookup'][edge_id] == 'backedge' and d_v < d_u else False | Determines if the edge uv is a frond ("backedge"). |
def __get_descendants(node, dfs_data):
list_of_descendants = []
stack = deque()
children_lookup = dfs_data['children_lookup']
current_node = node
children = children_lookup[current_node]
dfs_current_node = D(current_node, dfs_data)
for n in children:
dfs_child = D(n, dfs_data)
# Validate that the child node is actually a descendant and not an ancestor
if dfs_child > dfs_current_node:
stack.append(n)
while len(stack) > 0:
current_node = stack.pop()
list_of_descendants.append(current_node)
children = children_lookup[current_node]
dfs_current_node = D(current_node, dfs_data)
for n in children:
dfs_child = D(n, dfs_data)
# Validate that the child node is actually a descendant and not an ancestor
if dfs_child > dfs_current_node:
stack.append(n)
return list_of_descendants | Gets the descendants of a node. |
def S_star(u, dfs_data):
s_u = S(u, dfs_data)
if u not in s_u:
s_u.append(u)
return s_u | The set of all descendants of u, with u added. |
def T(u, dfs_data):
return list(set([w for v in S_star(u, dfs_data) for w in A(v, dfs_data)])) | T(u) consists of all vertices adjacent to u or any descendant of u. |
def B(u, v, dfs_data):
"""Bu(v) = {wx | w is in S*(v)}"""
if a(v, dfs_data) != u:
return None
return list(set([edge_id for w in S_star(v, dfs_data) for edge_id in dfs_data['graph'].get_node(w)['edges']])) | The branch at u containing v is the set of all edges incident on v or any descendant of v, if a(v) == u. |
def stem(u, v, dfs_data):
#return dfs_data['graph'].get_first_edge_id_by_node_ids(u, v)
uv_edges = dfs_data['graph'].get_edge_ids_by_node_ids(u, v)
buv_edges = B(u, v, dfs_data)
for edge_id in uv_edges:
if edge_id in buv_edges:
return edge_id
return None | The stem of Bu(v) is the edge uv in Bu(v). |
def wt(u, v, dfs_data):
# Determine the edge_id
edge_id = dfs_data['graph'].get_first_edge_id_by_node_ids(u, v)
# Pull the weight of that edge
return dfs_data['edge_weights'][edge_id] | The wt_u[v] function used in the paper. |
def _L(dfs_data):
"""L(T) = {v | the first w in Adj[v] corresponds to a frond vw}."""
node_set = set()
for v, adj in list(dfs_data['adj'].items()):
w = adj[0]
if is_frond(v, w, dfs_data):
node_set.add(v)
return list(node_set) | L(T) contains leaves and branch points for the DFS-tree T. |
def fn_x(i, dfs_data):
try:
return R(i, dfs_data)['x']
except Exception as e:
# Page 17 states that if Ri is empty, then we take xi to be n
return dfs_data['graph'].num_nodes() | The minimum vertex (DFS-number) in a frond contained in Ri. |
def classify_segmented_recording(recording, result_format=None):
global single_symbol_classifier
if single_symbol_classifier is None:
single_symbol_classifier = SingleClassificer()
return single_symbol_classifier.predict(recording, result_format) | Use this function if you are sure you have a single symbol.
Parameters
----------
recording : string
The recording in JSON format
Returns
-------
list of dictionaries
Each dictionary contains the keys 'symbol' and 'probability'. The list
is sorted descending by probability. |
def predict(self, recording, result_format=None):
evaluate = utils.evaluate_model_single_recording_preloaded
results = evaluate(self.preprocessing_queue,
self.feature_list,
self.model,
self.output_semantics,
recording)
if result_format == 'LaTeX':
for i in range(len(results)):
results[i]['semantics'] = results[i]['semantics'].split(";")[1]
for i in range(len(results)):
splitted = results[i]['semantics'].split(";")
results[i]['complete_latex'] = splitted[1]
return results | Predict the class of the given recording.
Parameters
----------
recording : string
Recording of a single handwritten dataset in JSON format.
result_format : string, optional
If it is 'LaTeX', then only the latex code will be returned
Returns
-------
list |
def main(symbol_yml_file, raw_pickle_file, pickle_dest_path):
metadata = get_metadata()
symbol_ids = get_symbol_ids(symbol_yml_file, metadata)
symbol_ids = transform_sids(symbol_ids)
raw = load_raw(raw_pickle_file)
filter_and_save(raw, symbol_ids, pickle_dest_path) | Parameters
----------
symbol_yml_file : str
Path to a YAML file which contains recordings.
raw_pickle_file : str
Path to a pickle file which contains raw recordings.
pickle_dest_path : str
Path where the filtered dict gets serialized as a pickle file again. |
def get_metadata():
misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
wm_symbols = os.path.join(misc_path, 'wm_symbols.csv')
wm_tags = os.path.join(misc_path, 'wm_tags.csv')
wm_tags2symbols = os.path.join(misc_path, 'wm_tags2symbols.csv')
return {'symbols': read_csv(wm_symbols),
'tags': read_csv(wm_tags),
'tags2symbols': read_csv(wm_tags2symbols)} | Get metadata of symbols, like their tags, id on write-math.com, LaTeX
command and unicode code point.
Returns
-------
dict |
def read_csv(filepath):
symbols = []
with open(filepath, 'rb') as csvfile:
spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
symbols.append(row)
return symbols | Read a CSV into a list of dictionaries. The first line of the CSV determines
the keys of the dictionary.
Parameters
----------
filepath : string
Returns
-------
list of dictionaries |
def load_raw(raw_pickle_file):
with open(raw_pickle_file, 'rb') as f:
raw = pickle.load(f)
logging.info("Loaded %i recordings.", len(raw['handwriting_datasets']))
return raw | Load a pickle file of raw recordings.
Parameters
----------
raw_pickle_file : str
Path to a pickle file which contains raw recordings.
Returns
-------
dict
The loaded pickle file. |
def filter_and_save(raw, symbol_ids, destination_path):
logging.info('Start filtering...')
new_hw_ds = []
for el in raw['handwriting_datasets']:
if el['formula_id'] in symbol_ids:
el['formula_id'] = symbol_ids[el['formula_id']]
el['handwriting'].formula_id = symbol_ids[el['formula_id']]
new_hw_ds.append(el)
raw['handwriting_datasets'] = new_hw_ds
# pickle
logging.info('Start dumping %i recordings...', len(new_hw_ds))
pickle.dump(raw, open(destination_path, "wb"), 2) | Parameters
----------
raw : dict
with key 'handwriting_datasets'
symbol_ids : dict
Maps LaTeX to write-math.com id
destination_path : str
Path where the filtered dict 'raw' will be saved |
def get_metrics(metrics_description):
return utils.get_objectlist(metrics_description,
config_key='data_analyzation_plugins',
module=sys.modules[__name__]) | Get metrics from a list of dictionaries. |
def prepare_file(filename):
directory = os.path.join(utils.get_project_root(), "analyzation/")
if not os.path.exists(directory):
os.makedirs(directory)
workfilename = os.path.join(directory, filename)
open(workfilename, 'w').close() # Truncate the file
return workfilename | Truncate the file and return the filename. |
def sort_by_formula_id(raw_datasets):
by_formula_id = defaultdict(list)
for el in raw_datasets:
by_formula_id[el['handwriting'].formula_id].append(el['handwriting'])
return by_formula_id | Sort a list of formulas by `id`, where `id` represents the accepted
formula id.
Parameters
----------
raw_datasets : list of dictionaries
A list of raw datasets.
Examples
--------
The parameter `raw_datasets` has to be of the format
>>> rd = [{'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=2953),
... 'formula_in_latex': 'A',
... 'id': 2953},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4037),
... 'formula_in_latex': 'A',
... 'id': 4037},
... {'is_in_testset': 0,
... 'formula_id': 31,
... 'handwriting': HandwrittenData(raw_data_id=4056),
... 'formula_in_latex': 'A',
... 'id': 4056}]
>>> sort_by_formula_id(rd) |
def get_features(model_description_features):
return utils.get_objectlist(model_description_features,
config_key='features',
module=sys.modules[__name__]) | Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
] |
def print_featurelist(feature_list):
input_features = sum(map(lambda n: n.get_dimension(), feature_list))
print("## Features (%i)" % input_features)
print("```")
for algorithm in feature_list:
print("* %s" % str(algorithm))
print("```") | Print the feature_list in a human-readable form.
Parameters
----------
feature_list : list
feature objects |
def get_dimension(self):
if self.strokes > 0:
if self.pixel_env > 0:
return (2 + (1 + 2*self.pixel_env)**2) \
* self.strokes * self.points_per_stroke
else:
return 2*self.strokes * self.points_per_stroke
else:
if self.pen_down:
return 3*self.points_per_stroke
else:
return 2*self.points_per_stroke | Get the dimension of the returned feature. This equals the number
of elements in the returned list of numbers. |
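Worked examples of the dimension formula above; the parameter values are illustrative:
```python
strokes, points_per_stroke = 4, 81
assert 2 * strokes * points_per_stroke == 648    # pixel_env == 0: x and y per point
points_per_stroke = 81                           # strokes == 0 with pen_down=True
assert 3 * points_per_stroke == 243              # x, y and pen state per point
```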
def _features_without_strokes(self, hwr_obj):
x = []
for point in hwr_obj.get_pointlist()[0]:
if len(x) >= 3*self.points_per_stroke or \
(len(x) >= 2*self.points_per_stroke and not self.pen_down):
break
x.append(point['x'])
x.append(point['y'])
if self.pen_down:
if 'pen_down' not in point:
logging.error("The "
"ConstantPointCoordinates(strokes=0) "
"feature should only be used after "
"SpaceEvenly preprocessing step.")
else:
x.append(int(point['pen_down']))
if self.pen_down:
while len(x) != 3*self.points_per_stroke:
x.append(self.fill_empty_with)
else:
while len(x) != 2*self.points_per_stroke:
x.append(self.fill_empty_with)
return x | Calculate the ConstantPointCoordinates features for the case of
a single (collapsed) stroke with pen_down features. |
def _stroke_simplification(self, pointlist):
# Find the point with the biggest distance
dmax = 0
index = 0
for i in range(1, len(pointlist)):
d = geometry.perpendicular_distance(pointlist[i],
pointlist[0],
pointlist[-1])
if d > dmax:
index = i
dmax = d
# If the maximum distance is bigger than the threshold 'epsilon', then
# simplify the pointlist recursively
if dmax >= self.epsilon:
# Recursive call
rec_results1 = self._stroke_simplification(pointlist[0:index])
rec_results2 = self._stroke_simplification(pointlist[index:])
result_list = rec_results1[:-1] + rec_results2
else:
result_list = [pointlist[0], pointlist[-1]]
return result_list | The Douglas-Peucker line simplification takes a list of points as an
argument. It tries to simplify this list by removing as many points
as possible while still maintaining the overall shape of the stroke.
It does so by taking the first and the last point, connecting them
by a straight line and searching for the point with the highest
distance. If that distance is bigger than 'epsilon', the point is
important and the algorithm continues recursively. |
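A toy trace of the decision rule described above (the epsilon value is arbitrary):
```python
# The stroke [(0, 0), (1, 0.01), (2, 0)] has its middle point only 0.01 away
# from the straight line through the endpoints; that is below epsilon = 0.1,
# so the middle point is dropped and the simplified stroke keeps [(0, 0), (2, 0)].
```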
def get_dimension(self):
return int(round(float(self.strokes**2)/2 + float(self.strokes)/2)) | Get the dimension of the returned feature. This equals the number
of elements in the returned list of numbers. |
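The expression above equals n*(n+1)/2 for n strokes, i.e. every unordered pair of strokes plus each stroke with itself; a quick check with illustrative numbers:
```python
assert int(round(float(4**2) / 2 + float(4) / 2)) == 10   # 4 strokes -> 10 values
```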
def chunks_to_string(chunks):
string = ''
began_context = False
context_depth = 0
context_triggers = ['_', '^']
for chunk in chunks:
if began_context and chunk != '{':
string += '{' + chunk + '}'
began_context = False
elif began_context and chunk == '{':
began_context = False
string += chunk
else:
if chunk in context_triggers:
began_context = True
context_depth += 1
string += chunk
return string | Parameters
----------
chunks : list of strings
A list of single entities in order
Returns
-------
string :
A LaTeX-parsable string
Examples
--------
>>> chunks_to_string(['\\\\sum', '_', 'i', '^', 'n', 'i', '^', '2'])
'\\\\sum_{i}^{n}i^{2}'
>>> chunks_to_string(['\\\\sum', '_', '{', 'i', '}', '^', 'n', 'i', '^',
... '2'])
'\\\\sum_{i}^{n}i^{2}' |
def get_preprocessing_queue(preprocessing_list):
return utils.get_objectlist(preprocessing_list,
config_key='preprocessing',
module=sys.modules[__name__]) | Get preprocessing queue from a list of dictionaries
>>> l = [{'RemoveDuplicateTime': None},
{'ScaleAndShift': [{'center': True}]}
]
>>> get_preprocessing_queue(l)
[RemoveDuplicateTime, ScaleAndShift
- center: True
- max_width: 1
- max_height: 1
] |
def print_preprocessing_list(preprocessing_queue):
print("## Preprocessing")
print("```")
for algorithm in preprocessing_queue:
print("* " + str(algorithm))
print("```") | Print the ``preprocessing_queue`` in a human-readable form.
Parameters
----------
preprocessing_queue : list of preprocessing objects
Algorithms that get applied for preprocessing. |
def _get_parameters(self, hwr_obj):
a = hwr_obj.get_bounding_box()
width = a['maxx'] - a['minx'] + self.width_add
height = a['maxy'] - a['miny'] + self.height_add
factor_x, factor_y = 1, 1
if width != 0:
factor_x = self.max_width / width
if height != 0:
factor_y = self.max_height / height
factor = min(factor_x, factor_y)
addx, addy = 0.0, 0.0
if self.center:
# Only one dimension (x or y) has to be centered (the smaller one)
add = -(factor / (2.0 * max(factor_x, factor_y)))
if factor == factor_x:
addy = add
if self.center_other:
addx = -(width * factor / 2.0)
else:
addx = add
if self.center_other:
addy = -(height * factor / 2.0)
assert factor > 0, "factor > 0 is False. factor = %s" % str(factor)
assert isinstance(addx, float), "addx is %s" % str(addx)
assert isinstance(addy, float), "addy is %s" % str(addy)
assert isinstance(a['minx'], (int, float)), "minx is %s" % str(a['minx'])
assert isinstance(a['miny'], (int, float)), "miny is %s" % str(a['miny'])
assert isinstance(a['mint'], (int, float)), "mint is %s" % str(a['mint'])
return {"factor": factor, "addx": addx, "addy": addy,
"minx": a['minx'], "miny": a['miny'], "mint": a['mint']} | Take a list of points and calculate the factors for scaling and
moving it so that it fits in the unit square. Keep the aspect
ratio.
Optionally center the points inside of the unit square. |
def _calculate_pen_down_strokes(self, pointlist, times=None):
if times is None:
times = []
for stroke in pointlist:
stroke_info = {"start": stroke[0]['time'],
"end": stroke[-1]['time'],
"pen_down": True}
# set up variables for interpolation
x, y, t = [], [], []
for point in stroke:
if point['time'] not in t:
x.append(point['x'])
y.append(point['y'])
t.append(point['time'])
x, y = numpy.array(x), numpy.array(y)
if len(t) == 1:
# constant interpolation
fx, fy = lambda x: float(x), lambda y: float(y)
elif len(t) == 2:
# linear interpolation
fx, fy = interp1d(t, x, 'linear'), interp1d(t, y, 'linear')
elif len(t) == 3:
# quadratic interpolation
fx = interp1d(t, x, 'quadratic')
fy = interp1d(t, y, 'quadratic')
else:
fx, fy = interp1d(t, x, self.kind), interp1d(t, y, self.kind)
stroke_info['fx'] = fx
stroke_info['fy'] = fy
times.append(stroke_info)
return times | Calculate the interval borders 'times' that contain the information
when a stroke started, when it ended and how it should be
interpolated. |
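A minimal, self-contained illustration of the interpolation objects this method stores in `times`, using scipy's `interp1d` as the code above does (the sample values are made up):
```python
from scipy.interpolate import interp1d

t = [0, 10, 20, 30]
x = [0.0, 1.0, 4.0, 9.0]
fx = interp1d(t, x, kind='cubic')   # cubic needs at least four samples
print(float(fx(15)))                # interpolated x coordinate at time 15
```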
def _calculate_pen_up_strokes(self, pointlist, times=None):
if times is None:
times = []
for i in range(len(pointlist) - 1):
stroke_info = {"start": pointlist[i][-1]['time'],
"end": pointlist[i + 1][0]['time'],
"pen_down": False}
x, y, t = [], [], []
for point in [pointlist[i][-1], pointlist[i + 1][0]]:
if point['time'] not in t:
x.append(point['x'])
y.append(point['y'])
t.append(point['time'])
if len(x) == 1:
# constant interpolation
fx, fy = lambda x: float(x), lambda y: float(y)
else:
# linear interpolation
x, y = numpy.array(x), numpy.array(y)
fx = interp1d(t, x, kind='linear')
fy = interp1d(t, y, kind='linear')
stroke_info['fx'] = fx
stroke_info['fy'] = fy
times.append(stroke_info)
return times | 'Pen-up' strokes are virtual strokes that were not drawn. They
model the time when the user moved from one stroke to the next. |
def _space(self, hwr_obj, stroke, kind):
new_stroke = []
stroke = sorted(stroke, key=lambda p: p['time'])
x, y, t = [], [], []
for point in stroke:
x.append(point['x'])
y.append(point['y'])
t.append(point['time'])
x, y = numpy.array(x), numpy.array(y)
failed = False
try:
fx = interp1d(t, x, kind=kind)
fy = interp1d(t, y, kind=kind)
except Exception as e: # pylint: disable=W0703
if hwr_obj.raw_data_id is not None:
logging.debug("spline failed for raw_data_id %i",
hwr_obj.raw_data_id)
else:
logging.debug("spline failed")
logging.debug(e)
failed = True
tnew = numpy.linspace(t[0], t[-1], self.number)
# linear interpolation fallback due to
# https://github.com/scipy/scipy/issues/3868
if failed:
try:
fx = interp1d(t, x, kind='linear')
fy = interp1d(t, y, kind='linear')
failed = False
except Exception as e:
logging.debug("len(stroke) = %i", len(stroke))
logging.debug("len(x) = %i", len(x))
logging.debug("len(y) = %i", len(y))
logging.debug("stroke=%s", stroke)
raise e
for x, y, t in zip(fx(tnew), fy(tnew), tnew):
new_stroke.append({'x': x, 'y': y, 'time': t})
return new_stroke | Do the interpolation of 'kind' for 'stroke'. |
def _calculate_average(self, points):
assert len(self.theta) == len(points), \
"points has length %i, but should have length %i" % \
(len(points), len(self.theta))
new_point = {'x': 0, 'y': 0, 'time': 0}
for key in new_point:
new_point[key] = self.theta[0] * points[0][key] + \
self.theta[1] * points[1][key] + \
self.theta[2] * points[2][key]
return new_point | Calculate the arithmetic mean of the points' x and y coordinates
separately. |
def create_model(model_folder, model_type, topology, override):
latest_model = utils.get_latest_in_folder(model_folder, ".json")
if (latest_model == "") or override:
logging.info("Create a base model...")
model_src = os.path.join(model_folder, "model-0.json")
command = "%s make %s %s > %s" % (utils.get_nntoolkit(),
model_type,
topology,
model_src)
logging.info(command)
os.system(command)
else:
logging.info("Model file already existed.") | Create a model if it doesn't exist already.
Parameters
----------
model_folder :
The path to the folder where the model is described with an `info.yml`
model_type :
MLP
topology :
Something like 160:500:369 - that means the first layer has 160
neurons, the second layer has 500 neurons and the last layer has 369
neurons.
override : boolean
If a model exists, override it. |
def main(model_folder, override=False):
model_description_file = os.path.join(model_folder, "info.yml")
# Read the model description file
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.load(ymlfile)
project_root = utils.get_project_root()
# Read the feature description file
feature_folder = os.path.join(project_root,
model_description['data-source'])
with open(os.path.join(feature_folder, "info.yml"), 'r') as ymlfile:
feature_description = yaml.load(ymlfile)
# Get a list of all used features
feature_list = features.get_features(feature_description['features'])
# Get the dimension of the feature vector
input_features = sum(map(lambda n: n.get_dimension(), feature_list))
logging.info("Number of features: %i", input_features)
# Analyze model
logging.info(model_description['model'])
if model_description['model']['type'] != 'mlp':
return
create_model(model_folder,
model_description['model']['type'],
model_description['model']['topology'],
override)
utils.create_run_logfile(model_folder) | Parse the info.yml from ``model_folder`` and create the model file. |
def get_parser():
project_root = utils.get_project_root()
# Get latest model folder
models_folder = os.path.join(project_root, "models")
latest_model = utils.get_latest_folder(models_folder)
# Get command line arguments
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--model",
dest="model",
help="where is the model folder (with a info.yml)?",
metavar="FOLDER",
type=lambda x: utils.is_valid_folder(parser, x),
default=latest_model)
parser.add_argument("-o", "--override",
action="store_true", dest="override",
default=False,
help=("should the model be overridden "
"if it already exists?"))
return parser | Return the parser object for this script. |
def submit_recording(raw_data_json):
url = "http://www.martin-thoma.de/write-math/classify/index.php"
headers = {'User-Agent': 'Mozilla/5.0',
'Content-Type': 'application/x-www-form-urlencoded'}
payload = {'drawnJSON': raw_data_json}
s = requests.Session()
req = requests.Request('POST', url, headers=headers, data=payload)
prepared = req.prepare()
s.send(prepared) | Submit a recording to the database on write-math.com.
Parameters
----------
raw_data_json : str
Raw data in JSON format
Raises
------
requests.exceptions.ConnectionError
If the internet connection is lost. |
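A hedged usage sketch; the stroke below is a placeholder, and the exact JSON layout expected by write-math.com is not specified here:
```python
import json

recording = json.dumps([[{'x': 12, 'y': 34, 'time': 0},
                         {'x': 14, 'y': 40, 'time': 20}]])
submit_recording(recording)
```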
def show_results(results, n=10):
import nntoolkit.evaluate
classification = nntoolkit.evaluate.show_results(results, n)
return "<pre>" + classification.replace("\n", "<br/>") + "</pre>" | Show the TOP n results of a classification.
>>> results = [{'\\alpha': 0.67}, {'\\propto': 0.25}]
>>> show_results(results) |
def interactive():
global n
if request.method == 'GET' and request.args.get('heartbeat', '') != "":
return request.args.get('heartbeat', '')
if request.method == 'POST':
logging.warning('POST to /interactive is deprecated. '
'Use /worker instead')
else:
# Page where the user can enter a recording
return render_template('canvas.html') | Interactive classifier. |
def get_json_result(results, n=10):
s = []
last = -1
for res in results[:min(len(results), n)]:
if res['probability'] < last*0.5 and res['probability'] < 0.05:
break
if res['probability'] < 0.01:
break
s.append(res)
last = res['probability']
return json.dumps(s) | Return the top `n` results as a JSON list.
>>> results = [{'probability': 0.65,
... 'whatever': 'bar'},
... {'probability': 0.21,
... 'whatever': 'bar'},
... {'probability': 0.05,
... 'whatever': 'bar'},]
>>> get_json_result(results, n=10)
[{'\\alpha': 0.65}, {'\\propto': 0.25}, {'\\varpropto': 0.0512}] |
def worker():
global n
global use_segmenter_flag
if request.method == 'POST':
raw_data_json = request.form['classify']
try:
secret_uuid = request.form['secret']
except:
logging.info("No secret uuid given. Create one.")
secret_uuid = str(uuid.uuid4())
# Check recording
try:
json.loads(raw_data_json)
except ValueError:
return "Invalid JSON string: %s" % raw_data_json
# Classify
if use_segmenter_flag:
strokelist = json.loads(raw_data_json)
beam = utils.get_beam(secret_uuid)
if beam is None:
beam = se.Beam()
for stroke in strokelist:
beam.add_stroke(stroke)
results = beam.get_results()
utils.store_beam(beam, secret_uuid)
else:
stroke = strokelist[-1]
beam.add_stroke(stroke)
results = beam.get_results()
utils.store_beam(beam, secret_uuid)
else:
results = classify.classify_segmented_recording(raw_data_json)
return get_json_result(results, n=n)
else:
# Page where the user can enter a recording
return "Classification Worker (Version %s)" % hwrt.__version__ | Implement a worker for write-math.com. |
def _get_part(pointlist, strokes):
result = []
strokes = sorted(strokes)
for stroke_index in strokes:
result.append(pointlist[stroke_index])
return result | Get some strokes of pointlist
Parameters
----------
pointlist : list of lists of dicts
strokes : list of integers
Returns
-------
list of lists of dicts |
def _get_translate():
translate = {}
model_path = pkg_resources.resource_filename('hwrt', 'misc/')
translation_csv = os.path.join(model_path, 'latex2writemathindex.csv')
arguments = {'newline': '', 'encoding': 'utf8'}
with open(translation_csv, 'rt', **arguments) as csvfile:
contents = csvfile.read()
lines = contents.split("\n")
for csvrow in lines:
csvrow = csvrow.split(',')
if len(csvrow) == 1:
writemathid = csvrow[0]
latex = ""
else:
writemathid, latex = csvrow[0], csvrow[1:]
latex = ','.join(latex)
translate[latex] = writemathid
return translate | Get a dictionary which translates from a neural network output to
semantics. |
def get_writemath_id(el, translate):
semantics = el['semantics'].split(";")[1]
if semantics not in translate:
logging.debug("Could not find '%s' in translate.", semantics)
logging.debug("el: %s", el)
return None
else:
writemathid = translate[semantics]
return writemathid | Parameters
----------
el : dict
with key 'semantics'
results element
Returns
-------
int or None:
ID of the symbol on write-math.com |
def fix_writemath_answer(results):
new_results = []
# Read csv
translate = _get_translate()
for i, el in enumerate(results):
writemathid = get_writemath_id(el, translate)
if writemathid is None:
continue
new_results.append({'symbolnr': el['symbolnr'],
'semantics': writemathid,
'probability': el['probability']})
if i >= 10 or (i > 0 and el['probability'] < 0.20):
break
return new_results | Bring ``results`` into a format that is accepted by write-math.com. This
means using the ID for the formula that is used by the write-math server.
Examples
--------
>>> results = [{'symbolnr': 214,
... 'semantics': '\\triangleq',
... 'probability': 0.03}, ...]
>>> fix_writemath_answer(results)
[{123: 0.03}, ...] |
def get_parser():
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-n",
dest="n", default=10, type=int,
help="Show TOP-N results")
parser.add_argument("--port",
dest="port", default=5000, type=int,
help="where should the webserver run")
parser.add_argument("--use_segmenter",
dest="use_segmenter",
default=False,
action='store_true',
help=("try to segment the input for multiple symbol "
"recognition"))
return parser | Return the parser object for this script. |
def main(port=8000, n_output=10, use_segmenter=False):
global n
global use_segmenter_flag
n = n_output
use_segmenter_flag = use_segmenter
logging.info("Start webserver...")
app.run(port=port) | Main function starting the webserver. |
def update_if_outdated(folder):
folders = []
while os.path.isdir(folder):
folders.append(folder)
# Get info.yml
with open(os.path.join(folder, "info.yml")) as ymlfile:
content = yaml.load(ymlfile)
folder = os.path.join(utils.get_project_root(), content['data-source'])
raw_source_file = folder
if not os.path.isfile(raw_source_file):
logging.error("File '%s' was not found.", raw_source_file)
logging.error("You may need to execute 'hwrt download' first.")
sys.exit(-1)
dt = os.path.getmtime(raw_source_file)
source_mtime = datetime.datetime.utcfromtimestamp(dt)
folders = folders[::-1] # Reverse order to get the most "basic one first"
for target_folder in folders:
target_mtime = utils.get_latest_successful_run(target_folder)
if target_mtime is None or source_mtime > target_mtime:
# The source is later than the target. That means we need to
# refresh the target
if "preprocessed" in target_folder:
logging.info("Preprocessed file was outdated. Update...")
preprocess_dataset.main(os.path.join(utils.get_project_root(),
target_folder))
elif "feature-files" in target_folder:
logging.info("Feature file was outdated. Update...")
create_ffiles.main(target_folder)
elif "model" in target_folder:
logging.info("Model file was outdated. Update...")
create_model.main(target_folder, True)
target_mtime = datetime.datetime.utcnow()
else:
logging.info("'%s' is up-to-date.", target_folder)
source_mtime = target_mtime | Check if the currently watched instance (model, feature or
preprocessing) is outdated and update it if necessary. |
def generate_training_command(model_folder):
update_if_outdated(model_folder)
model_description_file = os.path.join(model_folder, "info.yml")
# Read the model description file
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.load(ymlfile)
# Get the data paths (hdf5 files)
project_root = utils.get_project_root()
data = {}
data['training'] = os.path.join(project_root,
model_description["data-source"],
"traindata.hdf5")
data['testing'] = os.path.join(project_root,
model_description["data-source"],
"testdata.hdf5")
data['validating'] = os.path.join(project_root,
model_description["data-source"],
"validdata.hdf5")
# Get latest model file
basename = "model"
latest_model = utils.get_latest_working_model(model_folder)
if latest_model == "":
logging.error("There is no model with basename '%s'.", basename)
return None
else:
logging.info("Model '%s' found.", latest_model)
i = int(latest_model.split("-")[-1].split(".")[0])
model_src = os.path.join(model_folder, "%s-%i.json" % (basename, i))
model_target = os.path.join(model_folder,
"%s-%i.json" % (basename, i+1))
# generate the training command
training = model_description['training']
training = training.replace("{{testing}}", data['testing'])
training = training.replace("{{training}}", data['training'])
training = training.replace("{{validation}}", data['validating'])
training = training.replace("{{src_model}}", model_src)
training = training.replace("{{target_model}}", model_target)
training = training.replace("{{nntoolkit}}", utils.get_nntoolkit())
return training | Generate a string that contains a command with all necessary
parameters to train the model. |
def train_model(model_folder):
os.chdir(model_folder)
training = generate_training_command(model_folder)
if training is None:
return -1
logging.info(training)
os.chdir(model_folder)
os.system(training) | Train the model in ``model_folder``. |
def main(model_folder):
model_description_file = os.path.join(model_folder, "info.yml")
# Read the model description file
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.load(ymlfile)
# Analyze model
logging.info(model_description['model'])
data = {}
data['training'] = os.path.join(model_folder, "traindata.hdf5")
data['testing'] = os.path.join(model_folder, "testdata.hdf5")
data['validating'] = os.path.join(model_folder, "validdata.hdf5")
train_model(model_folder) | Main part of the training script. |
def get_parser():
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--model",
dest="model",
help="where is the model folder (with a info.yml)?",
metavar="FOLDER",
type=lambda x: utils.is_valid_folder(parser, x),
default=utils.default_model())
return parser | Return the parser object for this script. |
def get_bounding_box(points):
assert len(points) > 0, "At least one point has to be given."
min_x, max_x = points[0]['x'], points[0]['x']
min_y, max_y = points[0]['y'], points[0]['y']
for point in points:
min_x, max_x = min(min_x, point['x']), max(max_x, point['x'])
min_y, max_y = min(min_y, point['y']), max(max_y, point['y'])
p1 = Point(min_x, min_y)
p2 = Point(max_x, max_y)
return BoundingBox(p1, p2) | Get the bounding box of a list of points.
Parameters
----------
points : list of points
Returns
-------
BoundingBox |
def do_bb_intersect(a, b):
return a.p1.x <= b.p2.x \
and a.p2.x >= b.p1.x \
and a.p1.y <= b.p2.y \
and a.p2.y >= b.p1.y | Check if BoundingBox a intersects with BoundingBox b. |
def segments_distance(segment1, segment2):
assert isinstance(segment1, LineSegment), \
"segment1 is not a LineSegment, but a %s" % type(segment1)
assert isinstance(segment2, LineSegment), \
"segment2 is not a LineSegment, but a %s" % type(segment2)
if len(get_segments_intersections(segment1, segment2)) >= 1:
return 0
# try each of the 4 vertices w/the other segment
distances = []
distances.append(point_segment_distance(segment1.p1, segment2))
distances.append(point_segment_distance(segment1.p2, segment2))
distances.append(point_segment_distance(segment2.p1, segment1))
distances.append(point_segment_distance(segment2.p2, segment1))
return min(distances) | Calculate the distance between two line segments in the plane.
>>> a = LineSegment(Point(1,0), Point(2,0))
>>> b = LineSegment(Point(0,1), Point(0,2))
>>> "%0.2f" % segments_distance(a, b)
'1.41'
>>> c = LineSegment(Point(0,0), Point(5,5))
>>> d = LineSegment(Point(2,2), Point(4,4))
>>> e = LineSegment(Point(2,2), Point(7,7))
>>> "%0.2f" % segments_distance(c, d)
'0.00'
>>> "%0.2f" % segments_distance(c, e)
'0.00' |
def perpendicular_distance(p3, p1, p2):
px = p2['x']-p1['x']
py = p2['y']-p1['y']
squared_distance = px*px + py*py
if squared_distance == 0:
# The line is in fact only a single dot.
# In this case the distance of two points has to be
# calculated
line_point = Point(p1['x'], p1['y'])
point = Point(p3['x'], p3['y'])
return line_point.dist_to(point)
u = ((p3['x'] - p1['x'])*px + (p3['y'] - p1['y'])*py) / squared_distance
if u > 1:
u = 1
elif u < 0:
u = 0
x = p1['x'] + u * px
y = p1['y'] + u * py
dx = x - p3['x']
dy = y - p3['y']
# Note: If the actual distance does not matter,
# if you only want to compare what this function
# returns to other results of this function, you
# can just return the squared distance instead
# (i.e. remove the sqrt) to gain a little performance
dist = math.sqrt(dx*dx + dy*dy)
return dist | Calculate the distance from p3 to the stroke defined by p1 and p2.
The distance is the length of the perpendicular dropped from p3 onto the line through p1 and p2, clamped to the segment.
Parameters
----------
p1 : dictionary with "x" and "y"
start of stroke
p2 : dictionary with "x" and "y"
end of stroke
p3 : dictionary with "x" and "y"
point |
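A worked example of the function above: the perpendicular from (0, 1) meets the segment from (0, 0) to (2, 0) at (0, 0), so the distance is 1.0:
```python
p1, p2, p3 = {'x': 0, 'y': 0}, {'x': 2, 'y': 0}, {'x': 0, 'y': 1}
assert perpendicular_distance(p3, p1, p2) == 1.0
```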
def dist_to(self, p2):
return math.hypot(self.x - p2.x, self.y - p2.y) | Measure the distance to another point. |
def get_slope(self):
# y1 = m*x1 + t
# y2 = m*x2 + t => y1-y2 = m*(x1-x2) <=> m = (y1-y2)/(x1-x2)
return ((self.p1.y-self.p2.y) / (self.p1.x-self.p2.x)) | Return the slope m of this line segment. |
def get_offset(self):
return self.p1.y-self.get_slope()*self.p1.x | Get the offset t of this line segment. |
def count_selfintersections(self):
# This can be solved more efficiently with sweep line
counter = 0
for i, j in itertools.combinations(range(len(self.lineSegments)), 2):
inters = get_segments_intersections(self.lineSegments[i],
self.lineSegments[j])
if abs(i-j) > 1 and len(inters) > 0:
counter += 1
return counter | Get the number of self-intersections of this polygonal chain. |
def count_intersections(self, line_segments_b):
line_segments_a = self.lineSegments
# Calculate intersections
intersection_points = []
for line1, line2 in itertools.product(line_segments_a,
line_segments_b):
intersection_points += get_segments_intersections(line1, line2)
return len(set(intersection_points)) | Count the intersections of two strokes with each other.
Parameters
----------
line_segments_b : list
A list of line segments
Returns
-------
int
The number of intersections between A and B. |
def get_area(self):
return (self.p2.x-self.p1.x)*(self.p2.y-self.p1.y) | Calculate area of bounding box. |
def get_center(self):
return Point((self.p1.x+self.p2.x)/2.0, (self.p1.y+self.p2.y)/2.0) | Get the center point of this bounding box. |
def _fetch_data_from_server(raw_data_id, mysql_cfg):
import pymysql
import pymysql.cursors
# Import configuration file
cfg = utils.get_database_configuration()
if cfg is None:
return None
# Establish database connection
connection = pymysql.connect(host=cfg[mysql_cfg]['host'],
user=cfg[mysql_cfg]['user'],
passwd=cfg[mysql_cfg]['passwd'],
db=cfg[mysql_cfg]['db'],
cursorclass=pymysql.cursors.DictCursor)
logging.info("Connection: %s", str(connection))
cursor = connection.cursor()
# Download dataset
sql = ("SELECT `id`, `data` "
"FROM `wm_raw_draw_data` WHERE `id`=%i") % raw_data_id
cursor.execute(sql)
return cursor.fetchone() | Get the data from raw_data_id from the server.
:returns: The ``data`` if fetching worked, ``None`` if it failed. |
def _get_data_from_rawfile(path_to_data, raw_data_id):
loaded = pickle.load(open(path_to_data, "rb"))
raw_datasets = loaded['handwriting_datasets']
for raw_dataset in raw_datasets:
if raw_dataset['handwriting'].raw_data_id == raw_data_id:
return raw_dataset['handwriting']
return None | Get a HandwrittenData object that has ``raw_data_id`` from a pickle file
``path_to_data``.
:returns: The HandwrittenData object if ``raw_data_id`` is in
path_to_data, otherwise ``None``. |
def _list_ids(path_to_data):
loaded = pickle.load(open(path_to_data, "rb"))
raw_datasets = loaded['handwriting_datasets']
raw_ids = {}
for raw_dataset in raw_datasets:
raw_data_id = raw_dataset['handwriting'].raw_data_id
if raw_dataset['formula_id'] not in raw_ids:
raw_ids[raw_dataset['formula_id']] = [raw_data_id]
else:
raw_ids[raw_dataset['formula_id']].append(raw_data_id)
for symbol_id in sorted(raw_ids):
print("%i: %s" % (symbol_id, sorted(raw_ids[symbol_id]))) | List raw data IDs grouped by symbol ID from a pickle file
``path_to_data``. |