def _get_description(prev_description): current_desc_file = os.path.join(utils.get_project_root(), prev_description['data-source'], "info.yml") if not os.path.isfile(current_desc_file): logging.error("You are probably not in the folder of a model, because " "%s is not a file.", current_desc_file) sys.exit(-1) with open(current_desc_file, 'r') as ymlfile: current_description = yaml.load(ymlfile) return current_description
Get the parsed description file (a dictionary) from another parsed description file.
def _get_system(model_folder): # Get model description model_description_file = os.path.join(model_folder, "info.yml") if not os.path.isfile(model_description_file): logging.error("You are probably not in the folder of a model, because " "%s is not a file. (-m argument)", model_description_file) sys.exit(-1) with open(model_description_file, 'r') as ymlfile: model_desc = yaml.load(ymlfile) # Get the feature and the preprocessing description feature_desc = _get_description(model_desc) preprocessing_desc = _get_description(feature_desc) return (preprocessing_desc, feature_desc, model_desc)
Return the preprocessing description, the feature description and the model description.
def display_data(raw_data_string, raw_data_id, model_folder, show_raw): print("## Raw Data (ID: %i)" % raw_data_id) print("```") print(raw_data_string) print("```") preprocessing_desc, feature_desc, _ = _get_system(model_folder) # Print model print("## Model") print("%s\n" % model_folder) # Get the preprocessing queue tmp = preprocessing_desc['queue'] preprocessing_queue = preprocessing.get_preprocessing_queue(tmp) # Get feature values as list of floats, rounded to 3 decimal places tmp = feature_desc['features'] feature_list = features.get_features(tmp) # Print preprocessing queue preprocessing.print_preprocessing_list(preprocessing_queue) features.print_featurelist(feature_list) # Get Handwriting recording = handwritten_data.HandwrittenData(raw_data_string, raw_data_id=raw_data_id) if show_raw: recording.show() recording.preprocessing(preprocessing_queue) feature_values = recording.feature_extraction(feature_list) feature_values = [round(el, 3) for el in feature_values] print("Features:") print(feature_values) # Get the list of data multiplication algorithms mult_queue = data_multiplication.get_data_multiplication_queue( feature_desc['data-multiplication']) # Multiply traing_set training_set = [{'id': 42, 'formula_id': 42, 'formula_in_latex': 'None', 'handwriting': recording}] training_set = create_ffiles.training_set_multiplication(training_set, mult_queue) # Display it logging.info("Show %i recordings...", len(training_set)) for recording in training_set: recording['handwriting'].show()
Print ``raw_data_id`` with the content ``raw_data_string`` after applying the preprocessing of ``model_folder`` to it.
def get_parser(): from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("-i", "--id", dest="id", default=292293, type=int, help="which RAW_DATA_ID do you want?") parser.add_argument("--mysql", dest="mysql", default='mysql_online', help="which mysql configuration should be used?") parser.add_argument("-m", "--model", dest="model", help="where is the model folder (with a info.yml)?", metavar="FOLDER", type=lambda x: utils.is_valid_folder(parser, x), default=utils.default_model()) parser.add_argument("-l", "--list", dest="list", help="list all raw data IDs / symbol IDs", action='store_true', default=False) parser.add_argument("-s", "--server", dest="server", help="contact the MySQL server", action='store_true', default=False) parser.add_argument("-r", "--raw", dest="show_raw", help="show the raw recording (without preprocessing)", action='store_true', default=False) return parser
Return the parser object for this script.
def main(list_ids, model, contact_server, raw_data_id, show_raw, mysql_cfg='mysql_online'): if list_ids: preprocessing_desc, _, _ = _get_system(model) raw_datapath = os.path.join(utils.get_project_root(), preprocessing_desc['data-source']) _list_ids(raw_datapath) else: if contact_server: data = _fetch_data_from_server(raw_data_id, mysql_cfg) print("hwrt version: %s" % hwrt.__version__) if data is not None: display_data(data['data'], data['id'], model, show_raw) else: logging.info("RAW_DATA_ID %i does not exist or " "database connection did not work.", raw_data_id) # The data was not on the server / the connection to the server did # not work. So try it again with the model data preprocessing_desc, _, _ = _get_system(model) raw_datapath = os.path.join(utils.get_project_root(), preprocessing_desc['data-source']) handwriting = _get_data_from_rawfile(raw_datapath, raw_data_id) if handwriting is None: logging.info("Recording with ID %i was not found in %s", raw_data_id, raw_datapath) else: print("hwrt version: %s" % hwrt.__version__) display_data(handwriting.raw_data_json, handwriting.formula_id, model, show_raw)
Main function of view.py.
def write_hw_scgink(hw, filename='mathbrush-test.txt'): with open(filename, 'w') as f: f.write('SCG_INK\n') f.write('%i\n' % len(hw.get_pointlist())) for stroke in hw.get_pointlist(): f.write('%i\n' % len(stroke)) for point in stroke: f.write('%i %i\n' % (point['x'], point['y']))
Write the pointlist of a HandwrittenData object to a file in SCG INK format. Parameters ---------- hw : HandwrittenData object filename : string Path where the SCG INK file gets written
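For illustration, a hypothetical recording with two strokes of two points each would be written as the following file content (the coordinate values are made up):

SCG_INK
2
2
10 20
11 22
2
30 40
31 41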
def get_parameters(folder): # Read the model description file with open(os.path.join(folder, "info.yml"), 'r') as ymlfile: preprocessing_description = yaml.load(ymlfile) # Get the path of the raw data raw_datapath = os.path.join(utils.get_project_root(), preprocessing_description['data-source']) # Get the path were the preprocessed file should be put outputpath = os.path.join(folder, "data.pickle") # Get the preprocessing queue tmp = preprocessing_description['queue'] preprocessing_queue = preprocessing.get_preprocessing_queue(tmp) return (raw_datapath, outputpath, preprocessing_queue)
Get the parameters of the preprocessing done within `folder`. Parameters ---------- folder : string Returns ------- tuple : (path of raw data, path where preprocessed data gets stored, list of preprocessing algorithms)
def create_preprocessed_dataset(path_to_data, outputpath, preprocessing_queue): # Log everything logging.info("Data soure %s", path_to_data) logging.info("Output will be stored in %s", outputpath) tmp = "Preprocessing Queue:\n" for preprocessing_class in preprocessing_queue: tmp += str(preprocessing_class) + "\n" logging.info(tmp) # Load from pickled file if not os.path.isfile(path_to_data): logging.info(("'%s' does not exist. Please either abort this script " "or update the data location."), path_to_data) raw_dataset_path = utils.choose_raw_dataset() # Get project-relative path raw_dataset_path = "raw-datasets" + \ raw_dataset_path.split("raw-datasets")[1] print(raw_dataset_path) sys.exit() # TODO: Update model! logging.info("Start loading data...") loaded = pickle.load(open(path_to_data, "rb")) raw_datasets = loaded['handwriting_datasets'] logging.info("Start applying preprocessing methods") start_time = time.time() for i, raw_dataset in enumerate(raw_datasets): if i % 10 == 0 and i > 0: utils.print_status(len(raw_datasets), i, start_time) # Do the work raw_dataset['handwriting'].preprocessing(preprocessing_queue) sys.stdout.write("\r%0.2f%% (done)\033[K\n" % (100)) print("") pickle.dump({'handwriting_datasets': raw_datasets, 'formula_id2latex': loaded['formula_id2latex'], 'preprocessing_queue': preprocessing_queue}, open(outputpath, "wb"), 2)
Create a preprocessed dataset file by applying `preprocessing_queue` to `path_to_data`. The result will be stored in `outputpath`.
def main(folder): raw_datapath, outputpath, p_queue = get_parameters(folder) create_preprocessed_dataset(raw_datapath, outputpath, p_queue) utils.create_run_logfile(folder)
Main part of preprocess_dataset that glues things together.
def _create_index_formula_lookup(formula_id2index, feature_folder, index2latex): index2formula_id = sorted(formula_id2index.items(), key=lambda n: n[1]) index2formula_file = os.path.join(feature_folder, "index2formula_id.csv") with open(index2formula_file, "w") as f: f.write("index,formula_id,latex\n") for formula_id, index in index2formula_id: f.write("%i,%i,%s\n" % (index, formula_id, index2latex[index]))
Create a lookup file where the index is mapped to the formula id and the LaTeX command. Parameters ---------- formula_id2index : dict feature_folder : str Path to a folder which contains a feature file as well as an index2formula_id.csv. index2latex : dict Maps an integer index to a LaTeX command
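The resulting index2formula_id.csv has one row per class index; a hypothetical excerpt (formula ids and LaTeX commands are made up):

index,formula_id,latex
0,31,A
1,32,\alpha
2,33,\sum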
def _create_translation_file(feature_folder, dataset_name, translation, formula_id2index): translationfilename = "%s/translation-%s.csv" % (feature_folder, dataset_name) with open(translationfilename, "w") as f: f.write("index,raw_data_id,latex,formula_id\n") for el in translation: f.write("%i,%i,%s,%i\n" % (formula_id2index[el[2]], el[0], el[1], el[2]))
Write a look-up file that contains the direct (record-wise) lookup information. Parameters ---------- feature_folder : Path to the feature files. dataset_name : 'traindata', 'validdata' or 'testdata'. translation : list of triples (raw data id, formula in latex, formula id)
def training_set_multiplication(training_set, mult_queue):
    logging.info("Multiply data...")
    for algorithm in mult_queue:
        new_training_set = []
        for recording in training_set:
            samples = algorithm(recording['handwriting'])
            for sample in samples:
                new_training_set.append({
                    'id': recording['id'],
                    'is_in_testset': 0,
                    'formula_id': recording['formula_id'],
                    'handwriting': sample,
                    'formula_in_latex': recording['formula_in_latex']})
        training_set = new_training_set
    # Return training_set (not the loop-local list) so that an empty
    # mult_queue does not raise a NameError.
    return training_set
Multiply the training set by all methods listed in mult_queue. Parameters ---------- training_set : set of all recordings that will be used for training mult_queue : list of all algorithms that will take one recording and generate more than one. Returns ------- multiple recordings
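A usage sketch mirroring display_data in view.py; `recording` is assumed to be a HandwrittenData object and `feature_desc` a parsed feature info.yml:

mult_queue = data_multiplication.get_data_multiplication_queue(
    feature_desc['data-multiplication'])
training_set = [{'id': 42, 'formula_id': 42, 'formula_in_latex': 'None',
                 'handwriting': recording}]
training_set = training_set_multiplication(training_set, mult_queue)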
def _calculate_feature_stats(feature_list, prepared, serialization_file): # pylint: disable=R0914 # Create feature only list feats = [x for x, _ in prepared] # Label is not necessary # Calculate all means / mins / maxs means = numpy.mean(feats, 0) mins = numpy.min(feats, 0) maxs = numpy.max(feats, 0) # Calculate, min, max and mean vector for each feature with # normalization start = 0 mode = 'w' arguments = {'newline': ''} if sys.version_info.major < 3: mode += 'b' arguments = {} with open(serialization_file, mode, **arguments) as csvfile: spamwriter = csv.writer(csvfile, delimiter=str(';'), quotechar=str('"'), quoting=csv.QUOTE_MINIMAL) for feature in feature_list: end = start + feature.get_dimension() # append the data to the feature class feature.mean = numpy.array(means[start:end]) feature.min = numpy.array(mins[start:end]) feature.max = numpy.array(maxs[start:end]) start = end for mean, fmax, fmin in zip(feature.mean, feature.max, feature.min): spamwriter.writerow([mean, fmax - fmin])
Calculate min, max and mean for each feature. Store them in the feature objects and write mean and range to `serialization_file`.
def _normalize_features(feature_list, prepared, is_traindata): if is_traindata: _calculate_feature_stats(feature_list, prepared, "featurenormalization.csv") start = 0 for feature in feature_list: end = start + feature.get_dimension() # For every instance in the dataset: Normalize! for i in range(len(prepared)): # The 0 is necessary as every element is (x, y) feature_range = (feature.max - feature.min) if feature_range == 0: feature_range = 1 prepared[i][0][start:end] = (prepared[i][0][start:end] - feature.mean) / feature_range start = end return prepared
Normalize features (mean subtraction, division by the feature's range; a range of 0 is treated as 1).
def prepare_dataset(dataset, formula_id2index, feature_list, is_traindata, do_normalization=False): prepared = [] start_time = time.time() translation = [] for i, data in enumerate(dataset): x = [] handwriting = data['handwriting'] x = handwriting.feature_extraction(feature_list) # Feature selection y = formula_id2index[data['formula_id']] # Get label translation.append((handwriting.raw_data_id, handwriting.formula_in_latex, handwriting.formula_id)) prepared.append((numpy.array(x), y)) if i % 100 == 0 and i > 0: utils.print_status(len(dataset), i, start_time) sys.stdout.write("\r100%" + " "*80 + "\n") sys.stdout.flush() # Feature normalization if do_normalization: _normalize_features(feature_list, prepared, is_traindata) return (prepared, translation)
Transform each instance of dataset to a (Features, Label) tuple.
def make_hdf5(dataset_name, feature_count, data, output_filename,
              create_learning_curve):
    # create raw data file for hdf5_create
    if dataset_name == "traindata" and create_learning_curve:
        max_trainingexamples = 501
        output_filename_save = output_filename
        steps = 10
        for trainingexamples in range(100, max_trainingexamples, steps):
            # adjust output_filename
            tmp = output_filename_save.split(".")
            tmp[-2] += "-%i-examples" % trainingexamples
            output_filename = ".".join(map(str, tmp))
            # Make sure the data has not more than ``trainingexamples``
            # instances per symbol
            seen_symbols = defaultdict(int)
            new_data = []
            for feature_string, label in data:
                if seen_symbols[label] < trainingexamples:
                    seen_symbols[label] += 1
                    new_data.append((feature_string, label))
            # Create the hdf5 file
            utils.create_hdf5(output_filename, feature_count, new_data)
    else:
        utils.create_hdf5(output_filename, feature_count, data)
Create the hdf5 file(s). Parameters ---------- dataset_name : string Name of the dataset, e.g. 'traindata'. feature_count : integer number of features data : list of tuples data format ('feature_string', 'label') output_filename : string name of the file that hdf5_create will use to create the hdf5 file. create_learning_curve : bool If True and dataset_name is 'traindata', several files with an increasing number of training examples per symbol get created.
def _get_symbol_index(stroke_id_needle, segmentation): for symbol_index, symbol in enumerate(segmentation): if stroke_id_needle in symbol: return symbol_index return None
Parameters ---------- stroke_id_needle : int Identifier for the stroke of which the symbol should get found. segmentation : list of lists of integers An ordered segmentation of strokes to symbols. Returns ------- The symbol index in which stroke_id_needle occurs Examples -------- >>> _get_symbol_index(3, [[0, 1, 2], [3, 4, 5], [6, 7]]) 1 >>> _get_symbol_index(6, [[0, 1, 2], [3, 4, 5], [6, 7]]) 2 >>> _get_symbol_index(7, [[0, 1, 2], [3, 4, 5], [6, 7]]) 2
def get_segmented_raw_data(top_n=10000): cfg = utils.get_database_configuration() mysql = cfg['mysql_online'] connection = pymysql.connect(host=mysql['host'], user=mysql['user'], passwd=mysql['passwd'], db=mysql['db'], cursorclass=pymysql.cursors.DictCursor) cursor = connection.cursor() sql = ("SELECT `id`, `data`, `segmentation` " "FROM `wm_raw_draw_data` WHERE " "(`segmentation` IS NOT NULL OR `accepted_formula_id` IS NOT NULL) " "AND `wild_point_count` = 0 " "AND `stroke_segmentable` = 1 " "ORDER BY `id` LIMIT 0, %i") % top_n logging.info(sql) cursor.execute(sql) datasets = cursor.fetchall() logging.info("Fetched %i recordings. Add missing segmentations.", len(datasets)) for i in range(len(datasets)): if datasets[i]['segmentation'] is None: stroke_count = len(json.loads(datasets[i]['data'])) if stroke_count > 10: print("Massive stroke count! %i" % stroke_count) datasets[i]['segmentation'] = str([[s for s in range(stroke_count)]]) return datasets
Fetch data from the server. Parameters ---------- top_n : int Number of data sets which get fetched from the server.
def filter_recordings(recordings): new_recordings = [] for recording in recordings: recording['data'] = json.loads(recording['data']) tmp = json.loads(recording['segmentation']) recording['segmentation'] = normalize_segmentation(tmp) had_none = False for stroke in recording['data']: for point in stroke: if point['time'] is None: logging.debug("Had None-time: %i", recording['id']) had_none = True break if had_none: break if not had_none: new_recordings.append(recording) recordings = new_recordings logging.info("Done filtering") return recordings
Remove all recordings which have points without time. Parameters ---------- recordings : list of dicts Each dictionary has the keys 'data' and 'segmentation' Returns ------- list of dicts : Only recordings where all points have time values.
def get_nn_classifier(X, y): assert type(X) is numpy.ndarray assert type(y) is numpy.ndarray assert len(X) == len(y) assert X.dtype == 'float32' assert y.dtype == 'int32' nn_pickled_filename = 'is_one_symbol_classifier.pickle' if os.path.isfile(nn_pickled_filename): with open(nn_pickled_filename, 'rb') as handle: get_output = pickle.load(handle) else: get_output = train_nn_segmentation_classifier(X, y) with open(nn_pickled_filename, 'wb') as handle: pickle.dump(get_output, handle, protocol=pickle.HIGHEST_PROTOCOL) return get_output
Get a neural network classifier: if a pickled classifier exists in 'is_one_symbol_classifier.pickle' it gets loaded, otherwise a new one is trained on (X, y) and pickled. Parameters ---------- X : numpy array A list of feature vectors y : numpy array A list of labels Returns ------- Theano expression : The trained neural network
def get_stroke_features(recording, strokeid1, strokeid2): stroke1 = recording[strokeid1] stroke2 = recording[strokeid2] assert isinstance(stroke1, list), "stroke1 is a %s" % type(stroke1) X_i = [] for s in [stroke1, stroke2]: hw = HandwrittenData(json.dumps([s])) feat1 = features.ConstantPointCoordinates(strokes=1, points_per_stroke=20, fill_empty_with=0) feat2 = features.ReCurvature(strokes=1) feat3 = features.Ink() X_i += hw.feature_extraction([feat1, feat2, feat3]) X_i += [get_strokes_distance(stroke1, stroke2)] # Distance of strokes X_i += [get_time_distance(stroke1, stroke2)] # Time in between X_i += [abs(strokeid2-strokeid1)] # Strokes in between # X_i += [get_black_percentage()] return X_i
Get the features used to decide if two strokes belong to the same symbol or not. Parameters ---------- recording : list A list of strokes strokeid1 : int strokeid2 : int Returns ------- list : A list of features which could be useful to decide if stroke1 and stroke2 belong to the same symbol.
def merge_segmentations(segs1, segs2, strokes=None): def translate(segmentation, strokes): t = [] for symbol in segmentation: symbol_new = [] for stroke in symbol: symbol_new.append(strokes[stroke]) t.append(symbol_new) return t if strokes is None: strokes = [i for i in range(len(segs2[0][0]))] topf = partitions.TopFinder(500) for s1, s2 in itertools.product(segs1, segs2): topf.push(s1[0]+translate(s2[0], strokes), s1[1]*s2[1]) return list(topf)
Parameters ---------- segs1 : a list of tuples Each tuple is a segmentation with its score segs2 : a list of tuples Each tuple is a segmentation with its score strokes : list of stroke names for segs2 Returns ------- list of tuples : Segmentations with their score, combined from segs1 and segs2
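A sketch of the internal translate step (stroke names are made up):

# strokes = [4, 5, 6]
# segs2 segmentation [[0, 1], [2]]  ->  [[4, 5], [6]]
# The translated segmentation is concatenated with each segs1 segmentation
# and pushed into the TopFinder with the product of both scores.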
def get_mst_wood(recording, single_clf): points = get_points(recording) mst = get_mst(points) mst_wood = [{'mst': mst, 'strokes': list(range(len(mst)))}] # TODO: break mst into wood of msts wherever possible by recognizing single # symbols by stroke bbintersections = get_bb_intersections(recording) for i, stroke in enumerate(recording): # TODO predictions = single_clf.predict({'id': 0, 'data': [stroke]}) # TODO predictions[:20] prob_sum = sum([p['probability'] for p in predictions[:1]]) # dots cannot be segmented into single symbols at this point if prob_sum > 0.95 and not any([el for el in bbintersections[i]]) \ and len(stroke) > 2 \ and predictions[0]['semantics'].split(';')[1] != '-': # Split mst here split_mst_index, split_node_i = find_split_node(mst_wood, i) mst_wood_tmp = break_mst(mst_wood[split_mst_index], split_node_i) del mst_wood[split_mst_index] for mst in mst_wood_tmp: mst_wood.append(mst) for mst in mst_wood: if i in mst['strokes']: mst['pred'] = predictions[0]['semantics'].split(';')[1] # if any([True for mst in mst_wood if len(mst['strokes']) >= 8]): # logging.debug([mst['pred'] for mst in mst_wood if 'pred' in mst]) # HandwrittenData(json.dumps(recording)).show() return mst_wood
Parameters ---------- recording : A list of lists Each sublist represents a stroke single_clf : object A classifier for single symbols - it only says "True" when a stroke is a single symbol or "False" when a stroke is only part of a symbol. Returns ------- list A list of lists. Each sub-list is at least one symbol, but might be more.
def has_missing_break(real_seg, pred_seg): for symbol_pred in pred_seg: for symbol_real in real_seg: if symbol_pred[0] in symbol_real: for stroke in symbol_pred: if stroke not in symbol_real: return True return False
Parameters ---------- real_seg : list of lists of integers The segmentation as it should be. pred_seg : list of lists of integers The predicted segmentation. Returns ------- bool : True, if strokes of two different symbols are put in the same symbol.
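An example, derived directly from the definition above:

>>> has_missing_break([[0, 1], [2]], [[0, 1, 2]])
True
>>> has_missing_break([[0, 1], [2]], [[0, 1], [2]])
False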
def has_wrong_break(real_seg, pred_seg): for symbol_real in real_seg: for symbol_pred in pred_seg: if symbol_real[0] in symbol_pred: for stroke in symbol_real: if stroke not in symbol_pred: return True return False
Parameters ---------- real_seg : list of lists of integers The segmentation as it should be. pred_seg : list of lists of integers The predicted segmentation. Returns ------- bool : True, if strokes of one symbol were segmented to be in different symbols.
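An example, derived directly from the definition above:

>>> has_wrong_break([[0, 1, 2]], [[0, 1], [2]])
True
>>> has_wrong_break([[0, 1, 2]], [[0, 1, 2]])
False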
def find_split_node(mst_wood, i): for mst_index, mst in enumerate(mst_wood): if i in mst['strokes']: return (mst_index, mst['strokes'].index(i)) raise ValueError('%i was not found as stroke index.' % i)
Parameters ---------- mst_wood : list of dictionaries i : int Number of the stroke where one mst gets split Returns ------- tuple : (mst index, node index)
def break_mst(mst, i): for j in range(len(mst['mst'])): mst['mst'][i][j] = 0 mst['mst'][j][i] = 0 _, components = scipy.sparse.csgraph.connected_components(mst['mst']) comp_indices = {} for el in set(components): comp_indices[el] = {'strokes': [], 'strokes_i': []} for i, comp_nr in enumerate(components): comp_indices[comp_nr]['strokes'].append(mst['strokes'][i]) comp_indices[comp_nr]['strokes_i'].append(i) mst_wood = [] for key in comp_indices: matrix = [] for i, line in enumerate(mst['mst']): line_add = [] if i not in comp_indices[key]['strokes_i']: continue for j, el in enumerate(line): if j in comp_indices[key]['strokes_i']: line_add.append(el) matrix.append(line_add) assert len(matrix) > 0, \ ("len(matrix) == 0 (strokes: %s, mst=%s, i=%i)" % (comp_indices[key]['strokes'], mst, i)) assert len(matrix) == len(matrix[0]), \ ("matrix was %i x %i, but should be square" % (len(matrix), len(matrix[0]))) assert len(matrix) == len(comp_indices[key]['strokes']), \ (("stroke length was not equal to matrix length " "(strokes=%s, len(matrix)=%i)") % (comp_indices[key]['strokes'], len(matrix))) mst_wood.append({'mst': matrix, 'strokes': comp_indices[key]['strokes']}) return mst_wood
Break mst into multiple MSTs by removing one node i. Parameters ---------- mst : symmetrical square matrix i : index of the node at which the mst gets broken Returns ------- list of dictionaries ('mst' and 'strokes' are the keys)
def _is_out_of_order(segmentation): last_stroke = -1 for symbol in segmentation: for stroke in symbol: if last_stroke > stroke: return True last_stroke = stroke return False
Check if a given segmentation is out of order. Examples -------- >>> _is_out_of_order([[0, 1, 2, 3]]) False >>> _is_out_of_order([[0, 1], [2, 3]]) False >>> _is_out_of_order([[0, 1, 3], [2]]) True
def get_points(recording): points = [] for stroke in recording: point = geometry.get_bounding_box(stroke).get_center() points.append(point) return points
Get one point for each stroke in a recording. The point represents the stroke's spatial position (e.g. the center of the bounding box). Parameters ---------- recording : list of strokes Returns ------- list : points
def get_bb_intersections(recording): intersections = numpy.zeros((len(recording), len(recording)), dtype=bool) for i in range(len(recording)-1): a = geometry.get_bounding_box(recording[i]).grow(0.2) for j in range(i+1, len(recording)): b = geometry.get_bounding_box(recording[j]).grow(0.2) intersections[i][j] = geometry.do_bb_intersect(a, b) intersections[j][i] = intersections[i][j] return intersections
Get all intersections of the bounding boxes of strokes. Parameters ---------- recording : list of lists of integers Returns ------- A symmetrical matrix which indicates if two bounding boxes intersect.
def get_mst(points): graph = Graph() for point in points: graph.add_node(point) graph.generate_euclidean_edges() matrix = scipy.sparse.csgraph.minimum_spanning_tree(graph.w) mst = matrix.toarray().astype(int) # returned matrix is not symmetrical! make it symmetrical for i in range(len(mst)): for j in range(len(mst)): if mst[i][j] > 0: mst[j][i] = mst[i][j] if mst[j][i] > 0: mst[i][j] = mst[j][i] return mst
Parameters ---------- points : list of points (geometry.Point) The first element of the list is the center of the bounding box of the first stroke, the second one belongs to the second stroke, ... Returns ------- mst : square matrix 0 means the nodes are not connected, > 0 means they are connected
def predict(self, parsed_json): evaluate = utils.evaluate_model_single_recording_preloaded results = evaluate(self.preprocessing_queue, self.feature_list, self.model, self.output_semantics, json.dumps(parsed_json['data']), parsed_json['id']) return results
Parameters ---------- parsed_json : dict with keys 'data' and 'id', where 'data' contains a recording and 'id' is the id on write-math.com for debugging purposes
def add_node(self, payload): self.nodes.append(Node(len(self.nodes), payload)) return len(self.nodes) - 1
Returns ------- int Identifier for the inserted node.
def p_strokes(symbol, count): global stroke_prob assert count >= 1 epsilon = 0.00000001 if stroke_prob is None: misc_path = pkg_resources.resource_filename('hwrt', 'misc/') stroke_prob_file = os.path.join(misc_path, 'prob_stroke_count_by_symbol.yml') with open(stroke_prob_file, 'r') as stream: stroke_prob = yaml.load(stream) if symbol in stroke_prob: if count in stroke_prob[symbol]: return stroke_prob[symbol][count] else: return epsilon return epsilon
Get the probability of a written `symbol` having `count` strokes. Parameters ---------- symbol : str LaTeX command count : int, >= 1 Returns ------- float In [0.0, 1.0]
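After loading, stroke_prob is a nested dictionary; a hypothetical excerpt (the probabilities are made up):

# stroke_prob = {'\\alpha': {1: 0.6, 2: 0.4},
#                '=': {1: 0.05, 2: 0.95}}
# p_strokes('\\alpha', 2) would then return 0.4; any unknown
# (symbol, count) combination falls back to epsilon = 0.00000001.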
def _calc_hypothesis_probability(hypothesis): prob = 0.0 for symbol, seg in zip(hypothesis['symbols'], hypothesis['segmentation']): # symbol_latex = symbol['symbol'].split(";")[1] # TODO: Does p_strokes really improve the system? prob += symbol['probability'] # * p_strokes(symbol_latex, len(seg)) # Use language model to update probabilities pure_symbols = [symbol['symbol'].split(";")[1] for symbol in hypothesis['symbols']] pure_symbols = ["<s>"] + pure_symbols + ["</s>"] lm_prob = language_model.get_probability(pure_symbols) hypothesis['lm_probability'] = 2**lm_prob return (prob * float(hypothesis['lm_probability']) * (1.0 / len(hypothesis['segmentation'])))
Get the probability (or rather a score) of a hypothesis. Parameters ---------- hypothesis : dict with keys 'segmentation', 'symbols', ... Returns ------- float in [0.0, 1.0]
def build_unicode(hyp): latex = [] for symbol in hyp['symbols']: latex.append(symbol['symbol']) return ";;".join(latex)
Parameters ---------- hyp : dict {'segmentation': [[0, 3], [1, 2]], 'symbols': [{'symbol': ID, 'probability': 0.12}], 'geometry': {'symbol': index, 'bottom': None or dict, 'subscript': None or dict, 'right': None or dict, 'superscript': None or dict, 'top': None or dict}, 'probability': 0.123 }
def build_latex(hyp): latex = [] for symbol in hyp['symbols']: latex.append(symbol['symbol'].split(";")[1]) return " ".join(latex)
Parameters ---------- hyp : dict {'segmentation': [[0, 3], [1, 2]], 'symbols': [{'symbol': ID, 'probability': 0.12}], 'geometry': {'symbol': index, 'bottom': None or dict, 'subscript': None or dict, 'right': None or dict, 'superscript': None or dict, 'top': None or dict}, 'probability': 0.123 }
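An example with hypothetical write-math ids:

>>> build_latex({'symbols': [{'symbol': '31;\\alpha'},
...                          {'symbol': '32;\\beta'}]})
'\\alpha \\beta'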
def _prune(self): self.hypotheses = sorted(self.hypotheses, key=lambda e: e['probability'], reverse=True)[:self.k]
Shorten hypotheses to the best k ones.
def get_writemath_results(self): results = [] for hyp in self.hypotheses: symbols = [] for sym in hyp['symbols']: symbols.append({'id': sym['symbol'].split(';')[0], 'probability': sym['probability']}) results.append({'probability': float(hyp['probability']), 'segmentation': hyp['segmentation'], 'symbols': symbols}) return results
Get the result in the format [{'probability': 0.987, 'segmentation': [[0, 1], [2, 4], [3]]} // index of the stroke 'symbols': [{'id': 456, // on write-math.com 'probability': 0.123}, {'id': 456, 'probability': 0.999}, // The sum does not have to be 1 {'id': 195, 'probability': 0.0001}] }, {...} // another hypothesis ]
def get_matrices(): with open('hwrt/misc/is_one_symbol_classifier.pickle', 'rb') as f: a = pickle.load(f) arrays = [] for el1 in a.input_storage: for el2 in el1.__dict__['storage']: if isinstance(el2, cuda.CudaNdarray): arrays.append({'storage': numpy.asarray(el2), 'name': el1.name}) else: logging.warning("was type %s. Do nothing." % type(el2)) logging.debug(el1.name) return arrays
Get the matrices from a pickled file. Returns ------- list List of all matrices.
def create_model_tar(matrices, tarname="model-cuda-converted.tar"): # Write layers filenames = [] for layer in range(len(matrices)): if matrices[layer]['name'] == 'W': weights = matrices[layer]['storage'] weights_file = h5py.File('W%i.hdf5' % (layer / 2), 'w') weights_file.create_dataset(weights_file.id.name, data=weights) weights_file.close() filenames.append('W%i.hdf5' % (layer / 2)) elif matrices[layer]['name'] == 'b': b = matrices[layer]['storage'] bfile = h5py.File('b%i.hdf5' % (layer / 2), 'w') bfile.create_dataset(bfile.id.name, data=b) bfile.close() filenames.append('b%i.hdf5' % (layer / 2)) # activation = a['layers'][layer]['_props']['activation'] # activation = activation.replace('sigmoid', 'Sigmoid') # activation = activation.replace('softmax', 'Softmax') # layers.append({'W': {'size': list(W.shape), # 'filename': 'W%i.hdf5' % layer}, # 'b': {'size': list(b.shape), # 'filename': 'b%i.hdf5' % layer}, # 'activation': activation}) with tarfile.open(tarname, "w:") as tar: for name in filenames: tar.add(name) # Remove temporary files which are now in tar file for filename in filenames: os.remove(filename)
Create a tar file which contains the model. Parameters ---------- matrices : list tarname : str Target file which will be created.
def read_folder(folder): hwr_objects = [] for filepath in natsort.natsorted(glob.glob("%s/*.inkml" % folder)): tmp = inkml.read(filepath) for hwr in tmp.to_single_symbol_list(): hwr_objects.append(hwr) logging.info("Done reading formulas") save_raw_pickle(hwr_objects) return hwr_objects
Parameters ---------- folder : str Returns ------- list of HandwrittenData objects
def save_raw_pickle(hwr_objects): converted_hwr = [] translate = {} translate_id = {} model_path = pkg_resources.resource_filename('hwrt', 'misc/') translation_csv = os.path.join(model_path, 'latex2writemathindex.csv') arguments = {'newline': '', 'encoding': 'utf8'} with open(translation_csv, 'rt', **arguments) as csvfile: contents = csvfile.read() lines = contents.split("\n") for csvrow in lines: csvrow = csvrow.split(',') if len(csvrow) == 1: writemathid = csvrow[0] latex = "" else: writemathid, latex = int(csvrow[0]), csvrow[1:] latex = ','.join(latex) translate[latex] = writemathid translate_id[writemathid] = latex for hwr in hwr_objects: hwr.formula_in_latex = translate_id[hwr.formula_id] formula_id2latex = {} for el in hwr_objects: if el.formula_id not in formula_id2latex: formula_id2latex[el.formula_id] = el.formula_in_latex for hwr in hwr_objects: hwr.formula_in_latex = translate_id[hwr.formula_id] hwr.raw_data_id = 42 converted_hwr.append({'is_in_testset': 0, 'formula_id': hwr.formula_id, 'handwriting': hwr, 'id': 42, 'formula_in_latex': hwr.formula_in_latex}) with open('crohme.pickle', 'wb') as f: pickle.dump({'formula_id2latex': formula_id2latex, 'handwriting_datasets': converted_hwr}, f, protocol=pickle.HIGHEST_PROTOCOL)
Parameters ---------- hwr_objects : list of hwr objects
def check_python_version(): # Required due to multiple with statements on one line req_version = (2, 7) cur_version = sys.version_info if cur_version >= req_version: print("Python version... %sOK%s (found %s, requires %s)" % (Bcolors.OKGREEN, Bcolors.ENDC, str(platform.python_version()), str(req_version[0]) + "." + str(req_version[1]))) else: print("Python version... %sFAIL%s (found %s, requires %s)" % (Bcolors.FAIL, Bcolors.ENDC, str(cur_version), str(req_version)))
Check if the currently running Python version is new enough.
def check_executables(): print("\033[1mCheck executables\033[0m") required_executables = [utils.get_nntoolkit()] for executable in required_executables: path = which(executable) if path is None: print("%s ... %sNOT%s found" % (executable, Bcolors.WARNING, Bcolors.ENDC)) else: print("%s ... %sfound%s at %s" % (executable, Bcolors.OKGREEN, Bcolors.ENDC, path))
Check if all necessary / recommended executables are installed.
def main(): check_python_version() check_python_modules() check_executables() home = os.path.expanduser("~") print("\033[1mCheck files\033[0m") rcfile = os.path.join(home, ".hwrtrc") if os.path.isfile(rcfile): print("~/.hwrtrc... %sFOUND%s" % (Bcolors.OKGREEN, Bcolors.ENDC)) else: print("~/.hwrtrc... %sNOT FOUND%s" % (Bcolors.FAIL, Bcolors.ENDC)) misc_path = pkg_resources.resource_filename('hwrt', 'misc/') print("misc-path: %s" % misc_path)
Execute all checks.
def main(dataset1, dataset2, target): d1 = read_raw(dataset1) d2 = read_raw(dataset2) merged = merge(d1, d2) with open(target, 'wb') as f: pickle.dump(merged, f, protocol=pickle.HIGHEST_PROTOCOL)
Parameters ---------- dataset1 : str dataset2 : str target : str
def read_raw(data_path): with open(data_path, 'rb') as f: data = pickle.load(f) return data
Parameters ---------- data_path : str
def merge(d1, d2): if d1['formula_id2latex'] is None: formula_id2latex = {} else: formula_id2latex = d1['formula_id2latex'].copy() formula_id2latex.update(d2['formula_id2latex']) handwriting_datasets = d1['handwriting_datasets'] for dataset in d2['handwriting_datasets']: handwriting_datasets.append(dataset) return {'formula_id2latex': formula_id2latex, 'handwriting_datasets': handwriting_datasets}
Merge two raw datasets into one. Parameters ---------- d1 : dict d2 : dict Returns ------- dict
def beautify_xml(path): with open(path) as f: content = f.read() pretty_print = lambda data: '\n'.join([line for line in parseString(data) .toprettyxml(indent=' ' * 2) .split('\n') if line.strip()]) return pretty_print(content)
Beautify / pretty print XML in `path`. Parameters ---------- path : str Returns ------- str
def read_folder(folder): import glob recordings = [] for filename in natsorted(glob.glob("%s/*.inkml" % folder)): hw = read(filename) if hw.formula_in_latex is not None: hw.formula_in_latex = hw.formula_in_latex.strip() if hw.formula_in_latex is None or \ not hw.formula_in_latex.startswith('$') or \ not hw.formula_in_latex.endswith('$'): if hw.formula_in_latex is not None: logging.info("Starts with: %s", str(hw.formula_in_latex.startswith('$'))) logging.info("ends with: %s", str(hw.formula_in_latex.endswith('$'))) logging.info(hw.formula_in_latex) logging.info(hw.segmentation) hw.show() recordings.append(hw) return recordings
Parameters ---------- folder : string Path to a folder with *.inkml files. Returns ------- list : Objects of the type HandwrittenData
def is_file_consistent(local_path_file, md5_hash): return os.path.isfile(local_path_file) and \ hashlib.md5(open(local_path_file, 'rb').read()).hexdigest() == md5_hash
Check if file is there and if the md5_hash is correct.
def get_parser(): from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter) return parser
Return the parser object for this script.
def main(): # Read config file. This has to get updated via git project_root = utils.get_project_root() infofile = os.path.join(project_root, "raw-datasets/info.yml") logging.info("Read '%s'...", infofile) with open(infofile, 'r') as ymlfile: datasets = yaml.load(ymlfile) for dataset in datasets: local_path_file = os.path.join(project_root, dataset['online_path']) i = 0 while not is_file_consistent(local_path_file, dataset['md5']) and i < 3: if os.path.isfile(local_path_file): local_file_size = os.path.getsize(local_path_file) logging.info("MD5 codes differ. ") logging.info("The file size of the downloaded file is %s.", utils.sizeof_fmt(local_file_size)) logging.info("Download the file '%s'...", dataset['online_path']) urllib.urlretrieve(dataset['url'], local_path_file) i += 1 if i < 10: logging.info("Found '%s'.", dataset['online_path'])
Main part of the download script.
def load_model(): logging.info("Load language model...") ngram_arpa_t = pkg_resources.resource_filename('hwrt', 'misc/ngram.arpa.tar.bz2') with tarfile.open(ngram_arpa_t, 'r:bz2') as tar: tarfolder = tempfile.mkdtemp() tar.extractall(path=tarfolder) ngram_arpa_f = os.path.join(tarfolder, 'ngram.arpa') with open(ngram_arpa_f) as f: content = f.read() ngram_model = NgramLanguageModel() ngram_model.load_from_arpa_str(content) return ngram_model
Load an n-gram language model for mathematics in ARPA format which gets shipped with hwrt. Returns ------- A NgramLanguageModel object
def get_trigram_log_prob(self, trigram): w1, w2, w3 = trigram # if w1 not in self.ngrams[1]['data']: # w1 = "<unk>" # if w2 not in self.ngrams[1]['data']: # w2 = "<unk>" # if w3 not in self.ngrams[1]['data']: # w3 = "<unk>" if w1 in self.ngrams[3]['data']: if w2 in self.ngrams[3]['data'][w1]: if w3 in self.ngrams[3]['data'][w1][w2]: return self.ngrams[3]['data'][w1][w2][w3] return (Decimal(1.0).log10() - Decimal(int(self.ngrams[1]['count'])**2).log10())
Calculate the probability P(w1, w2, w3), given this language model. Parameters ---------- trigram : tuple with exactly 3 elements Returns ------- numeric The log likelihood of P(w3 | (w1, w2))
def get_probability(self, sentence): if len(sentence) == 1: return Decimal(10)**self.get_unigram_log_prob(sentence) elif len(sentence) == 2: return Decimal(10)**self.get_bigram_log_prob(sentence) else: log_prob = Decimal(0.0) for w1, w2, w3 in zip(sentence, sentence[1:], sentence[2:]): log_prob += self.get_trigram_log_prob((w1, w2, w3)) log_prob = Decimal(log_prob) return Decimal(10)**log_prob
Calculate the probability of a sentence, given this language model. Get P(sentence) = P(w1, w2, w3, ..., wn) = P(w1, w2, w3) * P(w2, w3, w4) *...* P(wn-2, wn-1, wn) Parameters ---------- sentence : list A list of strings / tokens.
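A sketch of the trigram chaining for a longer sentence (the tokens are made up):

# For sentence = ['<s>', 'a', '+', 'b', '</s>'] the trigrams
# ('<s>', 'a', '+'), ('a', '+', 'b') and ('+', 'b', '</s>')
# are scored with get_trigram_log_prob, the log probabilities are summed
# and the result is returned as Decimal(10)**log_prob.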
def evaluate_dir(sample_dir): results = [] if sample_dir[-1] == "/": sample_dir = sample_dir[:-1] for filename in glob.glob("%s/*.inkml" % sample_dir): results.append(evaluate_inkml(filename)) return results
Evaluate all recordings in `sample_dir`. Parameters ---------- sample_dir : string The path to a directory with *.inkml files. Returns ------- list of dictionaries Each dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'latex' and 'probability'
def evaluate_inkml(inkml_file_path): logging.info("Start evaluating '%s'...", inkml_file_path) ret = {'filename': inkml_file_path} recording = inkml.read(inkml_file_path) results = evaluate(json.dumps(recording.get_sorted_pointlist()), result_format='LaTeX') ret['results'] = results return ret
Evaluate an InkML file. Parameters ---------- inkml_file_path : string path to an InkML file Returns ------- dictionary The dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'semantics' (which contains the latex command) and 'probability'
def generate_output_csv(evaluation_results, filename='results.csv'):
    with open(filename, 'w') as f:
        for result in evaluation_results:
            for entry in result['results']:
                if entry['semantics'] == ',':
                    entry['semantics'] = 'COMMA'
            f.write("%s, " % result['filename'])
            f.write(", ".join([entry['semantics']
                               for entry in result['results']]))
            f.write("\n")
            f.write("%s, " % "scores")
            f.write(", ".join([str(entry['probability'])
                               for entry in result['results']]))
            f.write("\n")
Write the evaluation results to a CSV file in the format shown in the examples below. Parameters ---------- evaluation_results : list of dictionaries Each dictionary contains the keys 'filename' and 'results', where 'results' itself is a list of dictionaries. Each of the results has the keys 'latex' and 'probability' Examples -------- MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1 MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100
def print_status(total, current, start_time=None): percentage_done = float(current) / total sys.stdout.write("\r%0.2f%% " % (percentage_done * 100)) if start_time is not None: current_running_time = time.time() - start_time remaining_seconds = current_running_time / percentage_done tmp = datetime.timedelta(seconds=remaining_seconds) sys.stdout.write("(%s remaining) " % str(tmp)) sys.stdout.flush()
Show how much work was done / how much work is remaining. Parameters ---------- total : float The total amount of work current : float The work that has been done so far start_time : int The start time in seconds since 1970 to estimate the remaining time.
def is_valid_file(parser, arg): arg = os.path.abspath(arg) if not os.path.exists(arg): parser.error("The file %s does not exist!" % arg) else: return arg
Check if arg is a valid file that already exists on the file system.
def is_valid_folder(parser, arg): arg = os.path.abspath(arg) if not os.path.isdir(arg): parser.error("The folder %s does not exist!" % arg) else: return arg
Check if arg is a valid folder that already exists on the file system.
def get_project_configuration(): home = os.path.expanduser("~") rcfile = os.path.join(home, ".hwrtrc") if not os.path.isfile(rcfile): create_project_configuration(rcfile) with open(rcfile, 'r') as ymlfile: cfg = yaml.load(ymlfile) return cfg
Get project configuration as dictionary.
def create_project_configuration(filename): home = os.path.expanduser("~") project_root_folder = os.path.join(home, "hwr-experiments") config = {'root': project_root_folder, 'nntoolkit': None, 'dropbox_app_key': None, 'dropbox_app_secret': None, 'dbconfig': os.path.join(home, "hwrt-config/db.config.yml"), 'data_analyzation_queue': [{'Creator': None}], 'worker_api_key': '1234567890abc', 'environment': 'development'} with open(filename, 'w') as f: yaml.dump(config, f, default_flow_style=False)
Create a project configuration file which contains a configuration that might make sense.
def get_project_root(): cfg = get_project_configuration() # At this point it can be sure that the configuration file exists # Now make sure the project structure exists for dirname in ["raw-datasets", "preprocessed", "feature-files", "models", "reports"]: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) raw_yml_path = pkg_resources.resource_filename('hwrt', 'misc/') # TODO: How to check for updates if it already exists? raw_data_dst = os.path.join(cfg['root'], "raw-datasets/info.yml") if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, "info.yml") shutil.copy(raw_yml_pkg_src, raw_data_dst) # Make sure small-baseline folders exists for dirname in ["models/small-baseline", "feature-files/small-baseline", "preprocessed/small-baseline"]: directory = os.path.join(cfg['root'], dirname) if not os.path.exists(directory): os.makedirs(directory) # Make sure small-baseline yml files exist paths = [("preprocessed/small-baseline/", "preprocessing-small-info.yml"), ("feature-files/small-baseline/", "feature-small-info.yml"), ("models/small-baseline/", "model-small-info.yml")] for dest, src in paths: raw_data_dst = os.path.join(cfg['root'], "%s/info.yml" % dest) if not os.path.isfile(raw_data_dst): raw_yml_pkg_src = os.path.join(raw_yml_path, src) shutil.copy(raw_yml_pkg_src, raw_data_dst) return cfg['root']
Get the project root folder as a string.
def get_template_folder(): cfg = get_project_configuration() if 'templates' not in cfg: home = os.path.expanduser("~") rcfile = os.path.join(home, ".hwrtrc") cfg['templates'] = pkg_resources.resource_filename('hwrt', 'templates/') with open(rcfile, 'w') as f: yaml.dump(cfg, f, default_flow_style=False) return cfg['templates']
Get the path to the folder where the HTML templates are.
def get_database_config_file(): cfg = get_project_configuration() if 'dbconfig' in cfg: if os.path.isfile(cfg['dbconfig']): return cfg['dbconfig'] else: logging.info("File '%s' was not found. Adjust 'dbconfig' in your " "~/.hwrtrc file.", cfg['dbconfig']) else: logging.info("No database connection file found. " "Specify 'dbconfig' in your ~/.hwrtrc file.") return None
Get the absolute path to the database configuration file.
def get_database_configuration(): db_config = get_database_config_file() if db_config is None: return None with open(db_config, 'r') as ymlfile: cfg = yaml.load(ymlfile) return cfg
Get database configuration as dictionary.
def input_int_default(question="", default=0): answer = input_string(question) if answer == "" or answer == "yes": return default else: return int(answer)
Ask the user for an integer, returning ``default`` if the answer is empty or 'yes'. Works for both Python 2.x and Python 3.x.
def create_run_logfile(folder): with open(os.path.join(folder, "run.log"), "w") as f: datestring = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") f.write("timestamp: '%s'" % datestring)
Create a 'run.log' within folder. This file contains the time of the latest successful run.
def choose_raw_dataset(currently=""): folder = os.path.join(get_project_root(), "raw-datasets") files = [os.path.join(folder, name) for name in os.listdir(folder) if name.endswith(".pickle")] default = -1 for i, filename in enumerate(files): if os.path.basename(currently) == os.path.basename(filename): default = i if i != default: print("[%i]\t%s" % (i, os.path.basename(filename))) else: print("\033[1m[%i]\033[0m\t%s" % (i, os.path.basename(filename))) i = input_int_default("Choose a dataset by number: ", default) return files[i]
Let the user choose a raw dataset. Return the absolute path.
def get_readable_time(t): ms = t % 1000 t -= ms t /= 1000 s = t % 60 t -= s t /= 60 minutes = t % 60 t -= minutes t /= 60 if t != 0: return "%ih, %i minutes %is %ims" % (t, minutes, s, ms) elif minutes != 0: return "%i minutes %is %ims" % (minutes, s, ms) elif s != 0: return "%is %ims" % (s, ms) else: return "%ims" % ms
Format the time to a readable format. Parameters ---------- t : int Time in ms Returns ------- string The time split into its largest relevant units (hours, minutes, ...)
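Examples, derived from the definition above:

>>> get_readable_time(123456)
'2 minutes 3s 456ms'
>>> get_readable_time(45007)
'45s 7ms'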
def default_model(): project_root = get_project_root() models_dir = os.path.join(project_root, "models") curr_dir = os.getcwd() if os.path.commonprefix([models_dir, curr_dir]) == models_dir and \ curr_dir != models_dir: latest_model = curr_dir else: latest_model = get_latest_folder(models_dir) return latest_model
Get a path for a default value for the model. Start searching in the current directory.
def create_adjusted_model_for_percentages(model_src, model_use): # Copy model file shutil.copyfile(model_src, model_use) # Adjust model file with open(model_src) as f: content = f.read() content = content.replace("logreg", "sigmoid") with open(model_use, "w") as f: f.write(content)
Replace logreg layer by sigmoid to get probabilities.
def create_hdf5(output_filename, feature_count, data): import h5py logging.info("Start creating of %s hdf file", output_filename) x = [] y = [] for features, label in data: assert len(features) == feature_count, \ "Expected %i features, got %i features" % \ (feature_count, len(features)) x.append(features) y.append(int(label)) Wfile = h5py.File(output_filename, 'w') Wfile.create_dataset("data", data=x, dtype='float32') Wfile.create_dataset("labels", data=y, dtype='int32') Wfile.close()
Create an HDF5 feature file. Parameters ---------- output_filename : string name of the HDF5 file that will be created feature_count : int dimension of all features combined data : list of tuples list of (x, y) tuples, where x is the feature vector of dimension ``feature_count`` and y is a label.
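A minimal usage sketch (the feature values and labels are made up):

data = [([0.1, 0.2, 0.3], 4),
        ([0.5, 0.5, 0.5], 7)]
create_hdf5('example.hdf5', 3, data)
# creates 'example.hdf5' with the datasets 'data' (float32) and
# 'labels' (int32)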
def get_recognizer_folders(model_folder): folders = [] folder = model_folder while os.path.isdir(folder): folders.append(folder) # Get info.yml with open(os.path.join(folder, "info.yml")) as ymlfile: content = yaml.load(ymlfile) folder = os.path.join(get_project_root(), content['data-source']) return folders[::-1]
Get a list of folders [preprocessed, feature-files, model].
def load_model(model_file): # Extract tar with tarfile.open(model_file) as tar: tarfolder = tempfile.mkdtemp() tar.extractall(path=tarfolder) from . import features from . import preprocessing # Get the preprocessing with open(os.path.join(tarfolder, "preprocessing.yml"), 'r') as ymlfile: preprocessing_description = yaml.load(ymlfile) preprocessing_queue = preprocessing.get_preprocessing_queue( preprocessing_description['queue']) # Get the features with open(os.path.join(tarfolder, "features.yml"), 'r') as ymlfile: feature_description = yaml.load(ymlfile) feature_str_list = feature_description['features'] feature_list = features.get_features(feature_str_list) # Get the model import nntoolkit.utils model = nntoolkit.utils.get_model(model_file) output_semantics_file = os.path.join(tarfolder, 'output_semantics.csv') output_semantics = nntoolkit.utils.get_outputs(output_semantics_file) # Cleanup shutil.rmtree(tarfolder) return (preprocessing_queue, feature_list, model, output_semantics)
Load a model by its file. This includes the model itself, but also the preprocessing queue, the feature list and the output semantics.
def evaluate_model_single_recording_preloaded(preprocessing_queue, feature_list, model, output_semantics, recording, recording_id=None): handwriting = handwritten_data.HandwrittenData(recording, raw_data_id=recording_id) handwriting.preprocessing(preprocessing_queue) x = handwriting.feature_extraction(feature_list) import nntoolkit.evaluate model_output = nntoolkit.evaluate.get_model_output(model, [x]) return nntoolkit.evaluate.get_results(model_output, output_semantics)
Evaluate a model for a single recording, after everything has been loaded. Parameters ---------- preprocessing_queue : list List of all preprocessing objects. feature_list : list List of all feature objects. model : dict Neural network model. output_semantics : list List that defines what an output means. recording : string in JSON format The handwritten recording in JSON format. recording_id : int or None For debugging purposes.
def get_possible_splits(n): get_bin = lambda x, n: x >= 0 and str(bin(x))[2:].zfill(n) or "-" + str(bin(x))[3:].zfill(n) possible_splits = [] for i in range(2**(n - 1)): possible_splits.append(get_bin(i, n - 1)) return possible_splits
Parameters ---------- n : int n strokes were made
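An example, derived from the definition above; presumably each character of a split encodes whether there is a symbol boundary between two consecutive strokes:

>>> get_possible_splits(3)
['00', '01', '10', '11']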
def evaluate_model_single_recording_multisymbol(model_file, recording): (preprocessing_queue, feature_list, model, output_semantics) = load_model(model_file) logging.info("multiple symbol mode") logging.info(recording) results = evaluate_model_single_recording_preloaded(preprocessing_queue, feature_list, model, output_semantics, recording) return results
Evaluate a model for a single recording where possibly multiple symbols are. Parameters ---------- model_file : string Model file (.tar) recording : The handwritten recording.
def evaluate_model_single_recording(model_file, recording): (preprocessing_queue, feature_list, model, output_semantics) = load_model(model_file) results = evaluate_model_single_recording_preloaded(preprocessing_queue, feature_list, model, output_semantics, recording) return results
Evaluate a model for a single recording. Parameters ---------- model_file : string Model file (.tar) recording : The handwritten recording.
def _evaluate_model_single_file(target_folder, test_file): logging.info("Create running model...") model_src = get_latest_model(target_folder, "model") model_file_pointer = tempfile.NamedTemporaryFile(delete=False) model_use = model_file_pointer.name model_file_pointer.close() logging.info("Adjusted model is in %s.", model_use) create_adjusted_model_for_percentages(model_src, model_use) # Run evaluation project_root = get_project_root() time_prefix = time.strftime("%Y-%m-%d-%H-%M") logging.info("Evaluate '%s' with '%s'...", model_src, test_file) logfilefolder = os.path.join(project_root, "logs/") if not os.path.exists(logfilefolder): os.makedirs(logfilefolder) logfile = os.path.join(project_root, "logs/%s-error-evaluation.log" % time_prefix) with open(logfile, "w") as log, open(model_use, "r") as modl_src_p: p = subprocess.Popen([get_nntoolkit(), 'run', '--batch-size', '1', '-f%0.4f', test_file], stdin=modl_src_p, stdout=log) ret = p.wait() if ret != 0: logging.error("nntoolkit finished with ret code %s", str(ret)) sys.exit() return (logfile, model_use)
Evaluate the latest model in ``target_folder`` on a test file. Parameters ---------- target_folder : string Folder where the model is test_file : string The test file (.hdf5)
def evaluate_model(recording, model_folder, verbose=False): from . import preprocess_dataset from . import features for target_folder in get_recognizer_folders(model_folder): # The source is later than the target. That means we need to # refresh the target if "preprocessed" in target_folder: logging.info("Start applying preprocessing methods...") t = target_folder _, _, preprocessing_queue = preprocess_dataset.get_parameters(t) handwriting = handwritten_data.HandwrittenData(recording) if verbose: handwriting.show() handwriting.preprocessing(preprocessing_queue) if verbose: logging.debug("After preprocessing: %s", handwriting.get_sorted_pointlist()) handwriting.show() elif "feature-files" in target_folder: logging.info("Create feature file...") infofile_path = os.path.join(target_folder, "info.yml") with open(infofile_path, 'r') as ymlfile: feature_description = yaml.load(ymlfile) feature_str_list = feature_description['features'] feature_list = features.get_features(feature_str_list) feature_count = sum(map(lambda n: n.get_dimension(), feature_list)) x = handwriting.feature_extraction(feature_list) # Create hdf5 _, output_filename = tempfile.mkstemp(suffix='.hdf5', text=True) create_hdf5(output_filename, feature_count, [(x, 0)]) elif "model" in target_folder: logfile, model_use = _evaluate_model_single_file(target_folder, output_filename) return logfile else: logging.info("'%s' not found", target_folder) os.remove(output_filename) os.remove(model_use)
Evaluate a model for a single recording.
def get_index2latex(model_description): index2latex = {} translation_csv = os.path.join(get_project_root(), model_description["data-source"], "index2formula_id.csv") with open(translation_csv) as csvfile: csvreader = csv.DictReader(csvfile, delimiter=',', quotechar='"') for row in csvreader: index2latex[int(row['index'])] = row['latex'] return index2latex
Get a dictionary that maps indices to LaTeX commands. Parameters ---------- model_description : string A model description file that points to a feature folder where an `index2formula_id.csv` has to be. Returns ------- dictionary : Maps indices to LaTeX commands
def get_index2data(model_description): index2latex = {} translation_csv = os.path.join(get_project_root(), model_description["data-source"], "index2formula_id.csv") with open(translation_csv) as csvfile: csvreader = csv.DictReader(csvfile, delimiter=',', quotechar='"') for row in csvreader: database_id = int(row['formula_id']) online_data = get_online_symbol_data(database_id) latex = online_data['formula_in_latex'] unicode_code_point = online_data['unicode_dec'] font = online_data['font'] font_style = online_data['font_style'] index2latex[int(row['index'])] = [database_id, latex, unicode_code_point, font, font_style] return index2latex
Get a dictionary that maps indices to a list of (1) the id in the hwrt symbol database (2) the latex command (3) the unicode code point (4) a font family and (5) a font style. Parameters ---------- model_description : string A model description file that points to a feature folder where an ``index2formula_id.csv`` has to be. Returns ------- dictionary that maps indices to lists of data Notes ----- This command needs a database connection.
def get_online_symbol_data(database_id): import pymysql import pymysql.cursors cfg = get_database_configuration() mysql = cfg['mysql_online'] connection = pymysql.connect(host=mysql['host'], user=mysql['user'], passwd=mysql['passwd'], db=mysql['db'], cursorclass=pymysql.cursors.DictCursor) cursor = connection.cursor() sql = ("SELECT `id`, `formula_in_latex`, `unicode_dec`, `font`, " "`font_style` FROM `wm_formula` WHERE `id` =%i") % database_id cursor.execute(sql) datasets = cursor.fetchall() if len(datasets) == 1: return datasets[0] else: return None
Get the data of a symbol (id, LaTeX command, unicode code point, font, font style) from the server by its database id.
def classify_single_recording(raw_data_json, model_folder, verbose=False): evaluation_file = evaluate_model(raw_data_json, model_folder, verbose) with open(os.path.join(model_folder, "info.yml")) as ymlfile: model_description = yaml.load(ymlfile) index2latex = get_index2latex(model_description) # Map line to probabilites for LaTeX commands with open(evaluation_file) as f: probabilities = f.read() probabilities = map(float, probabilities.split(" ")) results = [] for index, probability in enumerate(probabilities): results.append((index2latex[index], probability)) results = sorted(results, key=lambda n: n[1], reverse=True) return results
Get the classification as a list of tuples. The first value is the LaTeX code, the second value is the probability.
def get_objectlist(description, config_key, module): object_list = [] for feature in description: for feat, params in feature.items(): feat = get_class(feat, config_key, module) if params is None: object_list.append(feat()) else: parameters = {} for dicts in params: for param_name, param_value in dicts.items(): parameters[param_name] = param_value object_list.append(feat(**parameters)) # pylint: disable=W0142 return object_list
Take a description and return a list of classes. Parameters ---------- description : list of dictionaries Each dictionary has only one entry. The key is the name of a class. The value of that entry is a list of dictionaries again. Those dictionaries are parameters. Returns ------- List of objects.
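A hypothetical description, using feature classes that appear elsewhere in this code base (the parameter values are made up):

description = [{'Ink': None},
               {'ConstantPointCoordinates': [{'strokes': 4},
                                             {'points_per_stroke': 20}]}]
# get_objectlist(description, config_key, module) would then return
# [Ink(), ConstantPointCoordinates(strokes=4, points_per_stroke=20)],
# assuming `module` is the features module that defines these classes.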
def get_class(name, config_key, module): clsmembers = inspect.getmembers(module, inspect.isclass) for string_name, act_class in clsmembers: if string_name == name: return act_class # Check if the user has specified a plugin and if the class is in there cfg = get_project_configuration() if config_key in cfg: modname = os.path.splitext(os.path.basename(cfg[config_key]))[0] if os.path.isfile(cfg[config_key]): usermodule = imp.load_source(modname, cfg[config_key]) clsmembers = inspect.getmembers(usermodule, inspect.isclass) for string_name, act_class in clsmembers: if string_name == name: return act_class else: logging.warning("File '%s' does not exist. Adjust ~/.hwrtrc.", cfg['data_analyzation_plugins']) logging.debug("Unknown class '%s'.", name) return None
Get the class by its name as a string.
def get_mysql_cfg(): environment = get_project_configuration()['environment'] cfg = get_database_configuration() if environment == 'production': mysql = cfg['mysql_online'] else: mysql = cfg['mysql_dev'] return mysql
Get the appropriate MySQL configuration, depending on whether the environment is set to 'production' or to development.
def softmax(w, t=1.0): w = [Decimal(el) for el in w] e = numpy.exp(numpy.array(w) / Decimal(t)) dist = e / numpy.sum(e) return dist
Calculate the softmax of a list of numbers w. Parameters ---------- w : list of numbers Returns ------- a list of the same length as w of non-negative numbers Examples -------- >>> softmax([0.1, 0.2]) array([ 0.47502081, 0.52497919]) >>> softmax([-0.1, 0.2]) array([ 0.42555748, 0.57444252]) >>> softmax([0.9, -10]) array([ 9.99981542e-01, 1.84578933e-05]) >>> softmax([0, 10]) array([ 4.53978687e-05, 9.99954602e-01])
def get_beam_cache_directory(): home = os.path.expanduser("~") cache_dir = os.path.join(home, '.hwrt-beam-cache') if not os.path.exists(cache_dir): os.makedirs(cache_dir) return cache_dir
Get a directory where pickled Beam Data can be stored. Create that directory, if it doesn't exist. Returns ------- str Path to the directory
def get_beam(secret_uuid): beam_dir = get_beam_cache_directory() beam_filename = os.path.join(beam_dir, secret_uuid) if os.path.isfile(beam_filename): with open(beam_filename, 'rb') as handle: beam = pickle.load(handle) return beam else: return None
Get a beam from the session with `secret_uuid`. Parameters ---------- secret_uuid : str Returns ------- The beam object if it exists, otherwise `None`.
def is_valid_uuid(uuid_to_test, version=4): try: uuid_obj = UUID(uuid_to_test, version=version) except ValueError: return False return str(uuid_obj) == uuid_to_test
Check if uuid_to_test is a valid UUID. Parameters ---------- uuid_to_test : str version : {1, 2, 3, 4} Returns ------- `True` if uuid_to_test is a valid UUID, otherwise `False`. Examples -------- >>> is_valid_uuid('c9bf9e57-1685-4c89-bafb-ff5af830be8a') True >>> is_valid_uuid('c9bf9e58') False
def prepare_table(table): n = len(table) for i, row in enumerate(table): assert len(row) == n for j, el in enumerate(row): if i == j: table[i][i] = 0.0 elif i > j: table[i][j] = 1-table[j][i] return table
Make the table 'symmetric': the diagonal is set to 0 and the lower left part of the matrix gets filled with the reverse probability (1 - p) of the upper right part.
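An example, derived from the definition above:

>>> prepare_table([[0.5, 0.75], [0.9, 0.5]])
[[0.0, 0.75], [0.25, 0.0]]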
def clusters(l, K): if l: prev = None for t in clusters(l[1:], K): tup = sorted(t) if tup != prev: prev = tup for i in range(K): yield tup[:i] + [[l[0]] + tup[i], ] + tup[i+1:] else: yield [[] for _ in range(K)]
Partition list ``l`` in ``K`` partitions. >>> l = [0, 1, 2] >>> list(clusters(l, K=3)) [[[0], [1], [2]], [[], [0, 1], [2]], [[], [1], [0, 2]], [[0], [], [1, 2]], [[], [0], [1, 2]], [[], [], [0, 1, 2]]] >>> list(clusters(l, K=2)) [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[], [0, 1, 2]]] >>> list(clusters(l, K=1)) [[[0, 1, 2]]]
def neclusters(l, K): for c in clusters(l, K): if all(x for x in c): yield c
Partition list ``l`` in ``K`` partitions, without empty parts. >>> l = [0, 1, 2] >>> list(neclusters(l, 2)) [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]]] >>> list(neclusters(l, 1)) [[[0, 1, 2]]]
def all_segmentations(l): for K in range(1, len(l)+1): gen = neclusters(l, K) for el in gen: yield el
Get all segmentations of a list ``l``. This gets bigger fast. See https://oeis.org/A000110 For len(l) = 14 it is 190,899,322 >>> list(all_segmentations([0, 1, 2])) [[[0, 1, 2]], [[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[0], [1], [2]]]
def find_index(segmentation, stroke_id): for i, symbol in enumerate(segmentation): for sid in symbol: if sid == stroke_id: return i return -1
>>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 0) 0 >>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 1) 0 >>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 5) 2 >>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 6) 2