text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Check if a given segmentation is out of order.
<END_TASK>
<USER_TASK:>
Description:
def _is_out_of_order(segmentation):
"""
Check if a given segmentation is out of order.
Examples
--------
>>> _is_out_of_order([[0, 1, 2, 3]])
False
>>> _is_out_of_order([[0, 1], [2, 3]])
False
>>> _is_out_of_order([[0, 1, 3], [2]])
True
""" |
last_stroke = -1
for symbol in segmentation:
for stroke in symbol:
if last_stroke > stroke:
return True
last_stroke = stroke
return False |
<SYSTEM_TASK:>
Get all intersections of the bounding boxes of strokes.
<END_TASK>
<USER_TASK:>
Description:
def get_bb_intersections(recording):
"""
Get all intersections of the bounding boxes of strokes.
Parameters
----------
recording : list of lists of integers
Returns
-------
A symmetric boolean matrix where entry (i, j) indicates whether the (grown) bounding boxes of strokes i and j intersect.
""" |
intersections = numpy.zeros((len(recording), len(recording)),
dtype=bool)
for i in range(len(recording)-1):
a = geometry.get_bounding_box(recording[i]).grow(0.2)
for j in range(i+1, len(recording)):
b = geometry.get_bounding_box(recording[j]).grow(0.2)
intersections[i][j] = geometry.do_bb_intersect(a, b)
intersections[j][i] = intersections[i][j]
return intersections |
<SYSTEM_TASK:>
Get the probability of a written `symbol` having `count` strokes.
<END_TASK>
<USER_TASK:>
Description:
def p_strokes(symbol, count):
"""
Get the probability of a written `symbol` having `count` strokes.
Parameters
----------
symbol : str
LaTeX command
count : int, >= 1
Returns
-------
float
In [0.0, 1.0]
""" |
global stroke_prob
assert count >= 1
epsilon = 0.00000001
if stroke_prob is None:
misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
stroke_prob_file = os.path.join(misc_path,
'prob_stroke_count_by_symbol.yml')
with open(stroke_prob_file, 'r') as stream:
stroke_prob = yaml.load(stream)
if symbol in stroke_prob:
if count in stroke_prob[symbol]:
return stroke_prob[symbol][count]
else:
return epsilon
return epsilon |
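A hypothetical usage sketch for `p_strokes` (the concrete return values depend on the shipped `prob_stroke_count_by_symbol.yml`, so only the fallback value is certain):

p_strokes('\\alpha', 1)   # probability that \alpha is written with one stroke
p_strokes('\\alpha', 50)  # unseen stroke counts fall back to epsilon = 1e-8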
<SYSTEM_TASK:>
Get new guesses by assuming new_stroke is a new symbol.
<END_TASK>
<USER_TASK:>
Description:
def _add_hypotheses_assuming_new_stroke(self,
new_stroke,
stroke_nr,
new_beam):
"""
Get new guesses by assuming new_stroke is a new symbol.
Parameters
----------
new_stroke : list of dicts
A list of dicts, e.g. [{'x': 12, 'y': 34, 'time': 56}, ...], where each
dict represents a point.
stroke_nr : int
Number of the stroke for segmentation
new_beam : beam object
""" |
guesses = single_clf.predict({'data': [new_stroke],
'id': None})[:self.m]
for hyp in self.hypotheses:
new_geometry = deepcopy(hyp['geometry'])
most_right = new_geometry
if len(hyp['symbols']) == 0:
while 'right' in most_right:
most_right = most_right['right']
most_right['right'] = {'symbol_index': len(hyp['symbols']),
'right': None}
else:
most_right = {'symbol_index': len(hyp['symbols']),
'right': None}
for guess in guesses:
sym = {'symbol': guess['semantics'],
'probability': guess['probability']}
new_seg = deepcopy(hyp['segmentation'])
new_seg.append([stroke_nr])
new_sym = deepcopy(hyp['symbols'])
new_sym.append(sym)
b = {'segmentation': new_seg,
'symbols': new_sym,
'geometry': new_geometry,
'probability': None
}
# spacial_rels = [] # TODO
# for s1_indices, s2_indices in zip(b['segmentation'],
# b['segmentation'][1:]):
# tmp = [new_beam.history['data'][el] for el in s1_indices]
# s1 = HandwrittenData(json.dumps(tmp))
# tmp = [new_beam.history['data'][el] for el in s2_indices]
# s2 = HandwrittenData(json.dumps(tmp))
# rel = spacial_relationship.estimate(s1, s2)
# spacial_rels.append(rel)
# b['geometry'] = spacial_rels
new_beam.hypotheses.append(b) |
<SYSTEM_TASK:>
Update the beam so that it considers `new_stroke`.
<END_TASK>
<USER_TASK:>
Description:
def add_stroke(self, new_stroke):
"""
Update the beam so that it considers `new_stroke`.
When a `new_stroke` comes, it can either belong to a symbol for which
at least one other stroke was already made or belong to a symbol for
which `new_stroke` is the first stroke.
The number of hypotheses after q strokes without pruning is
f: N_0 -> N_0
f(0) = 1
f(1) = m
f(q) = f(q-1)*(m+n)
The number of times the single symbol classifier has to be called when
already q hypotheses exist:
f_s: N_0 -> N_0
f_s(q) = q*n + 1 (upper bound)
Parameters
----------
new_stroke : list of dicts
A list of dicts, e.g. [{'x': 12, 'y': 34, 'time': 56}, ...], where each
dict represents a point.
""" |
global single_clf
if len(self.hypotheses) == 0: # Don't put this in the constructor!
self.hypotheses = [{'segmentation': [],
'symbols': [],
'geometry': {},
'probability': Decimal(1)
}]
stroke_nr = len(self.history['data'])
new_history = deepcopy(self.history)
new_history['data'].append(new_stroke)
new_beam = Beam()
new_beam.history = new_history
evaluated_segmentations = []
# Get new guesses by assuming new_stroke belongs to an already begun
# symbol
had_multisymbol = False
for hyp in self.hypotheses:
# Add stroke to last n symbols (separately)
for i in range(min(self.n, len(hyp['segmentation']))):
# Build stroke data
new_strokes = {'data': [], 'id': -1}
for stroke_index in hyp['segmentation'][-(i+1)]:
curr_stroke = self.history['data'][stroke_index]
new_strokes['data'].append(curr_stroke)
new_strokes['data'].append(new_stroke)
new_seg = deepcopy(hyp['segmentation'])
new_seg[-(i+1)].append(stroke_nr)
if new_seg in evaluated_segmentations:
continue
else:
evaluated_segmentations.append(new_seg)
# Predict this new collection of strokes
guesses = single_clf.predict(new_strokes)[:self.m]
for guess in guesses:
if guess['semantics'].split(";")[1] == "::MULTISYMBOL::":
# This was a wrong segmentation. Ignore it.
had_multisymbol = True
continue
sym = {'symbol': guess['semantics'],
'probability': guess['probability']}
new_sym = deepcopy(hyp['symbols'])
new_sym[-(i+1)] = sym
b = {'segmentation': new_seg,
'symbols': new_sym,
'geometry': deepcopy(hyp['geometry']),
'probability': None
}
new_beam.hypotheses.append(b)
if len(self.hypotheses) <= 1 or had_multisymbol:
self._add_hypotheses_assuming_new_stroke(new_stroke,
stroke_nr,
new_beam)
for hyp in new_beam.hypotheses:
hyp['probability'] = _calc_hypothesis_probability(hyp)
# Get probability again
# Get geometry of each beam entry
# TODO
# Update probabilities
# TODO
# Normalize to sum=1
self.hypotheses = new_beam.hypotheses
self.history = new_beam.history
self._prune()
new_probs = softmax([h['probability']
for h in self.hypotheses])
for hyp, prob in zip(self.hypotheses, new_probs):
hyp['probability'] = prob |
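To see why the pruning step below matters, here is a small sketch of the hypothesis growth f(q) = f(q-1)*(m+n) from the docstring above; the values of m and n are illustrative assumptions, not values read from the code.

def hypothesis_count(q, m, n):
    """Number of hypotheses after q strokes without pruning:
    f(0) = 1, f(1) = m, f(q) = f(q-1) * (m + n)."""
    if q == 0:
        return 1
    if q == 1:
        return m
    return hypothesis_count(q - 1, m, n) * (m + n)

# With e.g. m=10 symbol guesses per stroke and n=3 reachable previous
# symbols, 8 strokes already give 10 * 13**7 = 627,485,170 hypotheses,
# which is why the beam is cut down to the best k after every stroke.
print(hypothesis_count(8, m=10, n=3))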
<SYSTEM_TASK:>
Shorten hypotheses to the best k ones.
<END_TASK>
<USER_TASK:>
Description:
def _prune(self):
"""Shorten hypotheses to the best k ones.""" |
self.hypotheses = sorted(self.hypotheses,
key=lambda e: e['probability'],
reverse=True)[:self.k] |
<SYSTEM_TASK:>
Get the matrices from a pickled file.
<END_TASK>
<USER_TASK:>
Description:
def get_matrices():
"""
Get the matrices from a pickled file.
Returns
-------
list
List of all matrices.
""" |
with open('hwrt/misc/is_one_symbol_classifier.pickle', 'rb') as f:
a = pickle.load(f)
arrays = []
for el1 in a.input_storage:
for el2 in el1.__dict__['storage']:
if isinstance(el2, cuda.CudaNdarray):
arrays.append({'storage': numpy.asarray(el2),
'name': el1.name})
else:
logging.warning("was type %s. Do nothing." % type(el2))
logging.debug(el1.name)
return arrays |
<SYSTEM_TASK:>
Create a tar file which contains the model.
<END_TASK>
<USER_TASK:>
Description:
def create_model_tar(matrices, tarname="model-cuda-converted.tar"):
"""
Create a tar file which contains the model.
Parameters
----------
matrices : list
tarname : str
Target file which will be created.
""" |
# Write layers
filenames = []
for layer in range(len(matrices)):
if matrices[layer]['name'] == 'W':
weights = matrices[layer]['storage']
weights_file = h5py.File('W%i.hdf5' % (layer / 2), 'w')
weights_file.create_dataset(weights_file.id.name, data=weights)
weights_file.close()
filenames.append('W%i.hdf5' % (layer / 2))
elif matrices[layer]['name'] == 'b':
b = matrices[layer]['storage']
bfile = h5py.File('b%i.hdf5' % (layer / 2), 'w')
bfile.create_dataset(bfile.id.name, data=b)
bfile.close()
filenames.append('b%i.hdf5' % (layer / 2))
# activation = a['layers'][layer]['_props']['activation']
# activation = activation.replace('sigmoid', 'Sigmoid')
# activation = activation.replace('softmax', 'Softmax')
# layers.append({'W': {'size': list(W.shape),
# 'filename': 'W%i.hdf5' % layer},
# 'b': {'size': list(b.shape),
# 'filename': 'b%i.hdf5' % layer},
# 'activation': activation})
with tarfile.open(tarname, "w:") as tar:
for name in filenames:
tar.add(name)
# Remove temporary files which are now in tar file
for filename in filenames:
os.remove(filename) |
<SYSTEM_TASK:>
Check if the currently running Python version is new enough.
<END_TASK>
<USER_TASK:>
Description:
def check_python_version():
"""Check if the currently running Python version is new enough.""" |
# Python 2.7 is required, e.g. for multiple context managers in one `with` statement
req_version = (2, 7)
cur_version = sys.version_info
if cur_version >= req_version:
print("Python version... %sOK%s (found %s, requires %s)" %
(Bcolors.OKGREEN, Bcolors.ENDC, str(platform.python_version()),
str(req_version[0]) + "." + str(req_version[1])))
else:
print("Python version... %sFAIL%s (found %s, requires %s)" %
(Bcolors.FAIL, Bcolors.ENDC, str(cur_version),
str(req_version))) |
<SYSTEM_TASK:>
Execute all checks.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Execute all checks.""" |
check_python_version()
check_python_modules()
check_executables()
home = os.path.expanduser("~")
print("\033[1mCheck files\033[0m")
rcfile = os.path.join(home, ".hwrtrc")
if os.path.isfile(rcfile):
print("~/.hwrtrc... %sFOUND%s" %
(Bcolors.OKGREEN, Bcolors.ENDC))
else:
print("~/.hwrtrc... %sNOT FOUND%s" %
(Bcolors.FAIL, Bcolors.ENDC))
misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
print("misc-path: %s" % misc_path) |
<SYSTEM_TASK:>
Merge two raw datasets into one.
<END_TASK>
<USER_TASK:>
Description:
def merge(d1, d2):
"""Merge two raw datasets into one.
Parameters
----------
d1 : dict
d2 : dict
Returns
-------
dict
""" |
if d1['formula_id2latex'] is None:
formula_id2latex = {}
else:
formula_id2latex = d1['formula_id2latex'].copy()
formula_id2latex.update(d2['formula_id2latex'])
handwriting_datasets = d1['handwriting_datasets']
for dataset in d2['handwriting_datasets']:
handwriting_datasets.append(dataset)
return {'formula_id2latex': formula_id2latex,
'handwriting_datasets': handwriting_datasets} |
<SYSTEM_TASK:>
Check if file is there and if the md5_hash is correct.
<END_TASK>
<USER_TASK:>
Description:
def is_file_consistent(local_path_file, md5_hash):
"""Check if file is there and if the md5_hash is correct.""" |
if not os.path.isfile(local_path_file):
    return False
with open(local_path_file, 'rb') as f:
    return hashlib.md5(f.read()).hexdigest() == md5_hash |
<SYSTEM_TASK:>
Main part of the download script.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Main part of the download script.""" |
# Read config file. This has to get updated via git
project_root = utils.get_project_root()
infofile = os.path.join(project_root, "raw-datasets/info.yml")
logging.info("Read '%s'...", infofile)
with open(infofile, 'r') as ymlfile:
datasets = yaml.load(ymlfile)
for dataset in datasets:
local_path_file = os.path.join(project_root, dataset['online_path'])
i = 0
while not is_file_consistent(local_path_file, dataset['md5']) and i < 3:
if os.path.isfile(local_path_file):
local_file_size = os.path.getsize(local_path_file)
logging.info("MD5 codes differ. ")
logging.info("The file size of the downloaded file is %s.",
utils.sizeof_fmt(local_file_size))
logging.info("Download the file '%s'...", dataset['online_path'])
urllib.urlretrieve(dataset['url'], local_path_file)
i += 1
if i < 10:
logging.info("Found '%s'.", dataset['online_path']) |
<SYSTEM_TASK:>
Load an n-gram language model for mathematics in ARPA format which gets shipped with hwrt.
<END_TASK>
<USER_TASK:>
Description:
def load_model():
"""
Load an n-gram language model for mathematics in ARPA format which gets
shipped with hwrt.
Returns
-------
A NgramLanguageModel object
""" |
logging.info("Load language model...")
ngram_arpa_t = pkg_resources.resource_filename('hwrt',
'misc/ngram.arpa.tar.bz2')
with tarfile.open(ngram_arpa_t, 'r:bz2') as tar:
tarfolder = tempfile.mkdtemp()
tar.extractall(path=tarfolder)
ngram_arpa_f = os.path.join(tarfolder, 'ngram.arpa')
with open(ngram_arpa_f) as f:
content = f.read()
ngram_model = NgramLanguageModel()
ngram_model.load_from_arpa_str(content)
return ngram_model |
<SYSTEM_TASK:>
Initialize N-gram model by reading an ARPA language model string.
<END_TASK>
<USER_TASK:>
Description:
def load_from_arpa_str(self, arpa_str):
"""
Initialize N-gram model by reading an ARPA language model string.
Parameters
----------
arpa_str : str
A string in ARPA language model file format
""" |
data_found = False
end_found = False
in_ngram_block = 0
for i, line in enumerate(arpa_str.split("\n")):
if not end_found:
if not data_found:
if "\\data\\" in line:
data_found = True
else:
if in_ngram_block == 0:
if line.startswith("ngram"):
ngram_type, count = line.split("=")
_, n = ngram_type.split(" ")
n = int(n)
self.ngrams[n] = {'data': {},
'count': count}
elif line.startswith("\\"):
n = int(line.split("-")[0][1:])
in_ngram_block = n
else:
continue # Empty line
elif in_ngram_block > 0:
if "\\end\\" in line:
end_found = True
elif line.startswith("\\"):
n = int(line.split("-")[0][1:])
in_ngram_block = n
elif len(line) <= 1:
continue
else:
data = line.split("\t")
probability = Decimal(data[0])
ngram = data[1:]
if len(ngram) != n:
raise Exception(("ARPA language file is "
"inconsistant. Line %i has "
"only %i items, but should "
"have %i items.") %
(i, len(ngram), n))
rest = ngram
append_to = self.ngrams[n]['data']
while len(rest) > 1:
first, rest = rest[0], rest[1:]
if first not in append_to:
append_to[first] = {}
append_to = append_to[first]
if rest[0] in append_to:
raise Exception(("Duplicate entry for "
"ngram %s") % ngram)
append_to[rest[0]] = probability
else:
if line.startswith("info: "):
logging.info(line[6:]) |
<SYSTEM_TASK:>
Calculate the probability of a sentence, given this language model.
<END_TASK>
<USER_TASK:>
Description:
def get_probability(self, sentence):
"""
Calculate the probability of a sentence, given this language model.
Get P(sentence) = P(w1, w2, w3, ..., wn)
= P(w1, w2, w3) * P(w2, w3, w4) *...* P(wn-2, wn-1, wn)
Parameters
----------
sentence : list
A list of strings / tokens.
""" |
if len(sentence) == 1:
return Decimal(10)**self.get_unigram_log_prob(sentence)
elif len(sentence) == 2:
return Decimal(10)**self.get_bigram_log_prob(sentence)
else:
log_prob = Decimal(0.0)
for w1, w2, w3 in zip(sentence, sentence[1:], sentence[2:]):
log_prob += self.get_trigram_log_prob((w1, w2, w3))
log_prob = Decimal(log_prob)
return Decimal(10)**log_prob |
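A minimal sketch of the trigram chaining done above, using made-up log10 probabilities instead of a real ARPA model (the dictionary below is purely hypothetical):

from decimal import Decimal

# Assumed log10 trigram probabilities for the token sequence a b c d
fake_trigram_log_prob = {('a', 'b', 'c'): Decimal('-0.5'),
                         ('b', 'c', 'd'): Decimal('-1.0')}

sentence = ['a', 'b', 'c', 'd']
log_prob = Decimal(0)
for trigram in zip(sentence, sentence[1:], sentence[2:]):
    log_prob += fake_trigram_log_prob[trigram]
print(Decimal(10) ** log_prob)  # 10**(-1.5), roughly 0.0316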
<SYSTEM_TASK:>
Evaluate all recordings in `sample_dir`.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_dir(sample_dir):
"""Evaluate all recordings in `sample_dir`.
Parameters
----------
sample_dir : string
The path to a directory with *.inkml files.
Returns
-------
list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
""" |
results = []
if sample_dir[-1] == "/":
sample_dir = sample_dir[:-1]
for filename in glob.glob("%s/*.inkml" % sample_dir):
results.append(evaluate_inkml(filename))
return results |
<SYSTEM_TASK:>
Evaluate an InkML file.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_inkml(inkml_file_path):
"""Evaluate an InkML file.
Parameters
----------
inkml_file_path : string
path to an InkML file
Returns
-------
dictionary
The dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has the
keys 'semantics' (which contains the latex command) and 'probability'
""" |
logging.info("Start evaluating '%s'...", inkml_file_path)
ret = {'filename': inkml_file_path}
recording = inkml.read(inkml_file_path)
results = evaluate(json.dumps(recording.get_sorted_pointlist()),
result_format='LaTeX')
ret['results'] = results
return ret |
<SYSTEM_TASK:>
Generate the evaluation results in the format shown in the examples below.
<END_TASK>
<USER_TASK:>
Description:
def generate_output_csv(evaluation_results, filename='results.csv'):
"""Generate the evaluation results in the format
Parameters
----------
evaluation_results : list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
Examples
--------
MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j
scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times
scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100
""" |
with open(filename, 'w') as f:
for result in evaluation_results:
for i, entry in enumerate(result['results']):
if entry['semantics'] == ',':
entry['semantics'] = 'COMMA'
f.write("%s, " % result['filename'])
f.write(", ".join([entry['semantics'] for entry in result['results']]))
f.write("\n")
f.write("%s, " % "scores")
f.write(", ".join([str(entry['probability']) for entry in result['results']]))
f.write("\n") |
<SYSTEM_TASK:>
Get project configuration as dictionary.
<END_TASK>
<USER_TASK:>
Description:
def get_project_configuration():
"""Get project configuration as dictionary.""" |
home = os.path.expanduser("~")
rcfile = os.path.join(home, ".hwrtrc")
if not os.path.isfile(rcfile):
create_project_configuration(rcfile)
with open(rcfile, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
return cfg |
<SYSTEM_TASK:>
Create a project configuration file which contains a configuration that might make sense.
<END_TASK>
<USER_TASK:>
Description:
def create_project_configuration(filename):
"""Create a project configuration file which contains a configuration
that might make sense.""" |
home = os.path.expanduser("~")
project_root_folder = os.path.join(home, "hwr-experiments")
config = {'root': project_root_folder,
'nntoolkit': None,
'dropbox_app_key': None,
'dropbox_app_secret': None,
'dbconfig': os.path.join(home, "hwrt-config/db.config.yml"),
'data_analyzation_queue': [{'Creator': None}],
'worker_api_key': '1234567890abc',
'environment': 'development'}
with open(filename, 'w') as f:
yaml.dump(config, f, default_flow_style=False) |
<SYSTEM_TASK:>
Get the project root folder as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_project_root():
"""Get the project root folder as a string.""" |
cfg = get_project_configuration()
# At this point we can be sure that the configuration file exists
# Now make sure the project structure exists
for dirname in ["raw-datasets",
"preprocessed",
"feature-files",
"models",
"reports"]:
directory = os.path.join(cfg['root'], dirname)
if not os.path.exists(directory):
os.makedirs(directory)
raw_yml_path = pkg_resources.resource_filename('hwrt', 'misc/')
# TODO: How to check for updates if it already exists?
raw_data_dst = os.path.join(cfg['root'], "raw-datasets/info.yml")
if not os.path.isfile(raw_data_dst):
raw_yml_pkg_src = os.path.join(raw_yml_path, "info.yml")
shutil.copy(raw_yml_pkg_src, raw_data_dst)
# Make sure small-baseline folders exists
for dirname in ["models/small-baseline", "feature-files/small-baseline",
"preprocessed/small-baseline"]:
directory = os.path.join(cfg['root'], dirname)
if not os.path.exists(directory):
os.makedirs(directory)
# Make sure small-baseline yml files exist
paths = [("preprocessed/small-baseline/", "preprocessing-small-info.yml"),
("feature-files/small-baseline/", "feature-small-info.yml"),
("models/small-baseline/", "model-small-info.yml")]
for dest, src in paths:
raw_data_dst = os.path.join(cfg['root'], "%s/info.yml" % dest)
if not os.path.isfile(raw_data_dst):
raw_yml_pkg_src = os.path.join(raw_yml_path, src)
shutil.copy(raw_yml_pkg_src, raw_data_dst)
return cfg['root'] |
<SYSTEM_TASK:>
Get the path to the folder where the HTML templates are.
<END_TASK>
<USER_TASK:>
Description:
def get_template_folder():
"""Get path to the folder where th HTML templates are.""" |
cfg = get_project_configuration()
if 'templates' not in cfg:
home = os.path.expanduser("~")
rcfile = os.path.join(home, ".hwrtrc")
cfg['templates'] = pkg_resources.resource_filename('hwrt',
'templates/')
with open(rcfile, 'w') as f:
yaml.dump(cfg, f, default_flow_style=False)
return cfg['templates'] |
<SYSTEM_TASK:>
Get the absolute path to the database configuration file.
<END_TASK>
<USER_TASK:>
Description:
def get_database_config_file():
"""Get the absolute path to the database configuration file.""" |
cfg = get_project_configuration()
if 'dbconfig' in cfg:
if os.path.isfile(cfg['dbconfig']):
return cfg['dbconfig']
else:
logging.info("File '%s' was not found. Adjust 'dbconfig' in your "
"~/.hwrtrc file.",
cfg['dbconfig'])
else:
logging.info("No database connection file found. "
"Specify 'dbconfig' in your ~/.hwrtrc file.")
return None |
<SYSTEM_TASK:>
A function that works for both Python 2.x and Python 3.x.
<END_TASK>
<USER_TASK:>
Description:
def input_int_default(question="", default=0):
"""A function that works for both, Python 2.x and Python 3.x.
It asks the user for input and returns it as a string.
""" |
answer = input_string(question)
if answer == "" or answer == "yes":
return default
else:
return int(answer) |
<SYSTEM_TASK:>
Create a 'run.log' within folder. This file contains the time of the latest successful run.
<END_TASK>
<USER_TASK:>
Description:
def create_run_logfile(folder):
"""Create a 'run.log' within folder. This file contains the time of the
latest successful run.
""" |
with open(os.path.join(folder, "run.log"), "w") as f:
datestring = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
f.write("timestamp: '%s'" % datestring) |
<SYSTEM_TASK:>
Let the user choose a raw dataset. Return the absolute path.
<END_TASK>
<USER_TASK:>
Description:
def choose_raw_dataset(currently=""):
"""Let the user choose a raw dataset. Return the absolute path.""" |
folder = os.path.join(get_project_root(), "raw-datasets")
files = [os.path.join(folder, name) for name in os.listdir(folder)
if name.endswith(".pickle")]
default = -1
for i, filename in enumerate(files):
if os.path.basename(currently) == os.path.basename(filename):
default = i
if i != default:
print("[%i]\t%s" % (i, os.path.basename(filename)))
else:
print("\033[1m[%i]\033[0m\t%s" % (i, os.path.basename(filename)))
i = input_int_default("Choose a dataset by number: ", default)
return files[i] |
<SYSTEM_TASK:>
Format the time to a readable format.
<END_TASK>
<USER_TASK:>
Description:
def get_readable_time(t):
"""
Format the time to a readable format.
Parameters
----------
t : int
Time in ms
Returns
-------
string
The time, split into the largest applicable units (hours, minutes, ...).
""" |
ms = t % 1000
t -= ms
t /= 1000
s = t % 60
t -= s
t /= 60
minutes = t % 60
t -= minutes
t /= 60
if t != 0:
return "%ih, %i minutes %is %ims" % (t, minutes, s, ms)
elif minutes != 0:
return "%i minutes %is %ims" % (minutes, s, ms)
elif s != 0:
return "%is %ims" % (s, ms)
else:
return "%ims" % ms |
<SYSTEM_TASK:>
Get a path for a default value for the model. Start searching in the current directory.
<END_TASK>
<USER_TASK:>
Description:
def default_model():
"""Get a path for a default value for the model. Start searching in the
current directory.""" |
project_root = get_project_root()
models_dir = os.path.join(project_root, "models")
curr_dir = os.getcwd()
if os.path.commonprefix([models_dir, curr_dir]) == models_dir and \
curr_dir != models_dir:
latest_model = curr_dir
else:
latest_model = get_latest_folder(models_dir)
return latest_model |
<SYSTEM_TASK:>
Replace logreg layer by sigmoid to get probabilities.
<END_TASK>
<USER_TASK:>
Description:
def create_adjusted_model_for_percentages(model_src, model_use):
"""Replace logreg layer by sigmoid to get probabilities.""" |
# Copy model file
shutil.copyfile(model_src, model_use)
# Adjust model file
with open(model_src) as f:
content = f.read()
content = content.replace("logreg", "sigmoid")
with open(model_use, "w") as f:
f.write(content) |
<SYSTEM_TASK:>
Create an HDF5 feature file.
<END_TASK>
<USER_TASK:>
Description:
def create_hdf5(output_filename, feature_count, data):
"""
Create an HDF5 feature file.
Parameters
----------
output_filename : string
name of the HDF5 file that will be created
feature_count : int
dimension of all features combined
data : list of tuples
list of (x, y) tuples, where x is the feature vector of dimension
``feature_count`` and y is a label.
""" |
import h5py
logging.info("Start creating of %s hdf file", output_filename)
x = []
y = []
for features, label in data:
assert len(features) == feature_count, \
"Expected %i features, got %i features" % \
(feature_count, len(features))
x.append(features)
y.append(int(label))
Wfile = h5py.File(output_filename, 'w')
Wfile.create_dataset("data", data=x, dtype='float32')
Wfile.create_dataset("labels", data=y, dtype='int32')
Wfile.close() |
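A minimal usage sketch with two hand-made feature vectors of dimension 3 (the file name is arbitrary):

data = [([0.1, 0.2, 0.3], 0),
        ([0.4, 0.5, 0.6], 1)]
create_hdf5('tiny-feature-file.hdf5', feature_count=3, data=data)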
<SYSTEM_TASK:>
Load a model by its file. This includes the model itself, but also the preprocessing queue, the feature list and the output semantics.
<END_TASK>
<USER_TASK:>
Description:
def load_model(model_file):
"""Load a model by its file. This includes the model itself, but also
the preprocessing queue, the feature list and the output semantics.
""" |
# Extract tar
with tarfile.open(model_file) as tar:
tarfolder = tempfile.mkdtemp()
tar.extractall(path=tarfolder)
from . import features
from . import preprocessing
# Get the preprocessing
with open(os.path.join(tarfolder, "preprocessing.yml"), 'r') as ymlfile:
preprocessing_description = yaml.load(ymlfile)
preprocessing_queue = preprocessing.get_preprocessing_queue(
preprocessing_description['queue'])
# Get the features
with open(os.path.join(tarfolder, "features.yml"), 'r') as ymlfile:
feature_description = yaml.load(ymlfile)
feature_str_list = feature_description['features']
feature_list = features.get_features(feature_str_list)
# Get the model
import nntoolkit.utils
model = nntoolkit.utils.get_model(model_file)
output_semantics_file = os.path.join(tarfolder, 'output_semantics.csv')
output_semantics = nntoolkit.utils.get_outputs(output_semantics_file)
# Cleanup
shutil.rmtree(tarfolder)
return (preprocessing_queue, feature_list, model, output_semantics) |
<SYSTEM_TASK:>
Evaluate a model for a single recording, after everything has been loaded.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_model_single_recording_preloaded(preprocessing_queue,
feature_list,
model,
output_semantics,
recording,
recording_id=None):
"""
Evaluate a model for a single recording, after everything has been loaded.
Parameters
----------
preprocessing_queue : list
List of all preprocessing objects.
feature_list : list
List of all feature objects.
model : dict
Neural network model.
output_semantics : list
List that defines what an output means.
recording : string in JSON format
The handwritten recording in JSON format.
recording_id : int or None
For debugging purposes.
""" |
handwriting = handwritten_data.HandwrittenData(recording,
raw_data_id=recording_id)
handwriting.preprocessing(preprocessing_queue)
x = handwriting.feature_extraction(feature_list)
import nntoolkit.evaluate
model_output = nntoolkit.evaluate.get_model_output(model, [x])
return nntoolkit.evaluate.get_results(model_output, output_semantics) |
<SYSTEM_TASK:>
Evaluate a model for a single recording, after everything has been loaded.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_model_single_recording_preloaded_multisymbol(preprocessing_queue,
feature_list,
model,
output_semantics,
recording):
"""
Evaluate a model for a single recording, after everything has been loaded.
Multiple symbols are recognized.
Parameters
----------
preprocessing_queue : list
List of all preprocessing objects.
feature_list : list
List of all feature objects.
model : dict
Neural network model.
output_semantics : list
List that defines what an output means.
recording : string in JSON format
The handwritten recording in JSON format.
""" |
import json
import nntoolkit.evaluate
recording = json.loads(recording)
logging.info(("## start (%i strokes)" % len(recording)) + "#" * 80)
hypotheses = [] # [[{'score': 0.123, symbols: [123, 123]}] # split0
# []] # Split i...
for split in get_possible_splits(len(recording)):
recording_segmented = segment_by_split(split, recording)
cur_split_results = []
for i, symbol in enumerate(recording_segmented):
handwriting = handwritten_data.HandwrittenData(json.dumps(symbol))
handwriting.preprocessing(preprocessing_queue)
x = handwriting.feature_extraction(feature_list)
model_output = nntoolkit.evaluate.get_model_output(model, [x])
results = nntoolkit.evaluate.get_results(model_output,
output_semantics)
results = results[:10]
cur_split_results.append([el for el in results if el['probability'] >= 0.01])
# serve.show_results(results, n=10)
# Now that I have all symbols of this split, I have to get all
# combinations of the hypothesis
import itertools
for hyp in itertools.product(*cur_split_results):
hypotheses.append({'score': reduce(lambda x, y: x*y,
[s['probability'] for s in hyp])*len(hyp)/len(recording),
'symbols': [s['semantics'] for s in hyp],
'min_part': min([s['probability'] for s in hyp]),
'segmentation': split})
hypotheses = sorted(hypotheses, key=lambda n: n['min_part'], reverse=True)[:10]
for i, hyp in enumerate(hypotheses):
if hyp['score'] > 0.001:
logging.info("%0.4f: %s (seg: %s)", hyp['score'], hyp['symbols'], hyp['segmentation'])
return nntoolkit.evaluate.get_results(model_output, output_semantics) |
<SYSTEM_TASK:>
Evaluate a model for a single recording where possibly multiple symbols are present.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_model_single_recording_multisymbol(model_file, recording):
"""
Evaluate a model for a single recording where possibly multiple symbols
are present.
Parameters
----------
model_file : string
Model file (.tar)
recording :
The handwritten recording.
""" |
(preprocessing_queue, feature_list, model,
output_semantics) = load_model(model_file)
logging.info("multiple symbol mode")
logging.info(recording)
results = evaluate_model_single_recording_preloaded(preprocessing_queue,
feature_list,
model,
output_semantics,
recording)
return results |
<SYSTEM_TASK:>
Evaluate model for a single recording.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_model(recording, model_folder, verbose=False):
"""Evaluate model for a single recording.""" |
from . import preprocess_dataset
from . import features
for target_folder in get_recognizer_folders(model_folder):
# The source is later than the target. That means we need to
# refresh the target
if "preprocessed" in target_folder:
logging.info("Start applying preprocessing methods...")
t = target_folder
_, _, preprocessing_queue = preprocess_dataset.get_parameters(t)
handwriting = handwritten_data.HandwrittenData(recording)
if verbose:
handwriting.show()
handwriting.preprocessing(preprocessing_queue)
if verbose:
logging.debug("After preprocessing: %s",
handwriting.get_sorted_pointlist())
handwriting.show()
elif "feature-files" in target_folder:
logging.info("Create feature file...")
infofile_path = os.path.join(target_folder, "info.yml")
with open(infofile_path, 'r') as ymlfile:
feature_description = yaml.load(ymlfile)
feature_str_list = feature_description['features']
feature_list = features.get_features(feature_str_list)
feature_count = sum(map(lambda n: n.get_dimension(),
feature_list))
x = handwriting.feature_extraction(feature_list)
# Create hdf5
_, output_filename = tempfile.mkstemp(suffix='.hdf5', text=True)
create_hdf5(output_filename, feature_count, [(x, 0)])
elif "model" in target_folder:
logfile, model_use = _evaluate_model_single_file(target_folder,
output_filename)
return logfile
else:
logging.info("'%s' not found", target_folder)
os.remove(output_filename)
os.remove(model_use) |
<SYSTEM_TASK:>
Get a dictionary that maps indices to LaTeX commands.
<END_TASK>
<USER_TASK:>
Description:
def get_index2latex(model_description):
"""
Get a dictionary that maps indices to LaTeX commands.
Parameters
----------
model_description : string
A model description file that points to a feature folder where an
`index2formula_id.csv` has to be.
Returns
-------
dictionary :
Maps indices to LaTeX commands
""" |
index2latex = {}
translation_csv = os.path.join(get_project_root(),
model_description["data-source"],
"index2formula_id.csv")
with open(translation_csv) as csvfile:
csvreader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
for row in csvreader:
index2latex[int(row['index'])] = row['latex']
return index2latex |
<SYSTEM_TASK:>
Get the classification as a list of tuples. The first value is the LaTeX code, the second value is the probability.
<END_TASK>
<USER_TASK:>
Description:
def classify_single_recording(raw_data_json, model_folder, verbose=False):
"""
Get the classification as a list of tuples. The first value is the LaTeX
code, the second value is the probability.
""" |
evaluation_file = evaluate_model(raw_data_json, model_folder, verbose)
with open(os.path.join(model_folder, "info.yml")) as ymlfile:
model_description = yaml.load(ymlfile)
index2latex = get_index2latex(model_description)
# Map line to probabilities for LaTeX commands
with open(evaluation_file) as f:
probabilities = f.read()
probabilities = map(float, probabilities.split(" "))
results = []
for index, probability in enumerate(probabilities):
results.append((index2latex[index], probability))
results = sorted(results, key=lambda n: n[1], reverse=True)
return results |
<SYSTEM_TASK:>
Take a description and return a list of classes.
<END_TASK>
<USER_TASK:>
Description:
def get_objectlist(description, config_key, module):
"""
Take a description and return a list of classes.
Parameters
----------
description : list of dictionaries
Each dictionary has only one entry. The key is the name of a class. The
value of that entry is a list of dictionaries again. Those dictionaries
are parameters.
Returns
-------
List of objects.
""" |
object_list = []
for feature in description:
for feat, params in feature.items():
feat = get_class(feat, config_key, module)
if params is None:
object_list.append(feat())
else:
parameters = {}
for dicts in params:
for param_name, param_value in dicts.items():
parameters[param_name] = param_value
object_list.append(feat(**parameters)) # pylint: disable=W0142
return object_list |
<SYSTEM_TASK:>
Get the class by its name as a string.
<END_TASK>
<USER_TASK:>
Description:
def get_class(name, config_key, module):
"""Get the class by its name as a string.""" |
clsmembers = inspect.getmembers(module, inspect.isclass)
for string_name, act_class in clsmembers:
if string_name == name:
return act_class
# Check if the user has specified a plugin and if the class is in there
cfg = get_project_configuration()
if config_key in cfg:
modname = os.path.splitext(os.path.basename(cfg[config_key]))[0]
if os.path.isfile(cfg[config_key]):
usermodule = imp.load_source(modname, cfg[config_key])
clsmembers = inspect.getmembers(usermodule, inspect.isclass)
for string_name, act_class in clsmembers:
if string_name == name:
return act_class
else:
logging.warning("File '%s' does not exist. Adjust ~/.hwrtrc.",
cfg[config_key])
logging.debug("Unknown class '%s'.", name)
return None |
<SYSTEM_TASK:>
Calculate the softmax of a list of numbers w.
<END_TASK>
<USER_TASK:>
Description:
def softmax(w, t=1.0):
"""Calculate the softmax of a list of numbers w.
Parameters
----------
w : list of numbers
Returns
-------
a list of the same length as w of non-negative numbers
Examples
--------
>>> softmax([0.1, 0.2])
array([ 0.47502081, 0.52497919])
>>> softmax([-0.1, 0.2])
array([ 0.42555748, 0.57444252])
>>> softmax([0.9, -10])
array([ 9.99981542e-01, 1.84578933e-05])
>>> softmax([0, 10])
array([ 4.53978687e-05, 9.99954602e-01])
""" |
w = [Decimal(el) for el in w]
e = numpy.exp(numpy.array(w) / Decimal(t))
dist = e / numpy.sum(e)
return dist |
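The optional temperature parameter `t` is not exercised by the doctests above; a quick sketch of its effect (values rounded):

softmax([0.1, 0.2])         # ~ [0.4750, 0.5250]
softmax([0.1, 0.2], t=10)   # ~ [0.4975, 0.5025] -- a higher temperature flattens the distribution
softmax([0.1, 0.2], t=0.1)  # ~ [0.2689, 0.7311] -- a lower temperature sharpens it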
<SYSTEM_TASK:>
Get a directory where pickled Beam Data can be stored.
<END_TASK>
<USER_TASK:>
Description:
def get_beam_cache_directory():
"""
Get a directory where pickled Beam Data can be stored.
Create that directory, if it doesn't exist.
Returns
-------
str
Path to the directory
""" |
home = os.path.expanduser("~")
cache_dir = os.path.join(home, '.hwrt-beam-cache')
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return cache_dir |
<SYSTEM_TASK:>
Get a beam from the session with `secret_uuid`.
<END_TASK>
<USER_TASK:>
Description:
def get_beam(secret_uuid):
"""
Get a beam from the session with `secret_uuid`.
Parameters
----------
secret_uuid : str
Returns
-------
The beam object if it exists, otherwise `None`.
""" |
beam_dir = get_beam_cache_directory()
beam_filename = os.path.join(beam_dir, secret_uuid)
if os.path.isfile(beam_filename):
with open(beam_filename, 'rb') as handle:
beam = pickle.load(handle)
return beam
else:
return None |
<SYSTEM_TASK:>
Make the table 'symmetric' where the lower left part of the matrix is the reverse probability.
<END_TASK>
<USER_TASK:>
Description:
def prepare_table(table):
"""Make the table 'symmetric' where the lower left part of the matrix is
the reverse probability
""" |
n = len(table)
for i, row in enumerate(table):
assert len(row) == n
for j, el in enumerate(row):
if i == j:
table[i][i] = 0.0
elif i > j:
table[i][j] = 1-table[j][i]
return table |
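A small illustration of the completion rule `table[i][j] = 1 - table[j][i]` for three strokes; only the upper-right triangle is assumed to be filled in, and the values are made up (chosen so that the complements are exact floats):

table = [[0.0, 0.75, 0.25],
         [0.0, 0.0, 0.5],
         [0.0, 0.0, 0.0]]
prepare_table(table)  # modifies the table in place and returns it
# [[0.0, 0.75, 0.25],
#  [0.25, 0.0, 0.5],
#  [0.75, 0.5, 0.0]]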
<SYSTEM_TASK:>
Partition list ``l`` into ``K`` partitions, without empty parts.
<END_TASK>
<USER_TASK:>
Description:
def neclusters(l, K):
"""Partition list ``l`` in ``K`` partitions, without empty parts.
>>> l = [0, 1, 2]
>>> list(neclusters(l, 2))
[[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]]]
>>> list(neclusters(l, 1))
[[[0, 1, 2]]]
""" |
for c in clusters(l, K):
if all(x for x in c):
yield c |
<SYSTEM_TASK:>
Get all segmentations of a list ``l``.
<END_TASK>
<USER_TASK:>
Description:
def all_segmentations(l):
"""Get all segmentations of a list ``l``.
This gets bigger fast. See https://oeis.org/A000110
For len(l) = 14 it is 190,899,322
>>> list(all_segmentations([0, 1, 2]))
[[[0, 1, 2]], [[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[0], [1], [2]]]
""" |
for K in range(1, len(l)+1):
gen = neclusters(l, K)
for el in gen:
yield el |
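The OEIS reference above (A000110) is the sequence of Bell numbers; a short self-contained check of the count quoted in the docstring, using the Bell triangle (this helper is only for illustration, it is not part of hwrt):

def bell_number(n):
    """n-th Bell number, i.e. the number of segmentations of n strokes."""
    row = [1]
    for _ in range(n - 1):
        new_row = [row[-1]]
        for value in row:
            new_row.append(new_row[-1] + value)
        row = new_row
    return row[-1]

print(bell_number(3))   # 5, matching the doctest above
print(bell_number(14))  # 190899322, as stated in the docstring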
<SYSTEM_TASK:>
Test if ``s1`` and ``s2`` are in the same symbol, given the ``segmentation``.
<END_TASK>
<USER_TASK:>
Description:
def q(segmentation, s1, s2):
"""Test if ``s1`` and ``s2`` are in the same symbol, given the
``segmentation``.
""" |
index1 = find_index(segmentation, s1)
index2 = find_index(segmentation, s2)
return index1 == index2 |
<SYSTEM_TASK:>
Get the score of a segmentation.
<END_TASK>
<USER_TASK:>
Description:
def score_segmentation(segmentation, table):
"""Get the score of a segmentation.""" |
stroke_nr = sum(1 for symbol in segmentation for stroke in symbol)
score = 1
for i in range(stroke_nr):
for j in range(i+1, stroke_nr):
qval = q(segmentation, i, j)
if qval:
score *= table[i][j]
else:
score *= table[j][i]
return score |
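A hypothetical worked example for three strokes; the pairwise probabilities are made up, and the lower-left entries already hold the complements (as `prepare_table` would produce):

# table[i][j] for i < j: probability that strokes i and j form the same symbol
table = [[0.0, 0.75, 0.25],
         [0.25, 0.0, 0.5],
         [0.75, 0.5, 0.0]]
print(score_segmentation([[0, 1], [2]], table))
# table[0][1] * table[2][0] * table[2][1] = 0.75 * 0.75 * 0.5 = 0.28125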
<SYSTEM_TASK:>
Push an ``element`` into the data structure together with its value and only save it if it currently is one of the top n elements.
<END_TASK>
<USER_TASK:>
Description:
def push(self, element, value):
"""Push an ``element`` into the datastrucutre together with its value
and only save it if it currently is one of the top n elements.
Drop elements if necessary.
""" |
insert_pos = 0
for index, el in enumerate(self.tops):
if not self.find_min and el[1] >= value:
insert_pos = index+1
elif self.find_min and el[1] <= value:
insert_pos = index+1
self.tops.insert(insert_pos, [element, value])
self.tops = self.tops[:self.n] |
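The method above relies on `self.tops`, `self.n` and `self.find_min`; a minimal self-contained sketch of such a container and its use (the class name `TopNSketch` and its constructor are assumptions, not the actual hwrt class):

class TopNSketch(object):
    """Keep only the n best (element, value) pairs seen so far."""

    def __init__(self, n, find_min=False):
        self.n = n                # how many elements to keep
        self.find_min = find_min  # True: smaller values are better
        self.tops = []            # sorted list of [element, value] pairs

    def push(self, element, value):
        # Same logic as the method above
        insert_pos = 0
        for index, el in enumerate(self.tops):
            if not self.find_min and el[1] >= value:
                insert_pos = index + 1
            elif self.find_min and el[1] <= value:
                insert_pos = index + 1
        self.tops.insert(insert_pos, [element, value])
        self.tops = self.tops[:self.n]

best3 = TopNSketch(n=3)
for element, value in [('a', 0.1), ('b', 0.7), ('c', 0.4), ('d', 0.9)]:
    best3.push(element, value)
print(best3.tops)  # [['d', 0.9], ['b', 0.7], ['c', 0.4]]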
<SYSTEM_TASK:>
Serializes a numpy array to a compressed base64 string
<END_TASK>
<USER_TASK:>
Description:
def _array2cstr(arr):
""" Serializes a numpy array to a compressed base64 string """ |
out = StringIO()
np.save(out, arr)
return b64encode(out.getvalue()) |
<SYSTEM_TASK:>
Reconstructs a numpy array from a plain-text string
<END_TASK>
<USER_TASK:>
Description:
def _str2array(d):
""" Reconstructs a numpy array from a plain-text string """ |
if type(d) == list:
return np.asarray([_str2array(s) for s in d])
ins = StringIO(d)
return np.loadtxt(ins) |
<SYSTEM_TASK:>
Create an 'output_semantics.csv' file which describes what the output of the individual output neurons means.
<END_TASK>
<USER_TASK:>
Description:
def create_output_semantics(model_folder, outputs):
"""
Create an 'output_semantics.csv' file which describes what the output of
the individual output neurons means.
Parameters
----------
model_folder : str
folder where the model description file is
outputs : int
number of output neurons
""" |
with open('output_semantics.csv', 'wb') as csvfile:
model_description_file = os.path.join(model_folder, "info.yml")
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.load(ymlfile)
logging.info("Start fetching translation dict...")
translation_dict = utils.get_index2data(model_description)
spamwriter = csv.writer(csvfile, delimiter=';',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for output_index in range(outputs):
if output_index in translation_dict:
# Add more information:
# 1. ID in my system
# 2. latex
# 3. unicode code point
# 4. font
# 5. font style
spamwriter.writerow(translation_dict[output_index])
else:
print("No data for %i." % output_index)
spamwriter.writerow(["output %i" % output_index]) |
<SYSTEM_TASK:>
Strip `suffix` from the end of `text` if `text` has that suffix.
<END_TASK>
<USER_TASK:>
Description:
def strip_end(text, suffix):
"""Strip `suffix` from the end of `text` if `text` has that suffix.""" |
if not text.endswith(suffix):
return text
return text[:len(text)-len(suffix)] |
<SYSTEM_TASK:>
Convert a LaTeX formula to the database index.
<END_TASK>
<USER_TASK:>
Description:
def formula_to_dbid(formula_str, backslash_fix=False):
"""
Convert a LaTeX formula to the database index.
Parameters
----------
formula_str : string
The formula as LaTeX code.
backslash_fix : boolean
If this is set to true, then it will be checked if the same formula
exists with a preceding backslash.
Returns
-------
int :
The database index.
""" |
global __formula_to_dbid_cache
if __formula_to_dbid_cache is None:
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
# Get all formulas that should get examined
sql = ("SELECT `id`, `formula_in_latex` FROM `wm_formula` ")
cursor.execute(sql)
formulas = cursor.fetchall()
__formula_to_dbid_cache = {}
for fm in formulas:
__formula_to_dbid_cache[fm['formula_in_latex']] = fm['id']
if formula_str in __formula_to_dbid_cache:
return __formula_to_dbid_cache[formula_str]
elif backslash_fix and ('\\%s' % formula_str) in __formula_to_dbid_cache:
return __formula_to_dbid_cache['\\%s' % formula_str]
else:
logging.info("Symbol '%s' was not found. Add it to write-math.com.",
formula_str)
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
sql = ("INSERT INTO `wm_formula` (`user_id`, `formula_name`, "
"`formula_in_latex`, "
"`mode`, `package`) VALUES ("
"'10', %s, %s, 'bothmodes', NULL);")
if len(formula_str) < 20:
logging.info("Insert formula %s.", formula_str)
cursor.execute(sql, (formula_str, formula_str))
connection.commit()
__formula_to_dbid_cache[formula_str] = connection.insert_id()
return __formula_to_dbid_cache[formula_str] |
<SYSTEM_TASK:>
Insert recording `hw` into database.
<END_TASK>
<USER_TASK:>
Description:
def insert_recording(hw):
"""Insert recording `hw` into database.""" |
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
cursor = connection.cursor()
sql = ("INSERT INTO `wm_raw_draw_data` ("
"`user_id`, "
"`data`, "
"`md5data`, "
"`creation_date`, "
"`device_type`, "
"`accepted_formula_id`, "
"`secret`, "
"`ip`, "
"`segmentation`, "
"`internal_id`, "
"`description` "
") VALUES (%s, %s, MD5(data), "
"%s, %s, %s, %s, %s, %s, %s, %s);")
data = (hw.user_id,
hw.raw_data_json,
getattr(hw, 'creation_date', None),
getattr(hw, 'device_type', ''),
getattr(hw, 'formula_id', None),
getattr(hw, 'secret', ''),
getattr(hw, 'ip', None),
str(getattr(hw, 'segmentation', '')),
getattr(hw, 'internal_id', ''),
getattr(hw, 'description', ''))
cursor.execute(sql, data)
connection.commit()
for symbol_id, strokes in zip(hw.symbol_stream, hw.segmentation):
insert_symbol_mapping(cursor.lastrowid,
symbol_id,
hw.user_id,
strokes)
logging.info("Insert raw data.")
except pymysql.err.IntegrityError as e:
print("Error: {} (can probably be ignored)".format(e)) |
<SYSTEM_TASK:>
Insert data into `wm_strokes_to_symbol`.
<END_TASK>
<USER_TASK:>
Description:
def insert_symbol_mapping(raw_data_id, symbol_id, user_id, strokes):
"""
Insert data into `wm_strokes_to_symbol`.
Parameters
----------
raw_data_id : int
symbol_id : int
user_id : int
strokes : list of int
""" |
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
sql = ("INSERT INTO `wm_partial_answer` "
"(`recording_id`, `symbol_id`, `strokes`, `user_id`, "
"`is_accepted`) "
"VALUES (%s, %s, %s, %s, 1);")
data = (raw_data_id,
symbol_id,
",".join([str(stroke) for stroke in strokes]),
user_id)
cursor.execute(sql, data)
connection.commit() |
<SYSTEM_TASK:>
Some labels currently don't work together because of LaTeX naming clashes. Those will be replaced by simple strings.
<END_TASK>
<USER_TASK:>
Description:
def filter_label(label, replace_by_similar=True):
"""Some labels currently don't work together because of LaTeX naming
clashes. Those will be replaced by simple strings. """ |
bad_names = ['celsius', 'degree', 'ohm', 'venus', 'mars', 'astrosun',
'fullmoon', 'leftmoon', 'female', 'male', 'checked',
'diameter', 'sun', 'Bowtie', 'sqrt',
'cong', 'copyright', 'dag', 'parr', 'notin', 'dotsc',
'mathds', 'mathfrak']
if any(label[1:].startswith(bad) for bad in bad_names):
if label == '\\dag' and replace_by_similar:
return '\\dagger'
elif label == '\\diameter' and replace_by_similar:
return '\\O'
return label[1:]
else:
return label |
<SYSTEM_TASK:>
Apply ``feature`` to all recordings in ``raw_datasets``. Store the results in two files.
<END_TASK>
<USER_TASK:>
Description:
def analyze_feature(raw_datasets, feature, basename="aspect_ratios"):
"""
Apply ``feature`` to all recordings in ``raw_datasets``. Store the results
in two files. One file stores the raw result, the other one groups the
results by symbols and stores the mean, standard deviation and the name of
the symbol as a csv file.
Parameters
----------
raw_datasets : List of dictionaries
Each dictionary is a raw_dataset.
feature : An instance of the feature class type
The `feature` which gets analyzed on `raw_datasets`.
basename : string
Name for the file in which the data gets written.
""" |
# Prepare files
csv_file = dam.prepare_file(basename + '.csv')
raw_file = dam.prepare_file(basename + '.raw')
csv_file = open(csv_file, 'a')
raw_file = open(raw_file, 'a')
csv_file.write("label,mean,std\n") # Write header
raw_file.write("latex,raw_data_id,value\n") # Write header
print_data = []
for _, datasets in dam.sort_by_formula_id(raw_datasets).items():
values = []
for data in datasets:
value = feature(data)[0]
values.append(value)
raw_file.write("%s,%i,%0.2f\n" % (datasets[0].formula_in_latex,
data.raw_data_id,
value))
label = filter_label(datasets[0].formula_in_latex)
print_data.append((label, numpy.mean(values), numpy.std(values)))
# Sort the data by highest mean, descending
print_data = sorted(print_data, key=lambda n: n[1], reverse=True)
# Write data to file
for label, mean, std in print_data:
csv_file.write("%s,%0.2f,%0.2f\n" % (label, mean, std))
csv_file.close() |
<SYSTEM_TASK:>
Start the creation of the wanted metric.
<END_TASK>
<USER_TASK:>
Description:
def main(handwriting_datasets_file, analyze_features):
"""Start the creation of the wanted metric.""" |
# Load from pickled file
logging.info("Start loading data '%s' ...", handwriting_datasets_file)
loaded = pickle.load(open(handwriting_datasets_file))
raw_datasets = loaded['handwriting_datasets']
logging.info("%i datasets loaded.", len(raw_datasets))
logging.info("Start analyzing...")
if analyze_features:
featurelist = [(features.AspectRatio(), "aspect_ratio.csv"),
(features.ReCurvature(1), "re_curvature.csv"),
(features.Height(), "height.csv"),
(features.Width(), "width.csv"),
(features.Time(), "time.csv"),
(features.Ink(), "ink.csv"),
(features.StrokeCount(), "stroke-count.csv")]
for feat, filename in featurelist:
logging.info("create %s...", filename)
analyze_feature(raw_datasets, feat, filename)
# Analyze everything specified in configuration
cfg = utils.get_project_configuration()
if 'data_analyzation_queue' in cfg:
metrics = dam.get_metrics(cfg['data_analyzation_queue'])
for metric in metrics:
logging.info("Start metric %s...", str(metric))
metric(raw_datasets)
else:
logging.info("No 'data_analyzation_queue' in ~/.hwrtrc") |
<SYSTEM_TASK:>
If `latex` is surrounded by matching braces, remove them. They are not necessary.
<END_TASK>
<USER_TASK:>
Description:
def remove_matching_braces(latex):
"""
If `latex` is surrounded by matching braces, remove them. They are not
necessary.
Parameters
----------
latex : string
Returns
-------
string
Examples
--------
>>> remove_matching_braces('{2+2}')
'2+2'
>>> remove_matching_braces('{2+2')
'{2+2'
""" |
if latex.startswith('{') and latex.endswith('}'):
opened = 1
matches = True
for char in latex[1:-1]:
if char == '{':
opened += 1
elif char == '}':
opened -= 1
if opened == 0:
matches = False
if matches:
latex = latex[1:-1]
return latex |
<SYSTEM_TASK:>
Read all files of `folder` and return a list of HandwrittenData objects.
<END_TASK>
<USER_TASK:>
Description:
def read_folder(folder):
"""Read all files of `folder` and return a list of HandwrittenData
objects.
Parameters
----------
folder : string
Path to a folder
Returns
-------
list :
A list of recordings, one for each .ink file in the given folder.
""" |
recordings = []
for filename in glob.glob(os.path.join(folder, '*.ink')):
recording = parse_scg_ink_file(filename)
recordings.append(recording)
return recordings |
<SYSTEM_TASK:>
Get a list of colors which is as long as the segmentation.
<END_TASK>
<USER_TASK:>
Description:
def _get_colors(segmentation):
"""Get a list of colors which is as long as the segmentation.
Parameters
----------
segmentation : list of lists
Returns
-------
list
A list of colors.
""" |
symbol_count = len(segmentation)
num_colors = symbol_count
# See http://stackoverflow.com/a/20298116/562769
color_array = [
"#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941",
"#006FA6", "#A30059", "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC",
"#B79762", "#004D43", "#8FB0FF", "#997D87", "#5A0007", "#809693",
"#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
"#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9",
"#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B", "#A1C299",
"#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500",
"#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
"#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68",
"#7A87A1", "#788D66", "#885578", "#FAD09F", "#FF8A9A", "#D157A0",
"#BEC459", "#456648", "#0086ED", "#886F4C",
"#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9",
"#FF913F", "#938A81", "#575329", "#00FECF", "#B05B6F", "#8CD0FF",
"#3B9700", "#04F757", "#C8A1A1", "#1E6E00", "#7900D7", "#A77500",
"#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
"#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0",
"#3A2465", "#922329", "#5B4534", "#FDE8DC", "#404E55", "#0089A3",
"#CB7E98", "#A4E804", "#324E72", "#6A3A4C", "#83AB58", "#001C1E",
"#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800",
"#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4",
"#1E0200", "#5B4E51", "#C895C5", "#320033", "#FF6832", "#66E1D3",
"#CFCDAC", "#D0AC94", "#7ED379", "#012C58"]
# Apply a little trick to make sure we have enough colors, no matter
# how many symbols are in one recording.
# This simply appends the color array as long as necessary to get enough
# colors
new_array = color_array[:]
while len(new_array) <= num_colors:
new_array += color_array
return new_array[:num_colors] |
<SYSTEM_TASK:>
Some recordings have wrong times. Fix them so that nothing after loading a handwritten recording breaks.
<END_TASK>
<USER_TASK:>
Description:
def fix_times(self):
"""
Some recordings have wrong times. Fix them so that nothing after
loading a handwritten recording breaks.
""" |
pointlist = self.get_pointlist()
times = [point['time'] for stroke in pointlist for point in stroke]
times_min = max(min(times), 0) # Make sure this is not None
for i, stroke in enumerate(pointlist):
for j, point in enumerate(stroke):
if point['time'] is None:
pointlist[i][j]['time'] = times_min
else:
times_min = point['time']
self.raw_data_json = json.dumps(pointlist) |
<SYSTEM_TASK:>
Get a list of lists of dictionaries from the JSON raw data string. Those lists represent strokes with control points.
<END_TASK>
<USER_TASK:>
Description:
def get_pointlist(self):
"""
Get a list of lists of dictionaries from the JSON raw data string. Those
lists represent strokes with control points.
Returns
-------
list :
A list of strokes. Each stroke is a list of dictionaries
{'x': 123, 'y': 42, 'time': 1337}
""" |
try:
pointlist = json.loads(self.raw_data_json)
except Exception as inst:
logging.debug("pointStrokeList: strokelistP")
logging.debug(self.raw_data_json)
logging.debug("didn't work")
raise inst
if len(pointlist) == 0:
logging.warning("Pointlist was empty. Search for '" +
self.raw_data_json + "' in `wm_raw_draw_data`.")
return pointlist |
<SYSTEM_TASK:>
Make sure that the points and strokes are in order.
<END_TASK>
<USER_TASK:>
Description:
def get_sorted_pointlist(self):
"""
Make sure that the points and strokes are in order.
Returns
-------
list
A list of all strokes in the recording. Each stroke is represented
as a list of dicts {'time': 123, 'x': 45, 'y': 67}
""" |
pointlist = self.get_pointlist()
for i in range(len(pointlist)):
pointlist[i] = sorted(pointlist[i], key=lambda p: p['time'])
pointlist = sorted(pointlist, key=lambda stroke: stroke[0]['time'])
return pointlist |
<SYSTEM_TASK:>
Overwrite pointlist.
<END_TASK>
<USER_TASK:>
Description:
def set_pointlist(self, pointlist):
"""Overwrite pointlist.
Parameters
----------
pointlist : a list of strokes; each stroke is a list of points
The inner lists represent strokes. Every stroke consists of points.
Every point is a dictionary with 'x', 'y', 'time'.
""" |
assert type(pointlist) is list, \
"pointlist is not of type list, but %r" % type(pointlist)
assert len(pointlist) >= 1, \
"The pointlist of formula_id %i is %s" % (self.formula_id,
self.get_pointlist())
self.raw_data_json = json.dumps(pointlist) |
<SYSTEM_TASK:>
Get the bounding box of a pointlist.
<END_TASK>
<USER_TASK:>
Description:
def get_bounding_box(self):
""" Get the bounding box of a pointlist. """ |
pointlist = self.get_pointlist()
# Initialize bounding box parameters to save values
minx, maxx = pointlist[0][0]["x"], pointlist[0][0]["x"]
miny, maxy = pointlist[0][0]["y"], pointlist[0][0]["y"]
mint, maxt = pointlist[0][0]["time"], pointlist[0][0]["time"]
# Adjust parameters
for stroke in pointlist:
for p in stroke:
minx, maxx = min(minx, p["x"]), max(maxx, p["x"])
miny, maxy = min(miny, p["y"]), max(maxy, p["y"])
mint, maxt = min(mint, p["time"]), max(maxt, p["time"])
return {"minx": minx, "maxx": maxx, "miny": miny, "maxy": maxy,
"mint": mint, "maxt": maxt} |
<SYSTEM_TASK:>
Get a bitmap of the object at a given instance of time. If time is `None`, then the bitmap is generated for the last point in time.
<END_TASK>
<USER_TASK:>
Description:
def get_bitmap(self, time=None, size=32, store_path=None):
"""
Get a bitmap of the object at a given instance of time. If time is
`None`, then the bitmap is generated for the last point in time.
Parameters
----------
time : int or None
size : int
Size in pixels. The resulting bitmap will be (size x size).
store_path : None or str
If this is set, then the image will be saved there.
Returns
-------
numpy array :
Greyscale png image
""" |
# bitmap_width = int(self.get_width()*size) + 2
# bitmap_height = int(self.get_height()*size) + 2
img = Image.new('L', (size, size), 'black')
draw = ImageDraw.Draw(img, 'L')
bb = self.get_bounding_box()
for stroke in self.get_sorted_pointlist():
for p1, p2 in zip(stroke, stroke[1:]):
if time is not None and \
(p1['time'] > time or p2['time'] > time):
continue
y_from = int((-bb['miny'] + p1['y']) /
max(self.get_height(), 1)*size)
x_from = int((-bb['minx'] + p1['x']) /
max(self.get_width(), 1)*size)
y_to = int((-bb['miny'] + p2['y']) /
max(self.get_height(), 1)*size)
x_to = int((-bb['minx'] + p2['x']) /
max(self.get_width(), 1)*size)
draw.line([x_from, y_from, x_to, y_to],
fill='#ffffff',
width=1)
del draw
if store_path is not None:
img.save(store_path)
return numpy.asarray(img) |
<SYSTEM_TASK:>
Apply preprocessing algorithms.
<END_TASK>
<USER_TASK:>
Description:
def preprocessing(self, algorithms):
"""Apply preprocessing algorithms.
Parameters
----------
algorithms : a list of objects
Preprocessing algorithms which get applied in order.
Examples
--------
>>> import preprocessing
>>> a = HandwrittenData(...)
>>> preprocessing_queue = [(preprocessing.scale_and_shift, []),
... (preprocessing.connect_strokes, []),
... (preprocessing.douglas_peucker,
... {'EPSILON': 0.2}),
... (preprocessing.space_evenly,
... {'number': 100,
... 'KIND': 'cubic'})]
>>> a.preprocessing(preprocessing_queue)
""" |
assert type(algorithms) is list
for algorithm in algorithms:
algorithm(self) |
<SYSTEM_TASK:>
Get a list of features.
<END_TASK>
<USER_TASK:>
Description:
def feature_extraction(self, algorithms):
"""Get a list of features.
Every algorithm has to return the features as a list.""" |
assert type(algorithms) is list
features = []
for algorithm in algorithms:
new_features = algorithm(self)
assert len(new_features) == algorithm.get_dimension(), \
"Expected %i features from algorithm %s, got %i features" % \
(algorithm.get_dimension(), str(algorithm), len(new_features))
features += new_features
return features |
<SYSTEM_TASK:>
Show the data graphically in a new pop-up window.
<END_TASK>
<USER_TASK:>
Description:
def show(self):
"""Show the data graphically in a new pop-up window.""" |
# prevent the following error:
# '_tkinter.TclError: no display name and no $DISPLAY environment
# variable'
# import matplotlib
# matplotlib.use('GTK3Agg', warn=False)
import matplotlib.pyplot as plt
pointlist = self.get_pointlist()
if 'pen_down' in pointlist[0][0]:
assert len(pointlist) > 1, \
"Lenght of pointlist was %i. Got: %s" % (len(pointlist),
pointlist)
# Create a new pointlist that models pen-down strokes and pen
# up strokes
new_pointlist = []
last_pendown_state = None
stroke = []
for point in pointlist[0]:
if last_pendown_state is None:
last_pendown_state = point['pen_down']
if point['pen_down'] != last_pendown_state:
new_pointlist.append(stroke)
last_pendown_state = point['pen_down']
stroke = []
else:
stroke.append(point)
new_pointlist.append(stroke) # add the last stroke
pointlist = new_pointlist
_, ax = plt.subplots()
ax.set_title("Raw data id: %s, "
"Formula_id: %s" % (str(self.raw_data_id),
str(self.formula_id)))
colors = _get_colors(self.segmentation)
for symbols, color in zip(self.segmentation, colors):
for stroke_index in symbols:
stroke = pointlist[stroke_index]
xs, ys = [], []
for p in stroke:
xs.append(p['x'])
ys.append(p['y'])
if "pen_down" in stroke[0] and stroke[0]["pen_down"] is False:
plt.plot(xs, ys, '-x', color=color)
else:
plt.plot(xs, ys, '-o', color=color)
plt.gca().invert_yaxis()
ax.set_aspect('equal')
plt.show() |
<SYSTEM_TASK:>
Count all strokes of this recording that have only a single dot.
<END_TASK>
<USER_TASK:>
Description:
def count_single_dots(self):
"""Count all strokes of this recording that have only a single dot.
""" |
pointlist = self.get_pointlist()
single_dots = 0
for stroke in pointlist:
if len(stroke) == 1:
single_dots += 1
return single_dots |
<SYSTEM_TASK:>
Convert this HandwrittenData object into a list of HandwrittenData
<END_TASK>
<USER_TASK:>
Description:
def to_single_symbol_list(self):
"""
Convert this HandwrittenData object into a list of HandwrittenData
objects. Each element of the list is a single symbol.
Returns
-------
list of HandwrittenData objects
""" |
symbol_stream = getattr(self,
'symbol_stream',
[None for symbol in self.segmentation])
single_symbols = []
pointlist = self.get_sorted_pointlist()
for stroke_indices, label in zip(self.segmentation, symbol_stream):
strokes = []
for stroke_index in stroke_indices:
strokes.append(pointlist[stroke_index])
single_symbols.append(HandwrittenData(json.dumps(strokes),
formula_id=label))
return single_symbols |
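A short usage sketch, assuming `a` is a HandwrittenData recording that already carries a segmentation (and optionally a symbol_stream of labels):

for symbol in a.to_single_symbol_list():
    # Each element is itself a HandwrittenData object containing one symbol.
    print(symbol.formula_id, len(symbol.get_pointlist()))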
<SYSTEM_TASK:>
return the addon version number, with a developmental version increment
<END_TASK>
<USER_TASK:>
Description:
def get_git_postversion(addon_dir):
""" return the addon version number, with a developmental version increment
if there were git commits in the addon_dir after the last version change.
If the last change to the addon corresponds to the version number in the
manifest, it is used as is for the python package version. Otherwise a
counter is incremented for each commit and the resulting version number has
the following form: [8|9].0.x.y.z.99.devN, N being the number of git
commits since the version change.
Note: we use .99.devN because:
* pip ignores .postN by design (https://github.com/pypa/pip/issues/2872)
* x.y.z.devN is anterior to x.y.z
Note: we don't put the sha1 of the commit in the version number because
this is not PEP 440 compliant and is therefore misinterpreted by pip.
""" |
addon_dir = os.path.realpath(addon_dir)
last_version = read_manifest(addon_dir).get('version', '0.0.0')
last_version_parsed = parse_version(last_version)
if not is_git_controlled(addon_dir):
return last_version
if get_git_uncommitted(addon_dir):
uncommitted = True
count = 1
else:
uncommitted = False
count = 0
last_sha = None
git_root = get_git_root(addon_dir)
for sha in git_log_iterator(addon_dir):
try:
manifest = read_manifest_from_sha(sha, addon_dir, git_root)
except NoManifestFound:
break
version = manifest.get('version', '0.0.0')
version_parsed = parse_version(version)
if version_parsed != last_version_parsed:
break
if last_sha is None:
last_sha = sha
else:
count += 1
if not count:
return last_version
if last_sha:
return last_version + ".99.dev%s" % count
if uncommitted:
return last_version + ".dev1"
# if everything is committed, the last commit
# must have the same version as current,
# so last_sha must be set and we'll never reach this branch
return last_version |
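To make the scheme concrete, these are the three possible shapes of the result (the addon path and version values are made up):

version = get_git_postversion('addons/my_addon')
# -> '10.0.1.2.0'          manifest version bumped in the most recent commit
# -> '10.0.1.2.0.99.dev2'  two commits after the commit that set the version
# -> '10.0.1.2.0.dev1'     version bumped only in uncommitted local changes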
<SYSTEM_TASK:>
Detect Odoo version from an addons directory
<END_TASK>
<USER_TASK:>
Description:
def _get_odoo_version_info(addons_dir, odoo_version_override=None):
""" Detect Odoo version from an addons directory """ |
odoo_version_info = None
addons = os.listdir(addons_dir)
for addon in addons:
addon_dir = os.path.join(addons_dir, addon)
if is_installable_addon(addon_dir):
manifest = read_manifest(addon_dir)
_, _, addon_odoo_version_info = _get_version(
addon_dir, manifest, odoo_version_override,
git_post_version=False)
if odoo_version_info is not None and \
odoo_version_info != addon_odoo_version_info:
raise DistutilsSetupError("Not all addons are for the same "
"odoo version in %s (error detected "
"in %s)" % (addons_dir, addon))
odoo_version_info = addon_odoo_version_info
return odoo_version_info |
<SYSTEM_TASK:>
Get addon version information from an addon directory
<END_TASK>
<USER_TASK:>
Description:
def _get_version(addon_dir, manifest, odoo_version_override=None,
git_post_version=True):
""" Get addon version information from an addon directory """ |
version = manifest.get('version')
if not version:
warn("No version in manifest in %s" % addon_dir)
version = '0.0.0'
if not odoo_version_override:
if len(version.split('.')) < 5:
raise DistutilsSetupError("Version in manifest must have at least "
"5 components and start with "
"the Odoo series number in %s" %
addon_dir)
odoo_version = '.'.join(version.split('.')[:2])
else:
odoo_version = odoo_version_override
if odoo_version not in ODOO_VERSION_INFO:
raise DistutilsSetupError("Unsupported odoo version '%s' in %s" %
(odoo_version, addon_dir))
odoo_version_info = ODOO_VERSION_INFO[odoo_version]
if git_post_version:
version = get_git_postversion(addon_dir)
return version, odoo_version, odoo_version_info |
<SYSTEM_TASK:>
Get the list of requirements for an addon
<END_TASK>
<USER_TASK:>
Description:
def get_install_requires_odoo_addon(addon_dir,
no_depends=[],
depends_override={},
external_dependencies_override={},
odoo_version_override=None):
""" Get the list of requirements for an addon """ |
manifest = read_manifest(addon_dir)
_, _, odoo_version_info = _get_version(addon_dir,
manifest,
odoo_version_override,
git_post_version=False)
return _get_install_requires(odoo_version_info,
manifest,
no_depends,
depends_override,
external_dependencies_override) |
<SYSTEM_TASK:>
Get the list of requirements for a directory containing addons
<END_TASK>
<USER_TASK:>
Description:
def get_install_requires_odoo_addons(addons_dir,
depends_override={},
external_dependencies_override={},
odoo_version_override=None):
""" Get the list of requirements for a directory containing addons """ |
addon_dirs = []
addons = os.listdir(addons_dir)
for addon in addons:
addon_dir = os.path.join(addons_dir, addon)
if is_installable_addon(addon_dir):
addon_dirs.append(addon_dir)
install_requires = set()
for addon_dir in addon_dirs:
r = get_install_requires_odoo_addon(
addon_dir,
no_depends=addons,
depends_override=depends_override,
external_dependencies_override=external_dependencies_override,
odoo_version_override=odoo_version_override,
)
install_requires.update(r)
return sorted(install_requires) |
<SYSTEM_TASK:>
Override parent function with alchy's
<END_TASK>
<USER_TASK:>
Description:
def make_declarative_base(self, metadata=None):
"""Override parent function with alchy's""" |
return make_declarative_base(self.session,
Model=self.Model,
metadata=metadata) |
<SYSTEM_TASK:>
This method Validates, gets the Python value, checks unique indexes,
<END_TASK>
<USER_TASK:>
Description:
def prep_doc(self, doc_obj):
"""
This method validates each property, gets the Python value, checks unique indexes,
gets the db value, and then returns the prepared doc dict object.
Useful for save and backup functions.
@param doc_obj:
@return:
""" |
doc = doc_obj._data.copy()
for key, prop in list(doc_obj._base_properties.items()):
prop.validate(doc.get(key), key)
raw_value = prop.get_python_value(doc.get(key))
if prop.unique:
self.check_unique(doc_obj, key, raw_value)
value = prop.get_db_value(raw_value)
doc[key] = value
doc['_doc_type'] = get_doc_type(doc_obj.__class__)
return doc |
<SYSTEM_TASK:>
Package, create and deploy to Lambda.
<END_TASK>
<USER_TASK:>
Description:
def deploy(environment, zappa_settings):
""" Package, create and deploy to Lambda.""" |
print(("Deploying " + environment))
zappa, settings, lambda_name, zip_path = \
_package(environment, zappa_settings)
s3_bucket_name = settings['s3_bucket']
try:
# Load your AWS credentials from ~/.aws/credentials
zappa.load_credentials()
# Make sure the necessary IAM execution roles are available
zappa.create_iam_roles()
# Upload it to S3
zip_arn = zappa.upload_to_s3(zip_path, s3_bucket_name)
# Register the Lambda function with that zip as the source
# You'll also need to define the path to your lambda_handler code.
lambda_arn = zappa.create_lambda_function(bucket=s3_bucket_name,
s3_key=zip_path,
function_name=lambda_name,
handler='handler.lambda_handler',
vpc_config=settings['vpc_config'],
memory_size=settings['memory_size'])
# Create and configure the API Gateway
api_id = zappa.create_api_gateway_routes(lambda_arn, lambda_name)
# Deploy the API!
endpoint_url = zappa.deploy_api_gateway(api_id, environment)
# Remove the uploaded zip from S3, because it is now registered.
zappa.remove_from_s3(zip_path, s3_bucket_name)
if settings['touch']:
requests.get(endpoint_url)
finally:
try:
# Finally, delete the local copy our zip package
if settings['delete_zip']:
os.remove(zip_path)
except:
print("WARNING: Manual cleanup of the zip might be needed.")
print(("Your Zappa deployment is live!: " + endpoint_url)) |
<SYSTEM_TASK:>
Update an existing deployment.
<END_TASK>
<USER_TASK:>
Description:
def update(environment, zappa_settings):
""" Update an existing deployment.""" |
print(("Updating " + environment))
# Package dependencies, and the source code into a zip
zappa, settings, lambda_name, zip_path = \
_package(environment, zappa_settings)
s3_bucket_name = settings['s3_bucket']
try:
# Load your AWS credentials from ~/.aws/credentials
zappa.load_credentials()
# Update IAM roles if needed
zappa.create_iam_roles()
# Upload it to S3
zip_arn = zappa.upload_to_s3(zip_path, s3_bucket_name)
# Register the Lambda function with that zip as the source
# You'll also need to define the path to your lambda_handler code.
lambda_arn = zappa.update_lambda_function(s3_bucket_name, zip_path,
lambda_name)
# Remove the uploaded zip from S3, because it is now registered.
zappa.remove_from_s3(zip_path, s3_bucket_name)
finally:
try:
# Finally, delete the local copy our zip package
if settings['delete_zip']:
os.remove(zip_path)
except:
print("WARNING: Manual cleanup of the zip might be needed.")
print("Your updated Zappa deployment is live!") |
<SYSTEM_TASK:>
An AWS Lambda function which parses specific API Gateway input into a
<END_TASK>
<USER_TASK:>
Description:
def lambda_handler(event, context, settings_name="zappa_settings"):
""" An AWS Lambda function which parses specific API Gateway input into a
WSGI request, feeds it to Flask, processes the Flask response, and returns
that back to the API Gateway.
""" |
# Loading settings from a python module
settings = importlib.import_module(settings_name)
# The flask-app module
app_module = importlib.import_module(settings.APP_MODULE)
# The flask-app
app = getattr(app_module, settings.APP_OBJECT)
app.config.from_object('zappa_settings')
app.wsgi_app = ZappaWSGIMiddleware(app.wsgi_app)
# This is a normal HTTP request
if event.get('method', None):
# If we just want to inspect this,
# return this event instead of processing the request
# https://your_api.aws-api.com/?event_echo=true
event_echo = getattr(settings, "EVENT_ECHO", True)
if event_echo:
if 'event_echo' in list(event['params'].values()):
return {'Content': str(event) + '\n' + str(context), 'Status': 200}
# TODO: Enable Let's Encrypt
# # If Let's Encrypt is defined in the settings,
# # and the path is your.domain.com/.well-known/acme-challenge/{{lets_encrypt_challenge_content}},
# # return a 200 of lets_encrypt_challenge_content.
# lets_encrypt_challenge_path = getattr(settings, "LETS_ENCRYPT_CHALLENGE_PATH", None)
# lets_encrypt_challenge_content = getattr(settings, "LETS_ENCRYPT_CHALLENGE_CONTENT", None)
# if lets_encrypt_challenge_path:
# if len(event['params']) == 3:
# if event['params']['parameter_1'] == '.well-known' and \
# event['params']['parameter_2'] == 'acme-challenge' and \
# event['params']['parameter_3'] == lets_encrypt_challenge_path:
# return {'Content': lets_encrypt_challenge_content, 'Status': 200}
# Create the environment for WSGI and handle the request
environ = create_wsgi_request(event, script_name=settings.SCRIPT_NAME,
trailing_slash=False)
# We are always on https on Lambda, so tell our wsgi app that.
environ['wsgi.url_scheme'] = 'https'
response = Response.from_app(app, environ)
# This doesn't work. It should probably be set right after creation, not
# at such a late stage.
# response.autocorrect_location_header = False
zappa_returndict = dict()
if response.data:
zappa_returndict['Content'] = response.data
# Pack the WSGI response into our special dictionary.
for (header_name, header_value) in response.headers:
zappa_returndict[header_name] = header_value
zappa_returndict['Status'] = response.status_code
# TODO: No clue how to handle the flask-equivalent of this. Or is this
# something entirely specified by the middleware?
# # Parse the WSGI Cookie and pack it.
# cookie = response.cookies.output()
# if ': ' in cookie:
# zappa_returndict['Set-Cookie'] = response.cookies.output().split(': ')[1]
# To ensure correct status codes, we need to
# pack the response as a deterministic B64 string and raise it
# as an error to match our APIGW regex.
# The DOCTYPE ensures that the page still renders in the browser.
if response.status_code in [400, 401, 403, 404, 500]:
content = "<!DOCTYPE html>" + str(response.status_code) + response.data
b64_content = base64.b64encode(content)
raise Exception(b64_content)
# Internal are changed to become relative redirects
# so they still work for apps on raw APIGW and on a domain.
elif response.status_code in [301, 302]:
# Location is by default relative on Flask. Location is by default
# absolute on Werkzeug. We can set autocorrect_location_header on
# the response to False, but it doesn't work. We have to manually
# remove the host part.
location = response.location
hostname = 'https://' + environ['HTTP_HOST']
if location.startswith(hostname):
location = location[len(hostname):]
raise Exception(location)
else:
return zappa_returndict |
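A minimal sketch of the event shape this handler expects from the API Gateway mapping template; everything beyond 'method' and 'params', which the code reads directly, is an assumption:

event = {
    'method': 'GET',
    'params': {'parameter_1': 'users', 'parameter_2': '42'},
    'query': {},   # assumed: query string parameters forwarded by the template
    'body': '',    # assumed: request body forwarded by the template
}
# lambda_handler(event, context) turns this into a WSGI environ, runs the
# Flask app, and packs status, headers and body into the returned dictionary.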
<SYSTEM_TASK:>
Get the context for this view.
<END_TASK>
<USER_TASK:>
Description:
def get_context_data(self, **kwargs):
"""Get the context for this view.
Also adds the *page_template* variable in the context.
If the *page_template* is not given as a kwarg of the *as_view*
method then it is generated using app label, model name
(obviously if the list is a queryset), *self.template_name_suffix*
and *self.page_template_suffix*.
For instance, if the list is a queryset of *blog.Entry*,
the template will be ``blog/entry_list_page.html``.
""" |
queryset = kwargs.pop('object_list')
page_template = kwargs.pop('page_template', None)
context_object_name = self.get_context_object_name(queryset)
context = {'object_list': queryset, 'view': self}
context.update(kwargs)
if context_object_name is not None:
context[context_object_name] = queryset
if page_template is None:
if hasattr(queryset, 'model'):
page_template = self.get_page_template(**kwargs)
else:
raise ImproperlyConfigured(
'AjaxListView requires a page_template')
context['page_template'] = self.page_template = page_template
return context |
<SYSTEM_TASK:>
Turn text into a valid python classname or variable
<END_TASK>
<USER_TASK:>
Description:
def clean_var(text):
"""Turn text into a valid python classname or variable""" |
text = re_invalid_var.sub('', text)
text = re_invalid_start.sub('', text)
return text |
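The two regular expressions are defined elsewhere in the module; a plausible reconstruction (an assumption, not the verified originals) behaves like this:

import re

# Assumed definitions: drop characters that are invalid anywhere in a Python
# identifier, then drop leading characters that cannot start one.
re_invalid_var = re.compile(r'[^0-9a-zA-Z_]')
re_invalid_start = re.compile(r'^[^a-zA-Z_]+')

def clean_var(text):
    text = re_invalid_var.sub('', text)
    text = re_invalid_start.sub('', text)
    return text

assert clean_var('2nd try!') == 'ndtry'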
<SYSTEM_TASK:>
List of all failed tasks caused by this and all previous errors.
<END_TASK>
<USER_TASK:>
Description:
def full_tasktrace(self):
"""
List of all failed tasks caused by this and all previous errors.
Returns:
List[Task]
""" |
if self.prev_error:
return self.prev_error.tasktrace + self.tasktrace
else:
return self.tasktrace |
<SYSTEM_TASK:>
For fast length comparison
<END_TASK>
<USER_TASK:>
Description:
def dist_sq(self, other=None):
""" For fast length comparison """ |
v = self - other if other else self
return sum(map(lambda a: a * a, v)) |
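Why the squared form is enough for comparisons, shown with plain tuples instead of the surrounding vector class (an assumption that keeps the snippet self-contained):

import math

a, b = (1, 2, 2), (0, 0, 0)
d_sq = sum((x - y) ** 2 for x, y in zip(a, b))   # 1 + 4 + 4 = 9, no sqrt
assert d_sq == 9 and math.sqrt(d_sq) == 3.0
# Comparing d_sq against radius ** 2 orders vectors exactly like comparing the
# true distances, but skips the square root on every call.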
<SYSTEM_TASK:>
Calculate the yaw and pitch of this vector
<END_TASK>
<USER_TASK:>
Description:
def yaw_pitch(self):
"""
Calculate the yaw and pitch of this vector
""" |
if not self:
return YawPitch(0, 0)
ground_distance = math.sqrt(self.x ** 2 + self.z ** 2)
if ground_distance:
alpha1 = -math.asin(self.x / ground_distance) / math.pi * 180
alpha2 = math.acos(self.z / ground_distance) / math.pi * 180
if alpha2 > 90:
yaw = 180 - alpha1
else:
yaw = alpha1
pitch = math.atan2(-self.y, ground_distance) / math.pi * 180
else:
yaw = 0
y = round(self.y)
if y > 0:
pitch = -90
elif y < 0:
pitch = 90
else:
pitch = 0
return YawPitch(yaw, pitch) |
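Two easy-to-verify cases, written as comments since they depend on the surrounding vector class (called Vector3 below, which is an assumption):

# Looking straight along +z (x=0, z=1): ground_distance is 1, both angles are 0.
#   Vector3(0, 0, 1).yaw_pitch()  -> YawPitch(0, 0)
# No horizontal component and negative y: the ground_distance branch is skipped
# and the sign of y alone decides the pitch.
#   Vector3(0, -1, 0).yaw_pitch() -> YawPitch(0, 90)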
<SYSTEM_TASK:>
Creates and returns a function that takes a slot
<END_TASK>
<USER_TASK:>
Description:
def make_slot_check(wanted):
"""
Creates and returns a function that takes a slot
and checks if it matches the wanted item.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
""" |
if isinstance(wanted, types.FunctionType):
return wanted # just forward the slot check function
if isinstance(wanted, int):
item, meta = wanted, None
elif isinstance(wanted, Slot):
item, meta = wanted.item_id, wanted.damage # TODO compare NBT
elif isinstance(wanted, (Item, Block)):
item, meta = wanted.id, wanted.metadata
elif isinstance(wanted, str):
item_or_block = get_item_or_block(wanted, init=True)
item, meta = item_or_block.id, item_or_block.metadata
else: # wanted is (id, meta)
try:
item, meta = wanted
except TypeError:
raise ValueError('Illegal args for make_slot_check(): %s' % wanted)
return lambda slot: item == slot.item_id and meta in (None, slot.damage) |
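A hedged usage sketch; the numeric id 264 and the `window` variable are only illustrative:

check = make_slot_check(264)                       # match by item id alone
check = make_slot_check((264, 0))                  # match item id and metadata
check = make_slot_check(lambda s: s.amount > 32)   # custom predicates pass through
matching = [slot for slot in window.slots if check(slot)]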
<SYSTEM_TASK:>
Creates a new class for that window and registers it at this module.
<END_TASK>
<USER_TASK:>
Description:
def _make_window(window_dict):
"""
Creates a new class for that window and registers it at this module.
""" |
cls_name = '%sWindow' % camel_case(str(window_dict['name']))
bases = (Window,)
attrs = {
'__module__': sys.modules[__name__],
'name': str(window_dict['name']),
'inv_type': str(window_dict['id']),
'inv_data': window_dict,
}
# creates function-local index and size variables
def make_slot_method(index, size=1):
if size == 1:
return lambda self: self.slots[index]
else:
return lambda self: self.slots[index:(index + size)]
for slots in window_dict.get('slots', []):
index = slots['index']
size = slots.get('size', 1)
attr_name = snake_case(str(slots['name']))
attr_name += '_slot' if size == 1 else '_slots'
slots_method = make_slot_method(index, size)
slots_method.__name__ = attr_name
attrs[attr_name] = property(slots_method)
for i, prop_name in enumerate(window_dict.get('properties', [])):
def make_prop_method(i):
return lambda self: self.properties[i]
prop_method = make_prop_method(i)
prop_name = snake_case(str(prop_name))
prop_method.__name__ = prop_name
attrs[prop_name] = property(prop_method)
cls = type(cls_name, bases, attrs)
assert not hasattr(sys.modules[__name__], cls_name), \
'Window "%s" already registered at %s' % (cls_name, __name__)
setattr(sys.modules[__name__], cls_name, cls)
return cls |
<SYSTEM_TASK:>
Formats the slot for network packing.
<END_TASK>
<USER_TASK:>
Description:
def get_dict(self):
""" Formats the slot for network packing. """ |
data = {'id': self.item_id}
if self.item_id != constants.INV_ITEMID_EMPTY:
data['damage'] = self.damage
data['amount'] = self.amount
if self.nbt is not None:
data['enchants'] = self.nbt
return data |
<SYSTEM_TASK:>
Called when the click was successful
<END_TASK>
<USER_TASK:>
Description:
def on_success(self, inv_plugin, emit_set_slot):
"""
Called when the click was successful
and should be applied to the inventory.
Args:
inv_plugin (InventoryPlugin): inventory plugin instance
emit_set_slot (func): function to signal a slot change,
should be InventoryPlugin().emit_set_slot
""" |
self.dirty = set()
self.apply(inv_plugin)
for changed_slot in self.dirty:
emit_set_slot(changed_slot) |
<SYSTEM_TASK:>
Generate an access token using a username and password. Any existing
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self):
"""
Generate an access token using a username and password. Any existing
client token is invalidated if not provided.
Returns:
bool: True on success, False if the request failed
""" |
endpoint = '/authenticate'
payload = {
'agent': {
'name': 'Minecraft',
'version': self.ygg_version,
},
'username': self.username,
'password': self.password,
'clientToken': self.client_token,
}
rep = self._ygg_req(endpoint, payload)
if not rep or 'error' in rep:
return False
self.access_token = rep['accessToken']
self.client_token = rep['clientToken']
self.available_profiles = rep['availableProfiles']
self.selected_profile = rep['selectedProfile']
return True |
<SYSTEM_TASK:>
Check if an access token is valid
<END_TASK>
<USER_TASK:>
Description:
def validate(self):
"""
Check if an access token is valid
Returns:
bool: True if the access token is valid, False otherwise
""" |
endpoint = '/validate'
payload = dict(accessToken=self.access_token)
rep = self._ygg_req(endpoint, payload)
return not bool(rep) |
<SYSTEM_TASK:>
Calculates the total number of items of that type
<END_TASK>
<USER_TASK:>
Description:
def total_stored(self, wanted, slots=None):
"""
Calculates the total number of items of that type
in the current window or given slot range.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
""" |
if slots is None:
slots = self.window.slots
wanted = make_slot_check(wanted)
return sum(slot.amount for slot in slots if wanted(slot)) |
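A usage sketch, assuming `inventory` is the inventory plugin instance and 4 is the item id being counted:

total = inventory.total_stored(4)                                  # whole window
hotbar_only = inventory.total_stored(4, inventory.window.hotbar_slots)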
<SYSTEM_TASK:>
Searches the given slots or, if not given,
<END_TASK>
<USER_TASK:>
Description:
def find_slot(self, wanted, slots=None):
"""
Searches the given slots or, if not given,
the active hotbar slot, the rest of the hotbar, the main inventory, and the open window, in this order.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
Returns:
Optional[Slot]: The first slot containing the item
or None if not found.
""" |
for slot in self.find_slots(wanted, slots):
return slot
return None |
<SYSTEM_TASK:>
Yields all slots containing the item.
<END_TASK>
<USER_TASK:>
Description:
def find_slots(self, wanted, slots=None):
"""
Yields all slots containing the item.
Searches the given slots or, if not given,
the active hotbar slot, the rest of the hotbar, the main inventory, and the open window, in this order.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
""" |
if slots is None:
slots = self.inv_slots_preferred + self.window.window_slots
wanted = make_slot_check(wanted)
for slot in slots:
if wanted(slot):
yield slot |
<SYSTEM_TASK:>
Left-click or right-click the slot.
<END_TASK>
<USER_TASK:>
Description:
def click_slot(self, slot, right=False):
"""
Left-click or right-click the slot.
Args:
slot (Slot): The clicked slot. Can be ``Slot`` instance or integer.
Set to ``inventory.cursor_slot``
for clicking outside the window.
right (bool): Use the right mouse button instead of the left.
""" |
if isinstance(slot, int):
slot = self.window.slots[slot]
button = constants.INV_BUTTON_RIGHT \
if right else constants.INV_BUTTON_LEFT
return self.send_click(windows.SingleClick(slot, button)) |
<SYSTEM_TASK:>
Drop one or all items of the slot.
<END_TASK>
<USER_TASK:>
Description:
def drop_slot(self, slot=None, drop_stack=False):
"""
Drop one or all items of the slot.
Does not wait for confirmation from the server. If you want that,
use a ``Task`` and ``yield inventory.async.drop_slot()`` instead.
If ``slot`` is None, drops the ``cursor_slot`` or, if that's empty,
the currently held item (``active_slot``).
Args:
slot (Optional[Slot]): The dropped slot. Can be None, integer,
or ``Slot`` instance.
Returns:
int: The action ID of the click
""" |
if slot is None:
if self.cursor_slot.is_empty:
slot = self.active_slot
else:
slot = self.cursor_slot
elif isinstance(slot, int): # also allow slot nr
slot = self.window.slots[slot]
if slot == self.cursor_slot:
# dropping items from cursor is done via normal click
return self.click_slot(self.cursor_slot, not drop_stack)
return self.send_click(windows.DropClick(slot, drop_stack)) |
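A usage sketch, again assuming `inventory` is the plugin instance:

inventory.drop_slot()                       # drop one item from the held stack
slot = inventory.window.hotbar_slots[3]     # hypothetical slot to empty
inventory.drop_slot(slot, drop_stack=True)  # throw away the whole stack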
<SYSTEM_TASK:>
List of all available inventory slots in the preferred search order.
<END_TASK>
<USER_TASK:>
Description:
def inv_slots_preferred(self):
"""
List of all available inventory slots in the preferred search order.
Does not include the additional slots from the open window.
1. active slot
2. remainder of the hotbar
3. remainder of the persistent inventory
""" |
slots = [self.active_slot]
slots.extend(slot for slot in self.window.hotbar_slots
if slot != self.active_slot)
slots.extend(self.window.inventory_slots)
return slots |