def q(segmentation, s1, s2):
index1 = find_index(segmentation, s1)
index2 = find_index(segmentation, s2)
return index1 == index2 | Test if ``s1`` and ``s2`` are in the same symbol, given the
``segmentation``. |
def score_segmentation(segmentation, table):
stroke_nr = sum(1 for symbol in segmentation for stroke in symbol)
score = 1
for i in range(stroke_nr):
for j in range(i+1, stroke_nr):
qval = q(segmentation, i, j)
if qval:
score *= table[i][j]
else:
score *= table[j][i]
return score | Get the score of a segmentation. |
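A tiny worked example helps to see how ``q`` and the probability table interact. The sketch below assumes ``find_index`` returns the index of the symbol that contains a given stroke (it is not shown in this excerpt); with two strokes, the upper-triangle entry is used when they share a symbol and the mirrored entry otherwise.

def find_index(segmentation, stroke):
    # Assumed helper: index of the symbol (sub-list) that contains `stroke`.
    for i, symbol in enumerate(segmentation):
        if stroke in symbol:
            return i
    return -1

# table[0][1]: probability that strokes 0 and 1 belong to the same symbol;
# table[1][0] is what score_segmentation multiplies in when they do not.
table = [[0.0, 0.9],
         [0.1, 0.0]]
print(score_segmentation([[0, 1]], table))    # 0.9  (both strokes in one symbol)
print(score_segmentation([[0], [1]], table))  # 0.1  (strokes in separate symbols)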
def get_top_segmentations(table, n):
stroke_count = list(range(len(table)))
topf = TopFinder(n)
for curr_segmentation in all_segmentations(stroke_count):
curr_seg_score = score_segmentation(curr_segmentation, table)
topf.push(curr_segmentation, curr_seg_score)
for el, score in topf:
yield [normalize_segmentation(el), score] | Parameters
----------
table : matrix of probabilities
Each cell (i, j) of `table` gives the probability that i and j are in
the same symbol.
n : int
Number of best segmentations which get returned |
def push(self, element, value):
insert_pos = 0
for index, el in enumerate(self.tops):
if not self.find_min and el[1] >= value:
insert_pos = index+1
elif self.find_min and el[1] <= value:
insert_pos = index+1
self.tops.insert(insert_pos, [element, value])
self.tops = self.tops[:self.n] | Push an ``element`` into the data structure together with its value
and only save it if it currently is one of the top n elements.
Drop elements if necessary. |
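For context, ``push`` lives on a small top-n container. The surrounding class is not part of this excerpt, so the constructor below (storing ``n``, ``find_min`` and the ``tops`` list) is an assumed minimal sketch, with the ``push`` method repeated verbatim so the example runs on its own.

class TopFinder(object):
    def __init__(self, n, find_min=False):
        self.n = n                # keep at most n elements
        self.find_min = find_min  # True: keep the smallest values instead
        self.tops = []            # list of [element, value], best first

    def push(self, element, value):
        insert_pos = 0
        for index, el in enumerate(self.tops):
            if not self.find_min and el[1] >= value:
                insert_pos = index + 1
            elif self.find_min and el[1] <= value:
                insert_pos = index + 1
        self.tops.insert(insert_pos, [element, value])
        self.tops = self.tops[:self.n]

    def __iter__(self):
        return iter(self.tops)

topf = TopFinder(2)
for name, score in [('a', 0.1), ('b', 0.7), ('c', 0.4)]:
    topf.push(name, score)
print(topf.tops)  # [['b', 0.7], ['c', 0.4]] - only the two best survive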
def _array2cstr(arr):
out = StringIO()
np.save(out, arr)
return b64encode(out.getvalue()) | Serializes a numpy array to a compressed base64 string |
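Note that the ``_str2array`` helper that follows reads plain text with ``np.loadtxt``, so it is not the exact inverse of ``_array2cstr``. A minimal round trip for the ``np.save``/base64 path (a sketch, written for Python 3 with ``BytesIO``; the original uses ``StringIO``) could look like this:

from base64 import b64encode, b64decode
from io import BytesIO

import numpy as np

def array_to_b64(arr):
    buf = BytesIO()
    np.save(buf, arr)              # binary .npy payload
    return b64encode(buf.getvalue())

def b64_to_array(s):
    return np.load(BytesIO(b64decode(s)))

a = np.arange(6).reshape(2, 3)
assert (b64_to_array(array_to_b64(a)) == a).all()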
def _str2array(d):
if type(d) == list:
return np.asarray([_str2array(s) for s in d])
ins = StringIO(d)
return np.loadtxt(ins) | Reconstructs a numpy array from a plain-text string |
def create_output_semantics(model_folder, outputs):
with open('output_semantics.csv', 'wb') as csvfile:
model_description_file = os.path.join(model_folder, "info.yml")
with open(model_description_file, 'r') as ymlfile:
model_description = yaml.load(ymlfile)
logging.info("Start fetching translation dict...")
translation_dict = utils.get_index2data(model_description)
spamwriter = csv.writer(csvfile, delimiter=';',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for output_index in range(outputs):
if output_index in translation_dict:
# Add more information:
# 1. ID in my system
# 2. latex
# 3. unicode code point
# 4. font
# 5. font style
spamwriter.writerow(translation_dict[output_index])
else:
print("No data for %i." % output_index)
spamwriter.writerow(["output %i" % output_index]) | Create a 'output_semantics.csv' file which contains information what the
output of the single output neurons mean.
Parameters
----------
model_folder : str
folder where the model description file is
outputs : int
number of output neurons |
def elementtree_to_dict(element):
d = dict()
if hasattr(element, 'text') and element.text is not None:
d['text'] = element.text
d.update(element.items()) # element's attributes
for c in list(element): # element's children
if c.tag not in d:
d[c.tag] = elementtree_to_dict(c)
# an element with the same tag was already in the dict
else:
# if it's not a list already, convert it to a list and append
if not isinstance(d[c.tag], list):
d[c.tag] = [d[c.tag], elementtree_to_dict(c)]
# append to the list
else:
d[c.tag].append(elementtree_to_dict(c))
return d | Convert an xml ElementTree to a dictionary. |
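A quick usage example with the standard-library parser and a made-up XML snippet (repeated child tags are collected into a list):

import xml.etree.ElementTree as ET

xml = '<trace id="1">hello<point x="0"/><point x="1"/></trace>'
d = elementtree_to_dict(ET.fromstring(xml))
# d == {'text': 'hello', 'id': '1',
#       'point': [{'x': '0'}, {'x': '1'}]}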
def strip_end(text, suffix):
if not text.endswith(suffix):
return text
return text[:len(text)-len(suffix)] | Strip `suffix` from the end of `text` if `text` has that suffix. |
def get_data_multiplication_queue(model_description_multiply):
return utils.get_objectlist(model_description_multiply,
config_key='data_multiplication',
module=sys.modules[__name__]) | Get data multiplication objects from a list of dictionaries
>>> l = [{'Multiply': [{'nr': 1}]}, \
{'Rotate': [{'minimum':-30}, {'maximum': 30}, {'step': 5}]}]
>>> get_data_multiplication_queue(l)
[Multiply (1 times), Rotate (-30.00, 30.00, 5.00)] |
def getuserid(username, copyright_str):
global username2id
if username not in username2id:
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
sql = ("INSERT IGNORE INTO `wm_users` ("
"`display_name` , "
"`password` ,"
"`account_type` ,"
"`confirmation_code` ,"
"`status` ,"
"`description`"
") "
"VALUES ("
"%s, '', 'Regular User', '', 'activated', %s"
");")
cursor.execute(sql, (username, copyright_str))
connection.commit()
# Get the id
try:
sql = ("SELECT `id` FROM `wm_users` "
"WHERE `display_name` = %s LIMIT 1")
cursor.execute(sql, username)
uid = cursor.fetchone()['id']
except Exception as inst:
logging.debug("username not found: %s", username)
print(inst)
# logging.info("%s: %s", username, uid)
username2id[username] = uid
return username2id[username] | Get the ID of the user with `username` from write-math.com. If the user
does not exist yet, create them. Add `copyright_str` as a description.
Parameters
----------
username : string
Name of a user.
copyright_str : string
Description text of a user in Markdown format.
Returns
-------
int :
ID on write-math.com of the user. |
def insert_recording(hw):
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
cursor = connection.cursor()
sql = ("INSERT INTO `wm_raw_draw_data` ("
"`user_id`, "
"`data`, "
"`md5data`, "
"`creation_date`, "
"`device_type`, "
"`accepted_formula_id`, "
"`secret`, "
"`ip`, "
"`segmentation`, "
"`internal_id`, "
"`description` "
") VALUES (%s, %s, MD5(data), "
"%s, %s, %s, %s, %s, %s, %s, %s);")
data = (hw.user_id,
hw.raw_data_json,
getattr(hw, 'creation_date', None),
getattr(hw, 'device_type', ''),
getattr(hw, 'formula_id', None),
getattr(hw, 'secret', ''),
getattr(hw, 'ip', None),
str(getattr(hw, 'segmentation', '')),
getattr(hw, 'internal_id', ''),
getattr(hw, 'description', ''))
cursor.execute(sql, data)
connection.commit()
for symbol_id, strokes in zip(hw.symbol_stream, hw.segmentation):
insert_symbol_mapping(cursor.lastrowid,
symbol_id,
hw.user_id,
strokes)
logging.info("Insert raw data.")
except pymysql.err.IntegrityError as e:
print("Error: {} (can probably be ignored)".format(e)) | Insert recording `hw` into database. |
def insert_symbol_mapping(raw_data_id, symbol_id, user_id, strokes):
mysql = utils.get_mysql_cfg()
connection = pymysql.connect(host=mysql['host'],
user=mysql['user'],
passwd=mysql['passwd'],
db=mysql['db'],
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor()
sql = ("INSERT INTO `wm_partial_answer` "
"(`recording_id`, `symbol_id`, `strokes`, `user_id`, "
"`is_accepted`) "
"VALUES (%s, %s, %s, %s, 1);")
data = (raw_data_id,
symbol_id,
",".join([str(stroke) for stroke in strokes]),
user_id)
cursor.execute(sql, data)
connection.commit() | Insert data into `wm_partial_answer`.
Parameters
----------
raw_data_id : int
user_id : int
strokes: list of int |
def filter_label(label, replace_by_similar=True):
bad_names = ['celsius', 'degree', 'ohm', 'venus', 'mars', 'astrosun',
'fullmoon', 'leftmoon', 'female', 'male', 'checked',
'diameter', 'sun', 'Bowtie', 'sqrt',
'cong', 'copyright', 'dag', 'parr', 'notin', 'dotsc',
'mathds', 'mathfrak']
if any(label[1:].startswith(bad) for bad in bad_names):
if label == '\\dag' and replace_by_similar:
return '\\dagger'
elif label == '\\diameter' and replace_by_similar:
return '\\O'
return label[1:]
else:
return label | Some labels currently don't work together because of LaTeX naming
clashes. Those will be replaced by simple strings. |
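A few calls to illustrate the behaviour (the outputs follow directly from the code above):

print(filter_label('\\dag'))    # \dagger  (replaced by a similar, working command)
print(filter_label('\\sqrt'))   # sqrt     (leading backslash stripped)
print(filter_label('\\alpha'))  # \alpha   (not a problematic label, returned unchanged)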
def analyze_feature(raw_datasets, feature, basename="aspect_ratios"):
# Prepare files
csv_file = dam.prepare_file(basename + '.csv')
raw_file = dam.prepare_file(basename + '.raw')
csv_file = open(csv_file, 'a')
raw_file = open(raw_file, 'a')
csv_file.write("label,mean,std\n") # Write header
raw_file.write("latex,raw_data_id,value\n") # Write header
print_data = []
for _, datasets in dam.sort_by_formula_id(raw_datasets).items():
values = []
for data in datasets:
value = feature(data)[0]
values.append(value)
raw_file.write("%s,%i,%0.2f\n" % (datasets[0].formula_in_latex,
data.raw_data_id,
value))
label = filter_label(datasets[0].formula_in_latex)
print_data.append((label, numpy.mean(values), numpy.std(values)))
# Sort the data by highest mean, descending
print_data = sorted(print_data, key=lambda n: n[1], reverse=True)
# Write data to file
for label, mean, std in print_data:
csv_file.write("%s,%0.2f,%0.2f\n" % (label, mean, std))
csv_file.close() | Apply ``feature`` to all recordings in ``raw_datasets``. Store the results
in two files. One file stores the raw result, the other one groups the
results by symbols and stores the mean, standard deviation and the name of
the symbol as a csv file.
Parameters
----------
raw_datasets : List of dictionaries
Each dictionary is a raw_dataset.
feature : An instance of the feature class type
The `feature` which gets analyzed on `raw_datasets`.
basename : string
Name for the file in which the data gets written. |
def main(handwriting_datasets_file, analyze_features):
# Load from pickled file
logging.info("Start loading data '%s' ...", handwriting_datasets_file)
loaded = pickle.load(open(handwriting_datasets_file))
raw_datasets = loaded['handwriting_datasets']
logging.info("%i datasets loaded.", len(raw_datasets))
logging.info("Start analyzing...")
if analyze_features:
featurelist = [(features.AspectRatio(), "aspect_ratio.csv"),
(features.ReCurvature(1), "re_curvature.csv"),
(features.Height(), "height.csv"),
(features.Width(), "width.csv"),
(features.Time(), "time.csv"),
(features.Ink(), "ink.csv"),
(features.StrokeCount(), "stroke-count.csv")]
for feat, filename in featurelist:
logging.info("create %s...", filename)
analyze_feature(raw_datasets, feat, filename)
# Analyze everything specified in configuration
cfg = utils.get_project_configuration()
if 'data_analyzation_queue' in cfg:
metrics = dam.get_metrics(cfg['data_analyzation_queue'])
for metric in metrics:
logging.info("Start metric %s...", str(metric))
metric(raw_datasets)
else:
logging.info("No 'data_analyzation_queue' in ~/.hwrtrc") | Start the creation of the wanted metric. |
def get_parser():
project_root = utils.get_project_root()
# Get latest (raw) dataset
dataset_folder = os.path.join(project_root, "raw-datasets")
latest_dataset = utils.get_latest_in_folder(dataset_folder, "raw.pickle")
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-d", "--handwriting_datasets",
dest="handwriting_datasets",
help="where are the pickled handwriting_datasets?",
metavar="FILE",
type=lambda x: utils.is_valid_file(parser, x),
default=latest_dataset)
parser.add_argument("-f", "--features",
dest="analyze_features",
help="analyze features",
action="store_true",
default=False)
return parser | Return the parser object for this script. |
def remove_matching_braces(latex):
if latex.startswith('{') and latex.endswith('}'):
opened = 1
matches = True
for char in latex[1:-1]:
if char == '{':
opened += 1
elif char == '}':
opened -= 1
if opened == 0:
matches = False
if matches:
latex = latex[1:-1]
return latex | If `latex` is surrounded by matching braces, remove them. They are not
necessary.
Parameters
----------
latex : string
Returns
-------
string
Examples
--------
>>> remove_matching_braces('{2+2}')
'2+2'
>>> remove_matching_braces('{2+2')
'{2+2' |
def get_latex(ink_filename):
tex_file = os.path.splitext(ink_filename)[0] + ".tex"
with open(tex_file) as f:
tex_content = f.read().strip()
pattern = re.compile(r"\\begin\{displaymath\}(.*?)\\end\{displaymath\}",
re.DOTALL)
matches = pattern.findall(tex_content)
if len(matches) == 0:
pattern = re.compile(r"$$(.*?)$$",
re.DOTALL)
matches = pattern.findall(tex_content)
if len(matches) != 1:
raise Exception("%s: Found not one match, but %i: %s" %
(ink_filename, len(matches), matches))
formula_in_latex = matches[0].strip()
formula_in_latex = remove_matching_braces(formula_in_latex)
# repl = []
# for letter in string.letters:
# repl.append(('\mbox{%s}' % letter, letter))
# for search, replace in repl:
# formula_in_latex = formula_in_latex.replace(search, replace)
return formula_in_latex | Get the LaTeX string from a file by the *.ink filename. |
def get_segmentation(recording, annotations, internal_id=None):
global missing_stroke_segmentation, double_segmentation
segmentation = []
symbol_stream = []
needed = list(range(len(recording)))
annotations = filter(lambda n: n.startswith('SYMBOL '), annotations)
for line in annotations:
tmp = line.split("<")[1]
tmp, symbol_string = tmp.split(">")
symbol_string = symbol_string.strip()
strokes = [int(stroke) for stroke in tmp.split(",")
if int(stroke) < len(recording)]
for el in strokes:
if el not in needed:
double_segmentation.append(internal_id)
strokes.remove(el)
logging.debug("invalid segmentation by annotation: %s",
annotations)
else:
needed.remove(el)
segmentation.append(strokes)
symbol_stream.append(datasets.formula_to_dbid(mathbrush_formula_fix(symbol_string), True))
if len(needed) > 0:
# hw = handwritten_data.HandwrittenData(json.dumps(recording))
# hw.show()
missing_stroke_segmentation.append(internal_id)
segmentation.append(needed)
return segmentation, symbol_stream | Parameters
----------
recording :
A HandwrittenData object
annotations : list of strings
internal_id : string
An identifier for the dataset, e.g. 'user1/200922-947-111.ink'.
Returns
-------
tuple : segmentation and list of symbol ids (of write-math.com) |
def read_folder(folder):
recordings = []
for filename in glob.glob(os.path.join(folder, '*.ink')):
recording = parse_scg_ink_file(filename)
recordings.append(recording)
return recordings | Read all files of `folder` and return a list of HandwrittenData
objects.
Parameters
----------
folder : string
Path to a folder
Returns
-------
list :
A list of HandwrittenData objects, one for each .ink file in the given folder. |
def fix_times(self):
pointlist = self.get_pointlist()
times = [point['time'] for stroke in pointlist for point in stroke]
times_min = max(min(times), 0) # Make sure this is not None
for i, stroke in enumerate(pointlist):
for j, point in enumerate(stroke):
if point['time'] is None:
pointlist[i][j]['time'] = times_min
else:
times_min = point['time']
self.raw_data_json = json.dumps(pointlist) | Some recordings have wrong times. Fix them so that nothing after
loading a handwritten recording breaks. |
def get_pointlist(self):
try:
pointlist = json.loads(self.raw_data_json)
except Exception as inst:
logging.debug("pointStrokeList: strokelistP")
logging.debug(self.raw_data_json)
logging.debug("didn't work")
raise inst
if len(pointlist) == 0:
logging.warning("Pointlist was empty. Search for '" +
self.raw_data_json + "' in `wm_raw_draw_data`.")
return pointlist | Get a list of lists of dictionaries from the JSON raw data string. Those lists
represent strokes with control points.
Returns
-------
list :
A list of strokes. Each stroke is a list of dictionaries
{'x': 123, 'y': 42, 'time': 1337} |
def get_sorted_pointlist(self):
pointlist = self.get_pointlist()
for i in range(len(pointlist)):
pointlist[i] = sorted(pointlist[i], key=lambda p: p['time'])
pointlist = sorted(pointlist, key=lambda stroke: stroke[0]['time'])
return pointlist | Make sure that the points and strokes are in order.
Returns
-------
list
A list of all strokes in the recording. Each stroke is represented
as a list of dicts {'time': 123, 'x': 45, 'y': 67} |
def set_pointlist(self, pointlist):
assert type(pointlist) is list, \
"pointlist is not of type list, but %r" % type(pointlist)
assert len(pointlist) >= 1, \
"The pointlist of formula_id %i is %s" % (self.formula_id,
self.get_pointlist())
self.raw_data_json = json.dumps(pointlist) | Overwrite pointlist.
Parameters
----------
pointlist : a list of strokes; each stroke is a list of points
The inner lists represent strokes. Every stroke consists of points.
Every point is a dictionary with 'x', 'y', 'time'. |
def get_bounding_box(self):
pointlist = self.get_pointlist()
# Initialize bounding box parameters to save values
minx, maxx = pointlist[0][0]["x"], pointlist[0][0]["x"]
miny, maxy = pointlist[0][0]["y"], pointlist[0][0]["y"]
mint, maxt = pointlist[0][0]["time"], pointlist[0][0]["time"]
# Adjust parameters
for stroke in pointlist:
for p in stroke:
minx, maxx = min(minx, p["x"]), max(maxx, p["x"])
miny, maxy = min(miny, p["y"]), max(maxy, p["y"])
mint, maxt = min(mint, p["time"]), max(maxt, p["time"])
return {"minx": minx, "maxx": maxx, "miny": miny, "maxy": maxy,
"mint": mint, "maxt": maxt} | Get the bounding box of a pointlist. |
def get_bitmap(self, time=None, size=32, store_path=None):
# bitmap_width = int(self.get_width()*size) + 2
# bitmap_height = int(self.get_height()*size) + 2
img = Image.new('L', (size, size), 'black')
draw = ImageDraw.Draw(img, 'L')
bb = self.get_bounding_box()
for stroke in self.get_sorted_pointlist():
for p1, p2 in zip(stroke, stroke[1:]):
if time is not None and \
(p1['time'] > time or p2['time'] > time):
continue
y_from = int((-bb['miny'] + p1['y']) /
max(self.get_height(), 1)*size)
x_from = int((-bb['minx'] + p1['x']) /
max(self.get_width(), 1)*size)
y_to = int((-bb['miny'] + p2['y']) /
max(self.get_height(), 1)*size)
x_to = int((-bb['minx'] + p2['x']) /
max(self.get_width(), 1)*size)
draw.line([x_from, y_from, x_to, y_to],
fill='#ffffff',
width=1)
del draw
if store_path is not None:
img.save(store_path)
return numpy.asarray(img) | Get a bitmap of the object at a given instance of time. If time is
``None``, then the bitmap is generated for the last point in time.
Parameters
----------
time : int or None
size : int
Size in pixels. The resulting bitmap will be (size x size).
store_path : None or str
If this is set, then the image will be saved there.
Returns
-------
numpy array :
Greyscale png image |
def preprocessing(self, algorithms):
assert type(algorithms) is list
for algorithm in algorithms:
algorithm(self) | Apply preprocessing algorithms.
Parameters
----------
algorithms : a list of objects
Preprocessing algorithms which get applied in order.
Examples
--------
>>> import preprocessing
>>> a = HandwrittenData(...)
>>> preprocessing_queue = [(preprocessing.scale_and_shift, []),
... (preprocessing.connect_strokes, []),
... (preprocessing.douglas_peucker,
... {'EPSILON': 0.2}),
... (preprocessing.space_evenly,
... {'number': 100,
... 'KIND': 'cubic'})]
>>> a.preprocessing(preprocessing_queue) |
def feature_extraction(self, algorithms):
assert type(algorithms) is list
features = []
for algorithm in algorithms:
new_features = algorithm(self)
assert len(new_features) == algorithm.get_dimension(), \
"Expected %i features from algorithm %s, got %i features" % \
(algorithm.get_dimension(), str(algorithm), len(new_features))
features += new_features
return features | Get a list of features.
Every algorithm has to return the features as a list. |
def show(self):
# prevent the following error:
# '_tkinter.TclError: no display name and no $DISPLAY environment
# variable'
# import matplotlib
# matplotlib.use('GTK3Agg', warn=False)
import matplotlib.pyplot as plt
pointlist = self.get_pointlist()
if 'pen_down' in pointlist[0][0]:
assert len(pointlist) > 1, \
"Lenght of pointlist was %i. Got: %s" % (len(pointlist),
pointlist)
# Create a new pointlist that models pen-down strokes and pen
# up strokes
new_pointlist = []
last_pendown_state = None
stroke = []
for point in pointlist[0]:
if last_pendown_state is None:
last_pendown_state = point['pen_down']
if point['pen_down'] != last_pendown_state:
new_pointlist.append(stroke)
last_pendown_state = point['pen_down']
stroke = []
else:
stroke.append(point)
new_pointlist.append(stroke) # add the last stroke
pointlist = new_pointlist
_, ax = plt.subplots()
ax.set_title("Raw data id: %s, "
"Formula_id: %s" % (str(self.raw_data_id),
str(self.formula_id)))
colors = _get_colors(self.segmentation)
for symbols, color in zip(self.segmentation, colors):
for stroke_index in symbols:
stroke = pointlist[stroke_index]
xs, ys = [], []
for p in stroke:
xs.append(p['x'])
ys.append(p['y'])
if "pen_down" in stroke[0] and stroke[0]["pen_down"] is False:
plt.plot(xs, ys, '-x', color=color)
else:
plt.plot(xs, ys, '-o', color=color)
plt.gca().invert_yaxis()
ax.set_aspect('equal')
plt.show() | Show the data graphically in a new pop-up window. |
def count_single_dots(self):
pointlist = self.get_pointlist()
single_dots = 0
for stroke in pointlist:
if len(stroke) == 1:
single_dots += 1
return single_dots | Count all strokes of this recording that have only a single dot. |
def get_center_of_mass(self):
xsum, ysum, counter = 0., 0., 0
for stroke in self.get_pointlist():
for point in stroke:
xsum += point['x']
ysum += point['y']
counter += 1
return (xsum / counter, ysum / counter) | Get a tuple (x,y) that is the center of mass. The center of mass is not
necessarily the same as the center of the bounding box. Imagine a black
square and a single dot far outside of the square. |
def to_single_symbol_list(self):
symbol_stream = getattr(self,
'symbol_stream',
[None for symbol in self.segmentation])
single_symbols = []
pointlist = self.get_sorted_pointlist()
for stroke_indices, label in zip(self.segmentation, symbol_stream):
strokes = []
for stroke_index in stroke_indices:
strokes.append(pointlist[stroke_index])
single_symbols.append(HandwrittenData(json.dumps(strokes),
formula_id=label))
return single_symbols | Convert this HandwrittenData object into a list of HandwrittenData
objects. Each element of the list is a single symbol.
Returns
-------
list of HandwrittenData objects |
def git_log_iterator(path):
N = 10
count = 0
while True:
lines = _run_git_command_lines(['log', '--oneline',
'-n', str(N),
'--skip', str(count),
'--', '.'], cwd=path)
for line in lines:
sha = line.split(' ', 1)[0]
count += 1
yield sha
if len(lines) < N:
break | yield commits using git log -- <dir> |
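``_run_git_command_lines`` is not part of this excerpt; a plausible minimal implementation (an assumption, not the project's actual helper) runs git in the given directory and returns stdout split into non-empty lines:

import subprocess

def _run_git_command_lines(args, cwd=None):
    # Run `git <args>` in `cwd` and return stdout as a list of non-empty lines.
    output = subprocess.check_output(['git'] + args, cwd=cwd,
                                     universal_newlines=True)
    return [line for line in output.splitlines() if line]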
def _get_odoo_version_info(addons_dir, odoo_version_override=None):
odoo_version_info = None
addons = os.listdir(addons_dir)
for addon in addons:
addon_dir = os.path.join(addons_dir, addon)
if is_installable_addon(addon_dir):
manifest = read_manifest(addon_dir)
_, _, addon_odoo_version_info = _get_version(
addon_dir, manifest, odoo_version_override,
git_post_version=False)
if odoo_version_info is not None and \
odoo_version_info != addon_odoo_version_info:
raise DistutilsSetupError("Not all addons are for the same "
"odoo version in %s (error detected "
"in %s)" % (addons_dir, addon))
odoo_version_info = addon_odoo_version_info
return odoo_version_info | Detect Odoo version from an addons directory |
def _get_version(addon_dir, manifest, odoo_version_override=None,
git_post_version=True):
version = manifest.get('version')
if not version:
warn("No version in manifest in %s" % addon_dir)
version = '0.0.0'
if not odoo_version_override:
if len(version.split('.')) < 5:
raise DistutilsSetupError("Version in manifest must have at least "
"5 components and start with "
"the Odoo series number in %s" %
addon_dir)
odoo_version = '.'.join(version.split('.')[:2])
else:
odoo_version = odoo_version_override
if odoo_version not in ODOO_VERSION_INFO:
raise DistutilsSetupError("Unsupported odoo version '%s' in %s" %
(odoo_version, addon_dir))
odoo_version_info = ODOO_VERSION_INFO[odoo_version]
if git_post_version:
version = get_git_postversion(addon_dir)
return version, odoo_version, odoo_version_info | Get addon version information from an addon directory |
def get_install_requires_odoo_addon(addon_dir,
no_depends=[],
depends_override={},
external_dependencies_override={},
odoo_version_override=None):
manifest = read_manifest(addon_dir)
_, _, odoo_version_info = _get_version(addon_dir,
manifest,
odoo_version_override,
git_post_version=False)
return _get_install_requires(odoo_version_info,
manifest,
no_depends,
depends_override,
external_dependencies_override) | Get the list of requirements for an addon |
def get_install_requires_odoo_addons(addons_dir,
depends_override={},
external_dependencies_override={},
odoo_version_override=None):
addon_dirs = []
addons = os.listdir(addons_dir)
for addon in addons:
addon_dir = os.path.join(addons_dir, addon)
if is_installable_addon(addon_dir):
addon_dirs.append(addon_dir)
install_requires = set()
for addon_dir in addon_dirs:
r = get_install_requires_odoo_addon(
addon_dir,
no_depends=addons,
depends_override=depends_override,
external_dependencies_override=external_dependencies_override,
odoo_version_override=odoo_version_override,
)
install_requires.update(r)
return sorted(install_requires) | Get the list of requirements for a directory containing addons |
def _find_addons_dir():
res = set()
for odoo_version_info in ODOO_VERSION_INFO.values():
addons_ns = odoo_version_info['addons_ns']
addons_dir = os.path.join(*addons_ns.split('.'))
if os.path.isdir(addons_dir):
if not odoo_version_info['namespace_packages'] or \
os.path.isfile(os.path.join(addons_dir, '__init__.py')):
res.add((addons_dir, addons_ns))
if len(res) == 0:
raise RuntimeError("No addons namespace found.")
if len(res) > 1:
raise RuntimeError("More than one addons namespace found.")
return res.pop() | Try to find the addons dir / namespace package
Returns addons_dir, addons_ns |
def make_declarative_base(self, metadata=None):
return make_declarative_base(self.session,
Model=self.Model,
metadata=metadata) | Override parent function with alchy's |
def prep_doc(self, doc_obj):
doc = doc_obj._data.copy()
for key, prop in list(doc_obj._base_properties.items()):
prop.validate(doc.get(key), key)
raw_value = prop.get_python_value(doc.get(key))
if prop.unique:
self.check_unique(doc_obj, key, raw_value)
value = prop.get_db_value(raw_value)
doc[key] = value
doc['_doc_type'] = get_doc_type(doc_obj.__class__)
return doc | This method validates, gets the Python value, checks unique indexes,
gets the db value, and then returns the prepared doc dict object.
Useful for save and backup functions.
@param doc_obj:
@return: |
def apply_zappa_settings(zappa_obj, zappa_settings, environment):
'''Load Zappa settings, set defaults if needed, and apply to the Zappa object'''
settings_all = json.load(zappa_settings)
settings = settings_all[environment]
# load defaults for missing options
for key,value in DEFAULT_SETTINGS.items():
settings[key] = settings.get(key, value)
if '~' in settings['settings_file']:
settings['settings_file'] = settings['settings_file'].replace('~', os.path.expanduser('~'))
if not os.path.isfile(settings['settings_file']):
raise SettingsError("Please make sure your settings_file "
"is properly defined in {0}.".format(zappa_settings))
for setting in CUSTOM_SETTINGS:
if setting in settings:
setattr(zappa_obj, setting, settings[setting])
return settings | Load Zappa settings, set defaults if needed, and apply to the Zappa object |
def deploy(environment, zappa_settings):
print(("Deploying " + environment))
zappa, settings, lambda_name, zip_path = \
_package(environment, zappa_settings)
s3_bucket_name = settings['s3_bucket']
try:
# Load your AWS credentials from ~/.aws/credentials
zappa.load_credentials()
# Make sure the necessary IAM execution roles are available
zappa.create_iam_roles()
# Upload it to S3
zip_arn = zappa.upload_to_s3(zip_path, s3_bucket_name)
# Register the Lambda function with that zip as the source
# You'll also need to define the path to your lambda_handler code.
lambda_arn = zappa.create_lambda_function(bucket=s3_bucket_name,
s3_key=zip_path,
function_name=lambda_name,
handler='handler.lambda_handler',
vpc_config=settings['vpc_config'],
memory_size=settings['memory_size'])
# Create and configure the API Gateway
api_id = zappa.create_api_gateway_routes(lambda_arn, lambda_name)
# Deploy the API!
endpoint_url = zappa.deploy_api_gateway(api_id, environment)
# Remove the uploaded zip from S3, because it is now registered..
zappa.remove_from_s3(zip_path, s3_bucket_name)
if settings['touch']:
requests.get(endpoint_url)
finally:
try:
# Finally, delete the local copy our zip package
if settings['delete_zip']:
os.remove(zip_path)
except:
print("WARNING: Manual cleanup of the zip might be needed.")
print(("Your Zappa deployment is live!: " + endpoint_url)) | Package, create and deploy to Lambda. |
def update(environment, zappa_settings):
print(("Updating " + environment))
# Package dependencies, and the source code into a zip
zappa, settings, lambda_name, zip_path = \
_package(environment, zappa_settings)
s3_bucket_name = settings['s3_bucket']
try:
# Load your AWS credentials from ~/.aws/credentials
zappa.load_credentials()
# Update IAM roles if needed
zappa.create_iam_roles()
# Upload it to S3
zip_arn = zappa.upload_to_s3(zip_path, s3_bucket_name)
# Register the Lambda function with that zip as the source
# You'll also need to define the path to your lambda_handler code.
lambda_arn = zappa.update_lambda_function(s3_bucket_name, zip_path,
lambda_name)
# Remove the uploaded zip from S3, because it is now registered..
zappa.remove_from_s3(zip_path, s3_bucket_name)
finally:
try:
# Finally, delete the local copy our zip package
if settings['delete_zip']:
os.remove(zip_path)
except:
print("WARNING: Manual cleanup of the zip might be needed.")
print("Your updated Zappa deployment is live!") | Update an existing deployment. |
def tail(environment, zappa_settings):
def print_logs(logs):
for log in logs:
timestamp = log['timestamp']
message = log['message']
if "START RequestId" in message:
continue
if "REPORT RequestId" in message:
continue
if "END RequestId" in message:
continue
print("[" + str(timestamp) + "] " + message.strip())
zappa, settings, _, lambda_name = _init(environment, zappa_settings)
try:
# Tail the available logs
all_logs = zappa.fetch_logs(lambda_name)
print_logs(all_logs)
# Keep polling, and print any new logs.
while True:
all_logs_again = zappa.fetch_logs(lambda_name)
new_logs = []
for log in all_logs_again:
if log not in all_logs:
new_logs.append(log)
print_logs(new_logs)
all_logs = all_logs + new_logs
except KeyboardInterrupt:
# Die gracefully
try:
sys.exit(0)
except SystemExit:
os._exit(0) | Stolen verbatim from django-zappa:
https://github.com/Miserlou/django-zappa/blob/master/django_zappa/management/commands/tail.py |
def get_context_data(self, **kwargs):
queryset = kwargs.pop('object_list')
page_template = kwargs.pop('page_template', None)
context_object_name = self.get_context_object_name(queryset)
context = {'object_list': queryset, 'view': self}
context.update(kwargs)
if context_object_name is not None:
context[context_object_name] = queryset
if page_template is None:
if hasattr(queryset, 'model'):
page_template = self.get_page_template(**kwargs)
else:
raise ImproperlyConfigured(
'AjaxListView requires a page_template')
context['page_template'] = self.page_template = page_template
return context | Get the context for this view.
Also adds the *page_template* variable in the context.
If the *page_template* is not given as a kwarg of the *as_view*
method then it is generated using app label, model name
(obviously if the list is a queryset), *self.template_name_suffix*
and *self.page_template_suffix*.
For instance, if the list is a queryset of *blog.Entry*,
the template will be ``blog/entry_list_page.html``. |
def get_template_names(self):
request = self.request
querystring_key = request.REQUEST.get('querystring_key', PAGE_LABEL)
if request.is_ajax() and querystring_key == self.key:
return [self.page_template]
return super(
AjaxMultipleObjectTemplateResponseMixin, self).get_template_names() | Switch the templates for Ajax requests. |
def clean_var(text):
text = re_invalid_var.sub('', text)
text = re_invalid_start.sub('', text)
return text | Turn text into a valid python classname or variable |
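The two module-level patterns are not shown in this excerpt; a plausible pair (an assumption) strips characters that are not valid in identifiers and then any leading digits:

import re

re_invalid_var = re.compile(r'[^0-9a-zA-Z_]')  # drop anything not alphanumeric or _
re_invalid_start = re.compile(r'^[0-9]+')      # drop leading digits

print(clean_var('3rd party-module!'))  # rdpartymodule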
def full_tasktrace(self):
if self.prev_error:
return self.prev_error.tasktrace + self.tasktrace
else:
return self.tasktrace | List of all failed tasks caused by this and all previous errors.
Returns:
List[Task] |
def dist_cubic(self, other=None):
v = self - other if other else self
return sum(map(abs, v.vector)) | Manhattan distance |
def dist_sq(self, other=None):
v = self - other if other else self
return sum(map(lambda a: a * a, v)) | For fast length comparison |
def yaw_pitch(self):
if not self:
return YawPitch(0, 0)
ground_distance = math.sqrt(self.x ** 2 + self.z ** 2)
if ground_distance:
alpha1 = -math.asin(self.x / ground_distance) / math.pi * 180
alpha2 = math.acos(self.z / ground_distance) / math.pi * 180
if alpha2 > 90:
yaw = 180 - alpha1
else:
yaw = alpha1
pitch = math.atan2(-self.y, ground_distance) / math.pi * 180
else:
yaw = 0
y = round(self.y)
if y > 0:
pitch = -90
elif y < 0:
pitch = 90
else:
pitch = 0
return YawPitch(yaw, pitch) | Calculate the yaw and pitch of this vector |
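To make the conventions concrete, here is the same math as a standalone sketch returning a plain tuple (Vector3/YawPitch are left out on purpose), followed by a few sample directions:

import math

def yaw_pitch(x, y, z):
    ground = math.sqrt(x * x + z * z)
    if not ground:
        # Looking straight up or down (or the zero vector).
        return 0, (-90 if y > 0 else 90 if y < 0 else 0)
    alpha1 = -math.asin(x / ground) / math.pi * 180
    alpha2 = math.acos(z / ground) / math.pi * 180
    yaw = 180 - alpha1 if alpha2 > 90 else alpha1
    pitch = math.atan2(-y, ground) / math.pi * 180
    return yaw, pitch

print(yaw_pitch(0, 0, 1))  # yaw 0, pitch 0   - looking along +z
print(yaw_pitch(1, 0, 0))  # yaw -90, pitch 0 - looking along +x
print(yaw_pitch(0, 1, 0))  # yaw 0, pitch -90 - looking straight up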
def unit_vector(self):
x = -math.cos(self.rpitch) * math.sin(self.ryaw)
y = -math.sin(self.rpitch)
z = math.cos(self.rpitch) * math.cos(self.ryaw)
return Vector3(x, y, z) | Generate a unit vector (norm = 1) |
def total_ingredient_amounts(self):
totals = defaultdict(int)
for id, meta, amount in self.ingredients:
totals[(id, meta)] += amount
return totals | Returns:
dict: In the form { (item_id, metadata) -> amount } |
def ingredient_positions(self):
positions = defaultdict(list)
for y, row in enumerate(self.in_shape):
for x, (item_id, metadata, amount) in enumerate(row):
positions[(item_id, metadata)].append((x, y, amount))
return positions | Returns:
dict: In the form { (item_id, metadata) -> [(x, y, amount), ...] } |
def make_slot_check(wanted):
if isinstance(wanted, types.FunctionType):
return wanted # just forward the slot check function
if isinstance(wanted, int):
item, meta = wanted, None
elif isinstance(wanted, Slot):
item, meta = wanted.item_id, wanted.damage # TODO compare NBT
elif isinstance(wanted, (Item, Block)):
item, meta = wanted.id, wanted.metadata
elif isinstance(wanted, str):
item_or_block = get_item_or_block(wanted, init=True)
item, meta = item_or_block.id, item_or_block.metadata
else: # wanted is (id, meta)
try:
item, meta = wanted
except TypeError:
raise ValueError('Illegal args for make_slot_check(): %s' % wanted)
return lambda slot: item == slot.item_id and meta in (None, slot.damage) | Creates and returns a function that takes a slot
and checks if it matches the wanted item.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata) |
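A quick illustration using a minimal stand-in for Slot (a hypothetical namedtuple carrying only the two attributes the returned check reads):

from collections import namedtuple

FakeSlot = namedtuple('FakeSlot', 'item_id damage')  # stand-in, not the real Slot

check = make_slot_check(5)          # wanted by item ID only
print(check(FakeSlot(5, 3)))        # True  (meta is None, so any damage matches)
print(check(FakeSlot(6, 0)))        # False

check = make_slot_check((35, 14))   # wanted as (itemID, metadata)
print(check(FakeSlot(35, 14)))      # True
print(check(FakeSlot(35, 0)))       # False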
def _make_window(window_dict):
cls_name = '%sWindow' % camel_case(str(window_dict['name']))
bases = (Window,)
attrs = {
'__module__': sys.modules[__name__],
'name': str(window_dict['name']),
'inv_type': str(window_dict['id']),
'inv_data': window_dict,
}
# creates function-local index and size variables
def make_slot_method(index, size=1):
if size == 1:
return lambda self: self.slots[index]
else:
return lambda self: self.slots[index:(index + size)]
for slots in window_dict.get('slots', []):
index = slots['index']
size = slots.get('size', 1)
attr_name = snake_case(str(slots['name']))
attr_name += '_slot' if size == 1 else '_slots'
slots_method = make_slot_method(index, size)
slots_method.__name__ = attr_name
attrs[attr_name] = property(slots_method)
for i, prop_name in enumerate(window_dict.get('properties', [])):
def make_prop_method(i):
return lambda self: self.properties[i]
prop_method = make_prop_method(i)
prop_name = snake_case(str(prop_name))
prop_method.__name__ = prop_name
attrs[prop_name] = property(prop_method)
cls = type(cls_name, bases, attrs)
assert not hasattr(sys.modules[__name__], cls_name), \
'Window "%s" already registered at %s' % (cls_name, __name__)
setattr(sys.modules[__name__], cls_name, cls)
return cls | Creates a new class for that window and registers it at this module. |
def get_dict(self):
data = {'id': self.item_id}
if self.item_id != constants.INV_ITEMID_EMPTY:
data['damage'] = self.damage
data['amount'] = self.amount
if self.nbt is not None:
data['enchants'] = self.nbt
return data | Formats the slot for network packing. |
def on_success(self, inv_plugin, emit_set_slot):
self.dirty = set()
self.apply(inv_plugin)
for changed_slot in self.dirty:
emit_set_slot(changed_slot) | Called when the click was successful
and should be applied to the inventory.
Args:
inv_plugin (InventoryPlugin): inventory plugin instance
emit_set_slot (func): function to signal a slot change,
should be InventoryPlugin().emit_set_slot |
def authenticate(self):
endpoint = '/authenticate'
payload = {
'agent': {
'name': 'Minecraft',
'version': self.ygg_version,
},
'username': self.username,
'password': self.password,
'clientToken': self.client_token,
}
rep = self._ygg_req(endpoint, payload)
if not rep or 'error' in rep:
return False
self.access_token = rep['accessToken']
self.client_token = rep['clientToken']
self.available_profiles = rep['availableProfiles']
self.selected_profile = rep['selectedProfile']
return True | Generate an access token using a username and password. Any existing
client token is invalidated if not provided.
Returns:
dict: Response or error dict |
def refresh(self):
endpoint = '/refresh'
payload = {
'accessToken': self.access_token,
'clientToken': self.client_token,
}
rep = self._ygg_req(endpoint, payload)
if not rep or 'error' in rep:
return False
self.access_token = rep['accessToken']
self.client_token = rep['clientToken']
self.selected_profile = rep['selectedProfile']
return True | Generate an access token with a client/access token pair. Used
access token is invalidated.
Returns:
dict: Response or error dict |
def invalidate(self):
endpoint = '/invalidate'
payload = {
'accessToken': self.access_token,
'clientToken': self.client_token,
}
self._ygg_req(endpoint, payload)
self.client_token = ''
self.access_token = ''
self.available_profiles = []
self.selected_profile = {}
return True | Invalidate access tokens with a client/access token pair
Returns:
dict: Empty or error dict |
def validate(self):
endpoint = '/validate'
payload = dict(accessToken=self.access_token)
rep = self._ygg_req(endpoint, payload)
return not bool(rep) | Check if an access token is valid
Returns:
dict: Empty or error dict |
def total_stored(self, wanted, slots=None):
if slots is None:
slots = self.window.slots
wanted = make_slot_check(wanted)
return sum(slot.amount for slot in slots if wanted(slot)) | Calculates the total number of items of that type
in the current window or given slot range.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata) |
def find_slot(self, wanted, slots=None):
for slot in self.find_slots(wanted, slots):
return slot
return None | Searches the given slots or, if not given,
active hotbar slot, hotbar, inventory, open window in this order.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
Returns:
Optional[Slot]: The first slot containing the item
or None if not found. |
def find_slots(self, wanted, slots=None):
if slots is None:
slots = self.inv_slots_preferred + self.window.window_slots
wanted = make_slot_check(wanted)
for slot in slots:
if wanted(slot):
yield slot | Yields all slots containing the item.
Searches the given slots or, if not given,
active hotbar slot, hotbar, inventory, open window in this order.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata) |
def click_slot(self, slot, right=False):
if isinstance(slot, int):
slot = self.window.slots[slot]
button = constants.INV_BUTTON_RIGHT \
if right else constants.INV_BUTTON_LEFT
return self.send_click(windows.SingleClick(slot, button)) | Left-click or right-click the slot.
Args:
slot (Slot): The clicked slot. Can be ``Slot`` instance or integer.
Set to ``inventory.cursor_slot``
for clicking outside the window. |
def drop_slot(self, slot=None, drop_stack=False):
if slot is None:
if self.cursor_slot.is_empty:
slot = self.active_slot
else:
slot = self.cursor_slot
elif isinstance(slot, int): # also allow slot nr
slot = self.window.slots[slot]
if slot == self.cursor_slot:
# dropping items from cursor is done via normal click
return self.click_slot(self.cursor_slot, not drop_stack)
return self.send_click(windows.DropClick(slot, drop_stack)) | Drop one or all items of the slot.
Does not wait for confirmation from the server. If you want that,
use a ``Task`` and ``yield inventory.async.drop_slot()`` instead.
If ``slot`` is None, drops the ``cursor_slot`` or, if that's empty,
the currently held item (``active_slot``).
Args:
slot (Optional[Slot]): The dropped slot. Can be None, integer,
or ``Slot`` instance.
Returns:
int: The action ID of the click |
def inv_slots_preferred(self):
slots = [self.active_slot]
slots.extend(slot for slot in self.window.hotbar_slots
if slot != self.active_slot)
slots.extend(self.window.inventory_slots)
return slots | List of all available inventory slots in the preferred search order.
Does not include the additional slots from the open window.
1. active slot
2. remainder of the hotbar
3. remainder of the persistent inventory |
def get_block_entity_data(self, pos_or_x, y=None, z=None):
if None not in (y, z): # x y z supplied
pos_or_x = pos_or_x, y, z
coord_tuple = tuple(int(floor(c)) for c in pos_or_x)
return self.block_entities.get(coord_tuple, None) | Access block entity data.
Returns:
BlockEntityData subclass instance or
None if no block entity data is stored for that location. |
def set_block_entity_data(self, pos_or_x, y=None, z=None, data=None):
if None not in (y, z): # x y z supplied
pos_or_x = pos_or_x, y, z
coord_tuple = tuple(int(floor(c)) for c in pos_or_x)
old_data = self.block_entities.get(coord_tuple, None)
self.block_entities[coord_tuple] = data
return old_data | Update block entity data.
Returns:
Old data if block entity data was already stored for that location,
None otherwise. |
def store_or_drop(self):
inv = self.inventory
if inv.cursor_slot.is_empty: # nothing to drop
raise StopIteration(None)
storage = inv.inv_slots_preferred
if inv.window.is_storage:
storage += inv.window.window_slots
first_empty_slot = inv.find_slot(constants.INV_ITEMID_EMPTY, storage)
if first_empty_slot is not None:
yield self.click_slot(first_empty_slot)
else:
yield self.drop_slot(drop_stack=True)
if not inv.cursor_slot.is_empty:
raise TaskFailed('Store or Drop failed: cursor is not empty')
raise StopIteration(first_empty_slot) | Stores the cursor item or drops it if the inventory is full.
Tip: look directly up or down before calling this, so you can
pick up the dropped item when the inventory frees up again.
Returns:
Slot: The slot used to store it, or None if dropped. |
def parse_vlq(self, segment):
values = []
cur, shift = 0, 0
for c in segment:
val = B64[ord(c)]
# Each character is 6 bits:
# 5 of value and the high bit is the continuation.
val, cont = val & 0b11111, val >> 5
cur += val << shift
shift += 5
if not cont:
# The low bit of the unpacked value is the sign.
cur, sign = cur >> 1, cur & 1
if sign:
cur = -cur
values.append(cur)
cur, shift = 0, 0
if cur or shift:
raise SourceMapDecodeError('leftover cur/shift in vlq decode')
return values | Parse a string of VLQ-encoded data.
Returns:
a list of integers. |
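``B64`` maps a character's ordinal to its position in the base64 alphabet. Restated as a standalone function (same logic, plus the lookup table) it can be checked against the classic source-map segment 'AAgBC':

B64_CHARS = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
             'abcdefghijklmnopqrstuvwxyz0123456789+/')
B64 = {ord(c): i for i, c in enumerate(B64_CHARS)}

def parse_vlq(segment):
    values, cur, shift = [], 0, 0
    for c in segment:
        val = B64[ord(c)]
        val, cont = val & 0b11111, val >> 5  # 5 value bits + 1 continuation bit
        cur += val << shift
        shift += 5
        if not cont:
            cur, sign = cur >> 1, cur & 1    # low bit of the value is the sign
            values.append(-cur if sign else cur)
            cur, shift = 0, 0
    return values

print(parse_vlq('AAgBC'))  # [0, 0, 16, 1]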
def discover(source):
"Given a JavaScript file, find the sourceMappingURL line"
source = source.splitlines()
# Source maps are only going to exist at either the top or bottom of the document.
# Technically, there isn't anything indicating *where* it should exist, so we
# are generous and assume it's somewhere either in the first or last 5 lines.
# If it's somewhere else in the document, you're probably doing it wrong.
if len(source) > 10:
possibilities = source[:5] + source[-5:]
else:
possibilities = source
for line in set(possibilities):
pragma = line[:21]
if pragma == '//# sourceMappingURL=' or pragma == '//@ sourceMappingURL=':
# We want everything AFTER the pragma, which is 21 chars long
return line[21:].rstrip()
# XXX: Return None or raise an exception?
return None | Given a JavaScript file, find the sourceMappingURL line |
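A quick check with a hypothetical two-line JavaScript source:

js = "console.log('hi');\n//# sourceMappingURL=app.min.js.map\n"
print(discover(js))  # app.min.js.map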
def bootstrap(options):
try:
import virtualenv
except ImportError, e:
raise RuntimeError("virtualenv is needed for bootstrap")
bdir = options.bootstrap_dir
if not os.path.exists(bdir):
os.makedirs(bdir)
bscript = "boostrap.py"
options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
bscript)
options.virtualenv.no_site_packages = True
options.bootstrap.no_site_packages = True
print options.virtualenv.script_name
call_task('paver.virtual.bootstrap')
sh('cd %s; %s %s' % (bdir, sys.executable, bscript)) | create virtualenv in ./bootstrap |
def clean():
d = ['build', 'dist', 'scikits.audiolab.egg-info', HTML_DESTDIR,
PDF_DESTDIR]
for i in d:
paver.path.path(i).rmtree()
(paver.path.path('docs') / options.sphinx.builddir).rmtree() | Remove build, dist, egg-info garbage. |
def add_attendees(self, attendees, required=True):
new_attendees = self._build_resource_dictionary(attendees, required=required)
for email in new_attendees:
self._attendees[email] = new_attendees[email]
self._dirty_attributes.add(u'attendees') | Adds new attendees to the event.
*attendees* can be a list of email addresses or :class:`ExchangeEventAttendee` objects. |
def remove_attendees(self, attendees):
attendees_to_delete = self._build_resource_dictionary(attendees)
for email in attendees_to_delete.keys():
if email in self._attendees:
del self._attendees[email]
self._dirty_attributes.add(u'attendees') | Removes attendees from the event.
*attendees* can be a list of email addresses or :class:`ExchangeEventAttendee` objects. |
def add_resources(self, resources):
new_resources = self._build_resource_dictionary(resources)
for key in new_resources:
self._resources[key] = new_resources[key]
self._dirty_attributes.add(u'resources') | Adds new resources to the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects. |
def remove_resources(self, resources):
resources_to_delete = self._build_resource_dictionary(resources)
for email in resources_to_delete.keys():
if email in self._resources:
del self._resources[email]
self._dirty_attributes.add(u'resources') | Removes resources from the event.
*resources* can be a list of email addresses or :class:`ExchangeEventAttendee` objects. |
def validate(self):
if not self.start:
raise ValueError("Event has no start date")
if not self.end:
raise ValueError("Event has no end date")
if self.end < self.start:
raise ValueError("Start date is after end date")
if self.reminder_minutes_before_start and not isinstance(self.reminder_minutes_before_start, int):
raise TypeError("reminder_minutes_before_start must be of type int")
if self.is_all_day and not isinstance(self.is_all_day, bool):
raise TypeError("is_all_day must be of type bool") | Validates that all required fields are present |
def _writer_factory(name, format, def_fs, descr):
def basic_writer(data, filename, fs = def_fs, enc = format.encoding):
"""Common "template" to all write functions."""
if np.ndim(data) <= 1:
nc = 1
elif np.ndim(data) == 2:
nc = data.shape[1]
else:
RuntimeError("Only rank 0, 1, and 2 arrays supported as audio data")
uformat = Format(format.file_format, encoding=enc,
endianness=format.endianness)
hdl = Sndfile(filename, 'w', uformat, nc, fs)
try:
hdl.write_frames(data)
finally:
hdl.close()
doc = \
"""Simple writer for %(format)s audio files.
Parameters
----------
data : array
a rank 1 (mono) or 2 (one channel per col) numpy array
filename : str
audio file name
fs : scalar
sampling rate in Hz (%(def_fs)s by default)
enc : str
The encoding such as 'pcm16', etc...(%(def_enc)s by default). A
list of supported encodings can be queried through the function
available_encodings.
Notes
-----
OVERWRITES EXISTING FILE !
These functions are similar to matlab's wavwrite/auwrite and the
like. For total control over options, such as endianness, appending data
to an existing file, etc... you should use Sndfile class instances
instead
See also
--------
available_encodings, Sndfile, Format""" \
% {'format' : str(descr), 'def_fs': def_fs,
'def_enc': format.encoding}
basic_writer.__doc__ = doc
basic_writer.__name__ = name
return basic_writer | Create a writer function with the file format described by format, default
sampling rate def_fs, and docstring descr. |
def _reader_factory(name, filetype, descr):
def basic_reader(filename, last = None, first = 0):
"""Common "template" to all read functions."""
hdl = Sndfile(filename, 'r')
try:
if not hdl.format.file_format == filetype:
raise ValueError, "%s is not a %s file (is %s)" \
% (filename, filetype, hdl.format.file_format)
fs = hdl.samplerate
enc = hdl.encoding
# Set the pointer to start position
nf = hdl.seek(first, 1)
if not nf == first:
raise IOError("Error while seeking at starting position")
if last is None:
nframes = hdl.nframes - first
data = hdl.read_frames(nframes)
else:
data = hdl.read_frames(last)
finally:
hdl.close()
return data, fs, enc
doc = \
"""Simple reader for %(format)s audio files.
Parameters
----------
filename : str
Name of the file to read
last : int
Last frame to read. If None, this is equal to the number of frames in
the file.
first : int
First frame to read. If 0, means starting from the beginning of the
file.
Returns
-------
data : array
the read data (one column per channel)
fs : int
the sampling rate
enc : str
the encoding of the file, such as 'pcm16', 'float32', etc...
Notes
-----
For total control over options, such as output's dtype, etc...,
you should use Sndfile class instances instead""" % {'format': str(descr)}
basic_reader.__doc__ = doc
basic_reader.__name__ = name
return basic_reader | Factory for reader functions ala matlab. |
def _xpath_to_dict(self, element, property_map, namespace_map):
result = {}
log.info(etree.tostring(element, pretty_print=True))
for key in property_map:
item = property_map[key]
log.info(u'Pulling xpath {xpath} into key {key}'.format(key=key, xpath=item[u'xpath']))
nodes = element.xpath(item[u'xpath'], namespaces=namespace_map)
if nodes:
result_for_node = []
for node in nodes:
cast_as = item.get(u'cast', None)
if cast_as == u'datetime':
result_for_node.append(self._parse_date(node.text))
elif cast_as == u'date_only_naive':
result_for_node.append(self._parse_date_only_naive(node.text))
elif cast_as == u'int':
result_for_node.append(int(node.text))
elif cast_as == u'bool':
if node.text.lower() == u'true':
result_for_node.append(True)
else:
result_for_node.append(False)
else:
result_for_node.append(node.text)
if not result_for_node:
result[key] = None
elif len(result_for_node) == 1:
result[key] = result_for_node[0]
else:
result[key] = result_for_node
return result | property_map = {
u'name' : { u'xpath' : u't:Mailbox/t:Name'},
u'email' : { u'xpath' : u't:Mailbox/t:EmailAddress'},
u'response' : { u'xpath' : u't:ResponseType'},
u'last_response': { u'xpath' : u't:LastResponseTime', u'cast': u'datetime'},
}
This runs the given xpath on the node and returns a dictionary |
def _parse_response_for_all_events(self, response):
items = response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if not items:
items = response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES)
if items:
self.count = len(items)
log.debug(u'Found %s items' % self.count)
for item in items:
self._add_event(xml=soap_request.M.Items(deepcopy(item)))
else:
log.debug(u'No calendar items found with search parameters.')
return self | This function will retrieve *most* of the event data, excluding Organizer & Attendee details |
def load_all_details(self):
log.debug(u"Loading all details")
if self.count > 0:
# Now, empty out the events to prevent duplicates!
del(self.events[:])
# Send the SOAP request with the list of exchange ID values.
log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
body = soap_request.get_item(exchange_id=self.event_ids, format=u'AllProperties')
response_xml = self.service.send(body)
# Re-parse the results for all the details!
self._parse_response_for_all_events(response_xml)
return self | This function will execute all the event lookups for known events.
This is intended for use when you want to have a completely populated event entry, including
Organizer & Attendee details. |
def resend_invitations(self):
if not self.id:
raise TypeError(u"You can't send invites for an event that hasn't been created yet.")
# Under the hood, this is just an .update() but with no attributes changed.
# We're going to enforce that by checking if there are any changed attributes and bail if there are
if self._dirty_attributes:
raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
self.service.send(body)
return self | Resends invites for an event. ::
event = service.calendar().get_event(id='KEY HERE')
event.resend_invitations()
Anybody who has not declined this meeting will get a new invite. |
def update(self, calendar_item_update_operation_type=u'SendToAllAndSaveCopy', **kwargs):
if not self.id:
raise TypeError(u"You can't update an event that hasn't been created yet.")
if 'send_only_to_changed_attendees' in kwargs:
warnings.warn(
"The argument send_only_to_changed_attendees is deprecated. Use calendar_item_update_operation_type instead.",
DeprecationWarning,
) # 20140502
if kwargs['send_only_to_changed_attendees']:
calendar_item_update_operation_type = u'SendToChangedAndSaveCopy'
VALID_UPDATE_OPERATION_TYPES = (
u'SendToNone', u'SendOnlyToAll', u'SendOnlyToChanged',
u'SendToAllAndSaveCopy', u'SendToChangedAndSaveCopy',
)
if calendar_item_update_operation_type not in VALID_UPDATE_OPERATION_TYPES:
raise ValueError('calendar_item_update_operation_type has unknown value')
self.validate()
if self._dirty_attributes:
log.debug(u"Updating these attributes: %r" % self._dirty_attributes)
self.refresh_change_key()
body = soap_request.update_item(self, self._dirty_attributes, calendar_item_update_operation_type=calendar_item_update_operation_type)
self.service.send(body)
self._reset_dirty_attributes()
else:
log.info(u"Update was called, but there's nothing to update. Doing nothing.")
return self | Updates an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.location = u'New location'
event.update()
If no changes to the event have been made, this method does nothing.
Notification of the change event is sent to all users. If you wish to just notify people who were
added, specify ``send_only_to_changed_attendees=True``. |
def cancel(self):
if not self.id:
raise TypeError(u"You can't delete an event that hasn't been created yet.")
self.refresh_change_key()
self.service.send(soap_request.delete_event(self))
# TODO rsanders high - check return status to make sure it was actually sent
return None | Cancels an event in Exchange. ::
event = service.calendar().get_event(id='KEY HERE')
event.cancel()
This will send notifications to anyone who has not declined the meeting. |
def move_to(self, folder_id):
if not folder_id:
raise TypeError(u"You can't move an event to a non-existant folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move an event that hasn't been created yet.")
self.refresh_change_key()
response_xml = self.service.send(soap_request.move_event(self, folder_id))
new_id, new_change_key = self._parse_id_and_change_key_from_response(response_xml)
if not new_id:
raise ValueError(u"MoveItem returned success but requested item not moved")
self._id = new_id
self._change_key = new_change_key
self.calendar_id = folder_id
return self | :param str folder_id: The Calendar ID you want to move the event to.
Moves an event to a different folder (calendar). ::
event = service.calendar().get_event(id='KEY HERE')
event.move_to(folder_id='NEW CALENDAR KEY HERE') |
def get_master(self):
if self.type != 'Occurrence':
raise InvalidEventType("get_master method can only be called on an 'Occurrence' event type")
body = soap_request.get_master(exchange_id=self._id, format=u"AllProperties")
response_xml = self.service.send(body)
return Exchange2010CalendarEvent(service=self.service, xml=response_xml) | get_master()
:raises InvalidEventType: When this method is called on an event that is not an Occurrence type.
This will return the master event for the occurrence.
**Examples**::
event = service.calendar().get_event(id='<event_id>')
print event.type # If it prints out 'Occurrence' then that means we could get the master.
master = event.get_master()
print master.type # Will print out 'RecurringMaster'. |
def get_occurrence(self, instance_index):
if not all([isinstance(i, int) for i in instance_index]):
raise TypeError("instance_index must be an iterable of ints")
if self.type != 'RecurringMaster':
raise InvalidEventType("get_occurrence method can only be called on a 'RecurringMaster' event type")
body = soap_request.get_occurrence(exchange_id=self._id, instance_index=instance_index, format=u"AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events | get_occurrence(instance_index)
:param iterable instance_index: This should be a tuple or list of integers which correspond to occurrences.
:raises TypeError: When instance_index is not an iterable of ints.
:raises InvalidEventType: When this method is called on an event that is not a RecurringMaster type.
This will return a list of occurrence events.
**Examples**::
master = service.calendar().get_event(id='<event_id>')
# The following will return the first 20 occurrences in the recurrence.
# If there are not 20 occurrences, it will only return what it finds.
occurrences = master.get_occurrence(range(1,21))
for occurrence in occurrences:
print occurrence.start |
def conflicting_events(self):
if not self.conflicting_event_ids:
return []
body = soap_request.get_item(exchange_id=self.conflicting_event_ids, format="AllProperties")
response_xml = self.service.send(body)
items = response_xml.xpath(u'//m:GetItemResponseMessage/m:Items', namespaces=soap_request.NAMESPACES)
events = []
for item in items:
event = Exchange2010CalendarEvent(service=self.service, xml=deepcopy(item))
if event.id:
events.append(event)
return events | conflicting_events()
This will return a list of conflicting events.
**Example**::
event = service.calendar().get_event(id='<event_id>')
for conflict in event.conflicting_events():
print conflict.subject |
def find_folder(self, parent_id):
body = soap_request.find_folder(parent_id=parent_id, format=u'AllProperties')
response_xml = self.service.send(body)
return self._parse_response_for_find_folder(response_xml) | find_folder(parent_id)
:param str parent_id: The parent folder to list.
This method will return a list of sub-folders of a given parent folder.
**Examples**::
# Iterate through folders within the default 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
print(folder.display_name)
# Delete all folders within the 'calendar' folder.
folders = service.folder().find_folder(parent_id='calendar')
for folder in folders:
folder.delete() |
def create(self):
self.validate()
body = soap_request.new_folder(self)
response_xml = self.service.send(body)
self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml)
return self | Creates a folder in Exchange. ::
calendar = service.folder().new_folder(
display_name=u"New Folder Name",
folder_type="CalendarFolder",
parent_id='calendar',
)
calendar.create() |
def delete(self):
if not self.id:
raise TypeError(u"You can't delete a folder that hasn't been created yet.")
body = soap_request.delete_folder(self)
response_xml = self.service.send(body) # noqa
# TODO: verify deletion
self._id = None
self._change_key = None
return None | Deletes a folder from the Exchange store. ::
folder = service.folder().get_folder(id)
print("Deleting folder: %s" % folder.display_name)
folder.delete() |
def move_to(self, folder_id):
if not folder_id:
raise TypeError(u"You can't move to a non-existent folder")
if not isinstance(folder_id, BASESTRING_TYPES):
raise TypeError(u"folder_id must be a string")
if not self.id:
raise TypeError(u"You can't move a folder that hasn't been created yet.")
response_xml = self.service.send(soap_request.move_folder(self, folder_id)) # noqa
result_id, result_key = self._parse_id_and_change_key_from_response(response_xml)
if self.id != result_id:
raise ValueError(u"MoveFolder returned success but requested folder not moved")
self.parent_id = folder_id
return self | :param str folder_id: The Folder ID of the new parent folder for this folder.
Move folder to a different location, specified by folder_id::
folder = service.folder().get_folder(id)
folder.move_to(folder_id="ID of new location's folder") |
def seek(self, offset, whence=0, mode='rw'):
try:
st = self._sndfile.seek(offset, whence, mode)
except IOError as e:
raise PyaudioIOError(str(e))
return st | Similar to the Python seek function, but takes only audio data into account.
:Parameters:
offset : int
the number of frames (eg two samples for stereo files) to move
relatively to position set by whence.
whence : int
only 0 (beginning), 1 (current) and 2 (end of the file) are
valid.
mode : string
If set to 'rw', both read and write pointers are updated. If
'r' is given, only read pointer is updated, if 'w', only the
write one is (this may of course make sense only if you open
the file in a certain mode).
Notes
-----
- the offset only takes audio data into account.
- if an invalid seek is given (beyond or before the file), a
PyaudioIOError is raised. |
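A short sketch of relative and absolute seeks. The wrapper's constructor is not shown here, so the ``Sndfile('input.wav', 'r')`` call and the ``samplerate`` attribute below are assumptions borrowed from scikits.audiolab::

    f = Sndfile('input.wav', 'r')           # assumed constructor
    f.seek(f.samplerate, whence=0)          # jump one second past the start (both pointers)
    f.seek(-f.samplerate // 2, whence=1)    # step back half a second from the current position
    f.seek(0, whence=2, mode='r')           # move only the read pointer to the end of the file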
def read_frames(self, nframes, dtype=np.float64):
return self._sndfile.read_frames(nframes, dtype) | Read nframes frames of the file.
:Parameters:
nframes : int
number of frames to read.
dtype : numpy dtype
dtype of the returned array containing read data (see note).
Notes
-----
- read_frames updates the read pointer.
- One column is one channel (one row per channel after 0.9)
- if floats are requested when the file contains integer data, you will
get normalized data (that is, the maximum possible integer will be 1.0,
and the minimum possible value -1.0).
- if integers are requested when the file contains floating point data,
the results may be wrong because of an ambiguity: if the floating point
data are normalized, you can get a file containing only zeros!
Getting integer data from files encoded in normalized floating point
is not supported (yet: sndfile supports it). |
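A minimal block-reading loop built on the signature above. The constructor and the ``nframes`` attribute are assumptions borrowed from scikits.audiolab's ``Sndfile``::

    import numpy as np

    f = Sndfile('input.wav', 'r')           # assumed constructor
    block = 4096
    remaining = f.nframes                   # assumed attribute: total frame count
    while remaining > 0:
        n = min(block, remaining)
        data = f.read_frames(n, dtype=np.float64)   # advances the read pointer by n frames
        remaining -= n
        # ... process `data` here (channel layout as described in the notes above) ...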
def write_frames(self, input, nframes = -1):
if nframes == -1:
if input.ndim == 1:
nframes = input.size
elif input.ndim == 2:
nframes = input.shape[0]
else:
raise ValueError("Input has to be rank 1 (mono) or rank 2 "\
"(multi-channels)")
return self._sndfile.write_frames(input[:nframes,...]) | write data to file.
:Parameters:
input : ndarray
array containing data to write.
nframes : int
number of frames to write.
Notes
-----
- One column is one channel (one row per channel after 0.9)
- updates the write pointer.
- if floats are given when the file contains integer data, you should
provide normalized data (that is, the range [-1..1] will be written as the
maximum range allowed by the integer bitwidth). |
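A hedged example of writing one second of a normalized sine tone, assuming the underlying library is scikits.audiolab (the ``Format``/``Sndfile`` constructor calls are not part of the snippet above)::

    import numpy as np
    from scikits.audiolab import Format, Sndfile    # assumed underlying library

    samplerate = 44100
    t = np.arange(samplerate, dtype=np.float64) / samplerate
    tone = 0.5 * np.sin(2 * np.pi * 440.0 * t)      # kept inside [-1..1] for integer formats

    out = Sndfile('tone.wav', 'w', Format('wav', 'pcm16'), 1, samplerate)
    out.write_frames(tone)                          # nframes defaults to the whole array
    out.close()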