body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
f1cf716decae4fe9dda10739db6a2e5e58dc9ba049463b17f4bd421099a1cf4a | def get_anchor_gt(all_img_data, class_count, C, img_length_calc_function, backend, mode='train'):
'\n\tFunction inputs:\n\timage data\n\tclass count statistics\n\ttraining config class\n\tfunction that computes the output feature map size\n\twhich backend Keras uses\n\twhether in training mode\n\n\tFunction outputs:\n\tthe image\n\tdata objects: the first marks whether an object is present, the second is the regression gradients\n\taugmented image info\n\t'
sample_selector = SampleSelector(class_count)
while True:
if (mode == 'train'):
np.random.shuffle(all_img_data)
for img_data in all_img_data:
try:
if (C.balanced_classes and sample_selector.skip_sample_for_balanced_class(img_data)):
continue
if (mode == 'train'):
(img_data_aug, x_img) = data_augment.augment(img_data, C, augment=True)
else:
(img_data_aug, x_img) = data_augment.augment(img_data, C, augment=False)
(width, height) = (img_data_aug['width'], img_data_aug['height'])
(rows, cols, _) = x_img.shape
assert (cols == width)
assert (rows == height)
(resized_width, resized_height) = get_new_img_size(width, height, C.im_size)
x_img = cv2.resize(x_img, (resized_width, resized_height), interpolation=cv2.INTER_CUBIC)
try:
(y_rpn_cls, y_rpn_regr) = calc_rpn(C, img_data_aug, width, height, resized_width, resized_height, img_length_calc_function)
except Exception:
continue
x_img = x_img[:, :, (2, 1, 0)]
x_img = x_img.astype(np.float32)
x_img[:, :, 0] -= C.img_channel_mean[0]
x_img[:, :, 1] -= C.img_channel_mean[1]
x_img[:, :, 2] -= C.img_channel_mean[2]
x_img /= C.img_scaling_factor
x_img = np.transpose(x_img, (2, 0, 1))
x_img = np.expand_dims(x_img, axis=0)
y_rpn_regr[:, (y_rpn_regr.shape[1] // 2):, :, :] *= C.std_scaling
if (backend == 'tf'):
x_img = np.transpose(x_img, (0, 2, 3, 1))
y_rpn_cls = np.transpose(y_rpn_cls, (0, 2, 3, 1))
y_rpn_regr = np.transpose(y_rpn_regr, (0, 2, 3, 1))
(yield (np.copy(x_img), [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], img_data_aug))
except Exception as e:
print(e)
continue | Function inputs:
image data
class count statistics
training config class
function that computes the output feature map size
which backend Keras uses
whether in training mode
Function outputs:
the image
data objects: the first marks whether an object is present, the second is the regression gradients
augmented image info | keras_frcnn/data_generators.py | get_anchor_gt | zouzhen/simple-faster-rcnn | 0 | python | def get_anchor_gt(all_img_data, class_count, C, img_length_calc_function, backend, mode='train'):
'\n\tFunction inputs:\n\timage data\n\tclass count statistics\n\ttraining config class\n\tfunction that computes the output feature map size\n\twhich backend Keras uses\n\twhether in training mode\n\n\tFunction outputs:\n\tthe image\n\tdata objects: the first marks whether an object is present, the second is the regression gradients\n\taugmented image info\n\t'
sample_selector = SampleSelector(class_count)
while True:
if (mode == 'train'):
np.random.shuffle(all_img_data)
for img_data in all_img_data:
try:
if (C.balanced_classes and sample_selector.skip_sample_for_balanced_class(img_data)):
continue
if (mode == 'train'):
(img_data_aug, x_img) = data_augment.augment(img_data, C, augment=True)
else:
(img_data_aug, x_img) = data_augment.augment(img_data, C, augment=False)
(width, height) = (img_data_aug['width'], img_data_aug['height'])
(rows, cols, _) = x_img.shape
assert (cols == width)
assert (rows == height)
(resized_width, resized_height) = get_new_img_size(width, height, C.im_size)
x_img = cv2.resize(x_img, (resized_width, resized_height), interpolation=cv2.INTER_CUBIC)
try:
(y_rpn_cls, y_rpn_regr) = calc_rpn(C, img_data_aug, width, height, resized_width, resized_height, img_length_calc_function)
except Exception:
continue
x_img = x_img[:, :, (2, 1, 0)]
x_img = x_img.astype(np.float32)
x_img[:, :, 0] -= C.img_channel_mean[0]
x_img[:, :, 1] -= C.img_channel_mean[1]
x_img[:, :, 2] -= C.img_channel_mean[2]
x_img /= C.img_scaling_factor
x_img = np.transpose(x_img, (2, 0, 1))
x_img = np.expand_dims(x_img, axis=0)
y_rpn_regr[:, (y_rpn_regr.shape[1] // 2):, :, :] *= C.std_scaling
if (backend == 'tf'):
x_img = np.transpose(x_img, (0, 2, 3, 1))
y_rpn_cls = np.transpose(y_rpn_cls, (0, 2, 3, 1))
y_rpn_regr = np.transpose(y_rpn_regr, (0, 2, 3, 1))
(yield (np.copy(x_img), [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], img_data_aug))
except Exception as e:
print(e)
continue | def get_anchor_gt(all_img_data, class_count, C, img_length_calc_function, backend, mode='train'):
'\n\tFunction inputs:\n\timage data\n\tclass count statistics\n\ttraining config class\n\tfunction that computes the output feature map size\n\twhich backend Keras uses\n\twhether in training mode\n\n\tFunction outputs:\n\tthe image\n\tdata objects: the first marks whether an object is present, the second is the regression gradients\n\taugmented image info\n\t'
sample_selector = SampleSelector(class_count)
while True:
if (mode == 'train'):
np.random.shuffle(all_img_data)
for img_data in all_img_data:
try:
if (C.balanced_classes and sample_selector.skip_sample_for_balanced_class(img_data)):
continue
if (mode == 'train'):
(img_data_aug, x_img) = data_augment.augment(img_data, C, augment=True)
else:
(img_data_aug, x_img) = data_augment.augment(img_data, C, augment=False)
(width, height) = (img_data_aug['width'], img_data_aug['height'])
(rows, cols, _) = x_img.shape
assert (cols == width)
assert (rows == height)
(resized_width, resized_height) = get_new_img_size(width, height, C.im_size)
x_img = cv2.resize(x_img, (resized_width, resized_height), interpolation=cv2.INTER_CUBIC)
try:
(y_rpn_cls, y_rpn_regr) = calc_rpn(C, img_data_aug, width, height, resized_width, resized_height, img_length_calc_function)
except Exception:
continue
x_img = x_img[:, :, (2, 1, 0)]
x_img = x_img.astype(np.float32)
x_img[:, :, 0] -= C.img_channel_mean[0]
x_img[:, :, 1] -= C.img_channel_mean[1]
x_img[:, :, 2] -= C.img_channel_mean[2]
x_img /= C.img_scaling_factor
x_img = np.transpose(x_img, (2, 0, 1))
x_img = np.expand_dims(x_img, axis=0)
y_rpn_regr[:, (y_rpn_regr.shape[1] // 2):, :, :] *= C.std_scaling
if (backend == 'tf'):
x_img = np.transpose(x_img, (0, 2, 3, 1))
y_rpn_cls = np.transpose(y_rpn_cls, (0, 2, 3, 1))
y_rpn_regr = np.transpose(y_rpn_regr, (0, 2, 3, 1))
(yield (np.copy(x_img), [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], img_data_aug))
except Exception as e:
print(e)
continue<|docstring|>Function inputs:
image data
class count statistics
training config class
function that computes the output feature map size
which backend Keras uses
whether in training mode
Function outputs:
the image
data objects: the first marks whether an object is present, the second is the regression gradients
augmented image info<|endoftext|> |
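A minimal consumption sketch for the generator above; `train_imgs`, `classes_count`, `C`, and `get_img_output_length` are assumptions taken from the surrounding keras_frcnn codebase, not names defined in this row.

```python
# Hypothetical usage sketch: get_anchor_gt is an infinite generator, so it is
# stepped with next() or handed to Keras' fit_generator. All argument names
# below are assumed from the keras_frcnn codebase.
data_gen_train = get_anchor_gt(train_imgs, classes_count, C,
                               get_img_output_length, backend='tf', mode='train')
X, [y_rpn_cls, y_rpn_regr], img_data_aug = next(data_gen_train)
print(X.shape, y_rpn_cls.shape, y_rpn_regr.shape)
```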
f990972f958a1fc0a5cc4c8fc3bf497ad9147c668fb2a41911526518a586e651 | def skip_sample_for_balanced_class(self, img_data):
"\n\t\t当输入一张图片时,决定是否要跳过该图片。该图片中包含需要的类返回False,否则返回True\n\t\t【注:cls_name = bbox['class']这是如何用键来取出值】\n\t\t"
class_in_img = False
for bbox in img_data['bboxes']:
cls_name = bbox['class']
if (cls_name == self.curr_class):
class_in_img = True
self.curr_class = next(self.class_cycle)
break
if class_in_img:
return False
else:
return True | Given an input image, decide whether to skip it. Returns False if the image contains the currently required class, otherwise True.
[Note: cls_name = bbox['class'] shows how to fetch a value by key] | keras_frcnn/data_generators.py | skip_sample_for_balanced_class | zouzhen/simple-faster-rcnn | 0 | python | def skip_sample_for_balanced_class(self, img_data):
"\n\t\t当输入一张图片时,决定是否要跳过该图片。该图片中包含需要的类返回False,否则返回True\n\t\t【注:cls_name = bbox['class']这是如何用键来取出值】\n\t\t"
class_in_img = False
for bbox in img_data['bboxes']:
cls_name = bbox['class']
if (cls_name == self.curr_class):
class_in_img = True
self.curr_class = next(self.class_cycle)
break
if class_in_img:
return False
else:
return True | def skip_sample_for_balanced_class(self, img_data):
"\n\t\t当输入一张图片时,决定是否要跳过该图片。该图片中包含需要的类返回False,否则返回True\n\t\t【注:cls_name = bbox['class']这是如何用键来取出值】\n\t\t"
class_in_img = False
for bbox in img_data['bboxes']:
cls_name = bbox['class']
if (cls_name == self.curr_class):
class_in_img = True
self.curr_class = next(self.class_cycle)
break
if class_in_img:
return False
else:
return True<|docstring|>Given an input image, decide whether to skip it. Returns False if the image contains the currently required class, otherwise True.
[Note: cls_name = bbox['class'] shows how to fetch a value by key]<|endoftext|> |
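The balancing idea above can be reproduced standalone with `itertools.cycle`; the sketch below is a self-contained analogue, not the repository's own API.

```python
from itertools import cycle

# Self-contained analogue of the balancing logic above: only keep an image
# when it contains the class whose turn it is, then advance the cycle.
class_cycle = cycle(['car', 'person', 'bike'])
curr_class = next(class_cycle)

def skip_sample(bboxes, curr_class):
    return all(bbox['class'] != curr_class for bbox in bboxes)

img_bboxes = [{'class': 'person'}, {'class': 'car'}]
if not skip_sample(img_bboxes, curr_class):
    curr_class = next(class_cycle)  # advance only when the sample is kept
```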
99c18e6fb6be3937880a24bff27b2bdcbd323277a3291306359b8a2ecab40a2f | def import_visualizationProject_add(self, filename):
'table adds'
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_visualizationProject(data.data)
data.clear_data() | table adds | SBaaS_visualization/visualization_project_io.py | import_visualizationProject_add | dmccloskey/SBaaS_visualization | 0 | python | def import_visualizationProject_add(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_visualizationProject(data.data)
data.clear_data() | def import_visualizationProject_add(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_visualizationProject(data.data)
data.clear_data()<|docstring|>table adds<|endoftext|> |
6ce3b6a16e850e73179317f1ae79720af68cea7d3a7d2729d25710450d7fb54f | def import_visualizationProject_update(self, filename):
'table adds'
data = base_importData()
data.read_csv(filename)
data.format_data()
self.update_visualizationProject(data.data)
data.clear_data() | table adds | SBaaS_visualization/visualization_project_io.py | import_visualizationProject_update | dmccloskey/SBaaS_visualization | 0 | python | def import_visualizationProject_update(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.update_visualizationProject(data.data)
data.clear_data() | def import_visualizationProject_update(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.update_visualizationProject(data.data)
data.clear_data()<|docstring|>table adds<|endoftext|> |
c63438242068d87c9d349547d5d0c49513e10e8088052e665675d13f48b87ab2 | def import_visualizationUser_add(self, filename):
'table adds'
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_visualizationUser(data.data)
data.clear_data() | table adds | SBaaS_visualization/visualization_project_io.py | import_visualizationUser_add | dmccloskey/SBaaS_visualization | 0 | python | def import_visualizationUser_add(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_visualizationUser(data.data)
data.clear_data() | def import_visualizationUser_add(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.add_visualizationUser(data.data)
data.clear_data()<|docstring|>table adds<|endoftext|> |
f829af0a31bb31c6857bb9a0a0abc5ee2e26ae9ed5d72add444d8260f8476069 | def import_visualizationUser_update(self, filename):
'table adds'
data = base_importData()
data.read_csv(filename)
data.format_data()
self.update_visualizationUser(data.data)
data.clear_data() | table adds | SBaaS_visualization/visualization_project_io.py | import_visualizationUser_update | dmccloskey/SBaaS_visualization | 0 | python | def import_visualizationUser_update(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.update_visualizationUser(data.data)
data.clear_data() | def import_visualizationUser_update(self, filename):
data = base_importData()
data.read_csv(filename)
data.format_data()
self.update_visualizationUser(data.data)
data.clear_data()<|docstring|>table adds<|endoftext|> |
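The four import methods above share one pattern: read a CSV, normalize it, pass the rows to a table-specific add/update, then clear the buffer. Below is a self-contained sketch of the read step using only the standard library; the internals of `base_importData` are assumed, not shown.

```python
import csv

# Standard-library stand-in for base_importData.read_csv/format_data: load
# rows as plain dicts, ready to hand to an add_/update_ method.
with open('visualization_project.csv', newline='') as f:
    rows = [dict(r) for r in csv.DictReader(f)]
print(len(rows), 'rows ready for add_visualizationProject')
```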
1cfed390fa46f9c92ce4adaa7f3ca340691cd6f89e44969f1fbd1a8b278af7b9 | def export_visualizationProject_js(self, project_id_I, data_dir_I='tmp'):
'export visualization_project for visualization'
print('exporting visualization_project...')
data1_project = {}
data1_project = self.get_project_projectID_visualizationProject(project_id_I)
data1_O = []
data1_O = self.get_rows_projectID_visualizationProject(project_id_I)
data2_O = []
data2_O = self.get_rows_projectID_visualizationProjectDescription(project_id_I)
data3_O = []
data3_O = self.get_rows_projectID_visualizationProjectStatus(project_id_I)
data1_keys = ['analysis_id', 'data_export_id', 'pipeline_id']
data1_nestkeys = ['data_export_id']
data1_keymap = {'buttonparameter': 'data_export_id', 'liparameter': 'analysis_id', 'buttontext': 'data_export_id', 'litext': 'analysis_id'}
data2_keys = ['project_id', 'project_section', 'project_heading', 'project_tileorder']
data2_nestkeys = ['project_id']
data2_keymap = {'htmlmediasrc': 'project_media', 'htmlmediaalt': '', 'htmlmediahref': 'project_href', 'htmlmediaheading': 'project_heading', 'htmlmediaparagraph': 'project_paragraph'}
data3_keys = ['project_id', 'pipeline_id', 'pipeline_progress']
data3_nestkeys = ['pipeline_id']
data3_keymap = {'xdata': 'pipeline_progress', 'ydata': 'pipeline_id', 'serieslabel': 'pipeline_id', 'featureslabel': 'pipeline_id', 'ydatalb': None, 'ydataub': None}
dataobject_O = []
parametersobject_O = []
tile2datamap_O = {}
tile_cnt = 0
row_cnt = 1
if data3_O:
cnt = 1
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(cnt))
rowid = ('row' + str(row_cnt))
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
dataobject_O.append({'data': data3_O, 'datakeys': data3_keys, 'datanestkeys': data3_nestkeys})
parametersobject_O.append(formtileparameters_O)
tile2datamap_O.update({'filtermenu1': [tile_cnt]})
cnt += 1
svgtileid = ('tilesvg' + str(tile_cnt))
svgid = ('svg' + str(tile_cnt))
colid = ('col' + str(cnt))
svgparameters1_O = {'svgtype': 'horizontalbarschart2d_01', 'svgkeymap': [data3_keymap], 'svgid': ('svg' + str(cnt)), 'svgmargin': {'top': 50, 'right': 150, 'bottom': 50, 'left': 150}, 'svgwidth': 350, 'svgheight': 250, 'svgy1axislabel': 'fraction'}
svgtileparameters1_O = {'tileheader': 'Project status', 'tiletype': 'svg', 'tileid': svgtileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters1_O.update(svgparameters1_O)
parametersobject_O.append(svgtileparameters1_O)
tile2datamap_O.update({svgtileid: [tile_cnt]})
tile_cnt += 1
row_cnt += 1
if data2_O:
for (i, d) in enumerate(data2_O):
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(i))
rowid = ('row' + str(row_cnt))
tileheader = d['project_section']
htmlid = ('html' + str(tile_cnt))
tileparameters = {'tileheader': tileheader, 'tiletype': 'html', 'tileid': tileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-6'}
htmlparameters = {'htmlkeymap': [data2_keymap], 'htmltype': 'media_01', 'htmlid': htmlid}
tileparameters.update(htmlparameters)
parametersobject_O.append(tileparameters)
dataobject_O.append({'data': [d], 'datakeys': data2_keys, 'datanestkeys': data2_nestkeys})
tile2datamap_O.update({tileid: [tile_cnt]})
tile_cnt += 1
row_cnt += 1
if data1_project:
data1_dict = {}
for data_export_id in data1_project['data_export_id']:
data1_dict[data_export_id] = []
for d in data1_O:
data1_dict[d['data_export_id']].append(d)
data1_keys = list(data1_dict.keys())
data1_keys.sort()
col_cnt = 0
for k in data1_keys:
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(col_cnt))
rowid = ('row' + str(row_cnt))
tileheader = data1_dict[k][0]['pipeline_id']
htmlid = ('html' + str(tile_cnt))
tileparameters = {'tileheader': tileheader, 'tiletype': 'html', 'tileid': tileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-6', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}}
hrefparameters = {'hrefurl': 'project.html', 'htmlkeymap': [data1_keymap], 'htmltype': 'href_02', 'htmlid': htmlid}
tileparameters.update(hrefparameters)
parametersobject_O.append(tileparameters)
dataobject_O.append({'data': data1_dict[k], 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys})
tile2datamap_O.update({tileid: [tile_cnt]})
tile_cnt += 1
col_cnt += 1
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=None)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) | export visualization_project for visualization | SBaaS_visualization/visualization_project_io.py | export_visualizationProject_js | dmccloskey/SBaaS_visualization | 0 | python | def export_visualizationProject_js(self, project_id_I, data_dir_I='tmp'):
print('exporting visualization_project...')
data1_project = {}
data1_project = self.get_project_projectID_visualizationProject(project_id_I)
data1_O = []
data1_O = self.get_rows_projectID_visualizationProject(project_id_I)
data2_O = []
data2_O = self.get_rows_projectID_visualizationProjectDescription(project_id_I)
data3_O = []
data3_O = self.get_rows_projectID_visualizationProjectStatus(project_id_I)
data1_keys = ['analysis_id', 'data_export_id', 'pipeline_id']
data1_nestkeys = ['data_export_id']
data1_keymap = {'buttonparameter': 'data_export_id', 'liparameter': 'analysis_id', 'buttontext': 'data_export_id', 'litext': 'analysis_id'}
data2_keys = ['project_id', 'project_section', 'project_heading', 'project_tileorder']
data2_nestkeys = ['project_id']
data2_keymap = {'htmlmediasrc': 'project_media', 'htmlmediaalt': '', 'htmlmediahref': 'project_href', 'htmlmediaheading': 'project_heading', 'htmlmediaparagraph': 'project_paragraph'}
data3_keys = ['project_id', 'pipeline_id', 'pipeline_progress']
data3_nestkeys = ['pipeline_id']
data3_keymap = {'xdata': 'pipeline_progress', 'ydata': 'pipeline_id', 'serieslabel': 'pipeline_id', 'featureslabel': 'pipeline_id', 'ydatalb': None, 'ydataub': None}
dataobject_O = []
parametersobject_O = []
tile2datamap_O = {}
tile_cnt = 0
row_cnt = 1
if data3_O:
cnt = 1
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(cnt))
rowid = ('row' + str(row_cnt))
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
dataobject_O.append({'data': data3_O, 'datakeys': data3_keys, 'datanestkeys': data3_nestkeys})
parametersobject_O.append(formtileparameters_O)
tile2datamap_O.update({'filtermenu1': [tile_cnt]})
cnt += 1
svgtileid = ('tilesvg' + str(tile_cnt))
svgid = ('svg' + str(tile_cnt))
colid = ('col' + str(cnt))
svgparameters1_O = {'svgtype': 'horizontalbarschart2d_01', 'svgkeymap': [data3_keymap], 'svgid': ('svg' + str(cnt)), 'svgmargin': {'top': 50, 'right': 150, 'bottom': 50, 'left': 150}, 'svgwidth': 350, 'svgheight': 250, 'svgy1axislabel': 'fraction'}
svgtileparameters1_O = {'tileheader': 'Project status', 'tiletype': 'svg', 'tileid': svgtileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters1_O.update(svgparameters1_O)
parametersobject_O.append(svgtileparameters1_O)
tile2datamap_O.update({svgtileid: [tile_cnt]})
tile_cnt += 1
row_cnt += 1
if data2_O:
for (i, d) in enumerate(data2_O):
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(i))
rowid = ('row' + str(row_cnt))
tileheader = d['project_section']
htmlid = ('html' + str(tile_cnt))
tileparameters = {'tileheader': tileheader, 'tiletype': 'html', 'tileid': tileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-6'}
htmlparameters = {'htmlkeymap': [data2_keymap], 'htmltype': 'media_01', 'htmlid': htmlid}
tileparameters.update(htmlparameters)
parametersobject_O.append(tileparameters)
dataobject_O.append({'data': [d], 'datakeys': data2_keys, 'datanestkeys': data2_nestkeys})
tile2datamap_O.update({tileid: [tile_cnt]})
tile_cnt += 1
row_cnt += 1
if data1_project:
data1_dict = {}
for data_export_id in data1_project['data_export_id']:
data1_dict[data_export_id] = []
for d in data1_O:
data1_dict[d['data_export_id']].append(d)
data1_keys = list(data1_dict.keys())
data1_keys.sort()
col_cnt = 0
for k in data1_keys:
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(col_cnt))
rowid = ('row' + str(row_cnt))
tileheader = data1_dict[k][0]['pipeline_id']
htmlid = ('html' + str(tile_cnt))
tileparameters = {'tileheader': tileheader, 'tiletype': 'html', 'tileid': tileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-6', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}}
hrefparameters = {'hrefurl': 'project.html', 'htmlkeymap': [data1_keymap], 'htmltype': 'href_02', 'htmlid': htmlid}
tileparameters.update(hrefparameters)
parametersobject_O.append(tileparameters)
dataobject_O.append({'data': data1_dict[k], 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys})
tile2datamap_O.update({tileid: [tile_cnt]})
tile_cnt += 1
col_cnt += 1
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=None)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects()) | def export_visualizationProject_js(self, project_id_I, data_dir_I='tmp'):
print('exporting visualization_project...')
data1_project = {}
data1_project = self.get_project_projectID_visualizationProject(project_id_I)
data1_O = []
data1_O = self.get_rows_projectID_visualizationProject(project_id_I)
data2_O = []
data2_O = self.get_rows_projectID_visualizationProjectDescription(project_id_I)
data3_O = []
data3_O = self.get_rows_projectID_visualizationProjectStatus(project_id_I)
data1_keys = ['analysis_id', 'data_export_id', 'pipeline_id']
data1_nestkeys = ['data_export_id']
data1_keymap = {'buttonparameter': 'data_export_id', 'liparameter': 'analysis_id', 'buttontext': 'data_export_id', 'litext': 'analysis_id'}
data2_keys = ['project_id', 'project_section', 'project_heading', 'project_tileorder']
data2_nestkeys = ['project_id']
data2_keymap = {'htmlmediasrc': 'project_media', 'htmlmediaalt': '', 'htmlmediahref': 'project_href', 'htmlmediaheading': 'project_heading', 'htmlmediaparagraph': 'project_paragraph'}
data3_keys = ['project_id', 'pipeline_id', 'pipeline_progress']
data3_nestkeys = ['pipeline_id']
data3_keymap = {'xdata': 'pipeline_progress', 'ydata': 'pipeline_id', 'serieslabel': 'pipeline_id', 'featureslabel': 'pipeline_id', 'ydatalb': None, 'ydataub': None}
dataobject_O = []
parametersobject_O = []
tile2datamap_O = {}
tile_cnt = 0
row_cnt = 1
if data3_O:
cnt = 1
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(cnt))
rowid = ('row' + str(row_cnt))
formtileparameters_O = {'tileheader': 'Filter menu', 'tiletype': 'html', 'tileid': 'filtermenu1', 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-4'}
formparameters_O = {'htmlid': 'filtermenuform1', 'htmltype': 'form_01', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}, 'formresetbuttonidtext': {'id': 'reset1', 'text': 'reset'}, 'formupdatebuttonidtext': {'id': 'update1', 'text': 'update'}}
formtileparameters_O.update(formparameters_O)
dataobject_O.append({'data': data3_O, 'datakeys': data3_keys, 'datanestkeys': data3_nestkeys})
parametersobject_O.append(formtileparameters_O)
tile2datamap_O.update({'filtermenu1': [tile_cnt]})
cnt += 1
svgtileid = ('tilesvg' + str(tile_cnt))
svgid = ('svg' + str(tile_cnt))
colid = ('col' + str(cnt))
svgparameters1_O = {'svgtype': 'horizontalbarschart2d_01', 'svgkeymap': [data3_keymap], 'svgid': ('svg' + str(cnt)), 'svgmargin': {'top': 50, 'right': 150, 'bottom': 50, 'left': 150}, 'svgwidth': 350, 'svgheight': 250, 'svgy1axislabel': 'fraction'}
svgtileparameters1_O = {'tileheader': 'Project status', 'tiletype': 'svg', 'tileid': svgtileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-8'}
svgtileparameters1_O.update(svgparameters1_O)
parametersobject_O.append(svgtileparameters1_O)
tile2datamap_O.update({svgtileid: [tile_cnt]})
tile_cnt += 1
row_cnt += 1
if data2_O:
for (i, d) in enumerate(data2_O):
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(i))
rowid = ('row' + str(row_cnt))
tileheader = d['project_section']
htmlid = ('html' + str(tile_cnt))
tileparameters = {'tileheader': tileheader, 'tiletype': 'html', 'tileid': tileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-6'}
htmlparameters = {'htmlkeymap': [data2_keymap], 'htmltype': 'media_01', 'htmlid': htmlid}
tileparameters.update(htmlparameters)
parametersobject_O.append(tileparameters)
dataobject_O.append({'data': [d], 'datakeys': data2_keys, 'datanestkeys': data2_nestkeys})
tile2datamap_O.update({tileid: [tile_cnt]})
tile_cnt += 1
row_cnt += 1
if data1_project:
data1_dict = {}
for data_export_id in data1_project['data_export_id']:
data1_dict[data_export_id] = []
for d in data1_O:
data1_dict[d['data_export_id']].append(d)
data1_keys = list(data1_dict.keys())
data1_keys.sort()
col_cnt = 0
for k in data1_keys:
tileid = ('tile' + str(tile_cnt))
colid = ('col' + str(col_cnt))
rowid = ('row' + str(row_cnt))
tileheader = data1_dict[k][0]['pipeline_id']
htmlid = ('html' + str(tile_cnt))
tileparameters = {'tileheader': tileheader, 'tiletype': 'html', 'tileid': tileid, 'rowid': rowid, 'colid': colid, 'tileclass': 'panel panel-default', 'rowclass': 'row', 'colclass': 'col-sm-6', 'formsubmitbuttonidtext': {'id': 'submit1', 'text': 'submit'}}
hrefparameters = {'hrefurl': 'project.html', 'htmlkeymap': [data1_keymap], 'htmltype': 'href_02', 'htmlid': htmlid}
tileparameters.update(hrefparameters)
parametersobject_O.append(tileparameters)
dataobject_O.append({'data': data1_dict[k], 'datakeys': data1_keys, 'datanestkeys': data1_nestkeys})
tile2datamap_O.update({tileid: [tile_cnt]})
tile_cnt += 1
col_cnt += 1
ddtutilities = ddt_container(parameters_I=parametersobject_O, data_I=dataobject_O, tile2datamap_I=tile2datamap_O, filtermenu_I=None)
if (data_dir_I == 'tmp'):
filename_str = (self.settings['visualization_data'] + '/tmp/ddt_data.js')
elif (data_dir_I == 'data_json'):
data_json_O = ddtutilities.get_allObjects_js()
return data_json_O
with open(filename_str, 'w') as file:
file.write(ddtutilities.get_allObjects())<|docstring|>export visualization_project for visualization<|endoftext|> |
8f52178042ff896999245da33cee42a2237839ad01e9c49e57d6306cab73ae20 | def export_visualizationProject_csv(self, project_id_I, filename_O):
'export the visualization project to csv'
data1_O = []
data1_O = self.get_rows_projectID_visualizationProject(project_id_I)
io = base_exportData(data1_O)
io.write_dict2csv(filename_O) | export the visualization project to csv | SBaaS_visualization/visualization_project_io.py | export_visualizationProject_csv | dmccloskey/SBaaS_visualization | 0 | python | def export_visualizationProject_csv(self, project_id_I, filename_O):
data1_O = []
data1_O = self.get_rows_projectID_visualizationProject(project_id_I)
io = base_exportData(data1_O)
io.write_dict2csv(filename_O) | def export_visualizationProject_csv(self, project_id_I, filename_O):
data1_O = []
data1_O = self.get_rows_projectID_visualizationProject(project_id_I)
io = base_exportData(data1_O)
io.write_dict2csv(filename_O)<|docstring|>export the visualization project to csv<|endoftext|> |
c9d54ce187aa308eea231faf78ae3f2c8ebb6a00b101218ab12b80130d551a29 | def yaml_load(yaml_str):
'Wrap YAML load library.'
yml = YAML(typ='safe')
return yml.load(yaml_str) | Wrap YAML load library. | faucet/config_parser_util.py | yaml_load | dangervon/faucet | 393 | python | def yaml_load(yaml_str):
yml = YAML(typ='safe')
return yml.load(yaml_str) | def yaml_load(yaml_str):
yml = YAML(typ='safe')
return yml.load(yaml_str)<|docstring|>Wrap YAML load library.<|endoftext|> |
fb40df39879feca5967384e7a83bc342ca6b5cda906f88174afba84e381b790b | def yaml_dump(yaml_dict):
'Wrap YAML dump library.'
with StringIO() as stream:
yml = YAML(typ='safe')
yml.dump(yaml_dict, stream=stream)
return stream.getvalue() | Wrap YAML dump library. | faucet/config_parser_util.py | yaml_dump | dangervon/faucet | 393 | python | def yaml_dump(yaml_dict):
with StringIO() as stream:
yml = YAML(typ='safe')
yml.dump(yaml_dict, stream=stream)
return stream.getvalue() | def yaml_dump(yaml_dict):
with StringIO() as stream:
yml = YAML(typ='safe')
yml.dump(yaml_dict, stream=stream)
return stream.getvalue()<|docstring|>Wrap YAML dump library.<|endoftext|> |
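Both wrappers above delegate to ruamel.yaml's safe loader and dumper; a self-contained round trip:

```python
from io import StringIO
from ruamel.yaml import YAML

yml = YAML(typ='safe')
conf = yml.load('dps: {sw1: {dp_id: 1}}')  # -> {'dps': {'sw1': {'dp_id': 1}}}
with StringIO() as stream:
    yml.dump(conf, stream=stream)          # serialize back to YAML text
    print(stream.getvalue())
```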
62674c90168d411cb8b5c163473971d4e8e8bb625b2fef3824b5d86353ffcdb8 | def get_logger(logname):
'Return logger instance for config parsing.'
return logging.getLogger((logname + '.config')) | Return logger instance for config parsing. | faucet/config_parser_util.py | get_logger | dangervon/faucet | 393 | python | def get_logger(logname):
return logging.getLogger((logname + '.config')) | def get_logger(logname):
return logging.getLogger((logname + '.config'))<|docstring|>Return logger instance for config parsing.<|endoftext|> |
822aa163d469522bc20246ac4300478ee59d79100a5524fa2415360e595c9bf3 | def read_config(config_file, logname):
'Return a parsed YAML config file or None.'
logger = get_logger(logname)
conf_txt = None
conf = None
try:
with open(config_file, 'r', encoding='utf-8') as stream:
conf_txt = stream.read()
conf = yaml_load(conf_txt)
except (TypeError, UnicodeDecodeError, PermissionError, ValueError, ScannerError, DuplicateKeyError, ComposerError, ConstructorError, ParserError) as err:
logger.error('Error in file %s (%s)', config_file, str(err))
except FileNotFoundError as err:
logger.error('Could not find requested file: %s (%s)', config_file, str(err))
return (conf, conf_txt) | Return a parsed YAML config file or None. | faucet/config_parser_util.py | read_config | dangervon/faucet | 393 | python | def read_config(config_file, logname):
logger = get_logger(logname)
conf_txt = None
conf = None
try:
with open(config_file, 'r', encoding='utf-8') as stream:
conf_txt = stream.read()
conf = yaml_load(conf_txt)
except (TypeError, UnicodeDecodeError, PermissionError, ValueError, ScannerError, DuplicateKeyError, ComposerError, ConstructorError, ParserError) as err:
logger.error('Error in file %s (%s)', config_file, str(err))
except FileNotFoundError as err:
logger.error('Could not find requested file: %s (%s)', config_file, str(err))
return (conf, conf_txt) | def read_config(config_file, logname):
logger = get_logger(logname)
conf_txt = None
conf = None
try:
with open(config_file, 'r', encoding='utf-8') as stream:
conf_txt = stream.read()
conf = yaml_load(conf_txt)
except (TypeError, UnicodeDecodeError, PermissionError, ValueError, ScannerError, DuplicateKeyError, ComposerError, ConstructorError, ParserError) as err:
logger.error('Error in file %s (%s)', config_file, str(err))
except FileNotFoundError as err:
logger.error('Could not find requested file: %s (%s)', config_file, str(err))
return (conf, conf_txt)<|docstring|>Return a parsed YAML config file or None.<|endoftext|> |
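A usage sketch for `read_config`; the path and logger name below are hypothetical placeholders.

```python
# Hypothetical call: the parsed dict drives config handling, while the raw
# text is what the hashing helpers below fingerprint.
conf, conf_txt = read_config('/etc/faucet/faucet.yaml', 'faucet')
if conf is None:
    print('config could not be loaded')
```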
c1bf4f61bbf9a1258f5ad7e791fdf1ba3298552f0faf6a0bc65a1241fa3e6ba2 | def config_hash_content(content):
'Return hash of config file content.'
config_hash = getattr(hashlib, CONFIG_HASH_FUNC)
return config_hash(content.encode('utf-8')).hexdigest() | Return hash of config file content. | faucet/config_parser_util.py | config_hash_content | dangervon/faucet | 393 | python | def config_hash_content(content):
config_hash = getattr(hashlib, CONFIG_HASH_FUNC)
return config_hash(content.encode('utf-8')).hexdigest() | def config_hash_content(content):
config_hash = getattr(hashlib, CONFIG_HASH_FUNC)
return config_hash(content.encode('utf-8')).hexdigest()<|docstring|>Return hash of config file content.<|endoftext|> |
b5e6769d764f74c70a0ee7eb14c34a3a606cd3664e5d8584a177aaa013e411fb | def config_file_hash(config_file_name):
'Return hash of YAML config file contents.'
with open(config_file_name, encoding='utf-8') as config_file:
return config_hash_content(config_file.read()) | Return hash of YAML config file contents. | faucet/config_parser_util.py | config_file_hash | dangervon/faucet | 393 | python | def config_file_hash(config_file_name):
with open(config_file_name, encoding='utf-8') as config_file:
return config_hash_content(config_file.read()) | def config_file_hash(config_file_name):
with open(config_file_name, encoding='utf-8') as config_file:
return config_hash_content(config_file.read())<|docstring|>Return hash of YAML config file contents.<|endoftext|> |
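Both hashing helpers reduce to a single hashlib call; assuming `CONFIG_HASH_FUNC` is `'sha256'` (an assumption — the constant is defined elsewhere in the module):

```python
import hashlib

# Equivalent one-liner under the sha256 assumption.
digest = hashlib.sha256('dps: {}'.encode('utf-8')).hexdigest()
print(digest)
```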
5321fc10b27ff2f8495f9816e0143e9df147ed6dc7056a7acec957a071fd0587 | def dp_config_path(config_file, parent_file=None):
'Return full path to config file.'
if (parent_file and (not os.path.isabs(config_file))):
return os.path.realpath(os.path.join(os.path.dirname(parent_file), config_file))
return os.path.realpath(config_file) | Return full path to config file. | faucet/config_parser_util.py | dp_config_path | dangervon/faucet | 393 | python | def dp_config_path(config_file, parent_file=None):
if (parent_file and (not os.path.isabs(config_file))):
return os.path.realpath(os.path.join(os.path.dirname(parent_file), config_file))
return os.path.realpath(config_file) | def dp_config_path(config_file, parent_file=None):
if (parent_file and (not os.path.isabs(config_file))):
return os.path.realpath(os.path.join(os.path.dirname(parent_file), config_file))
return os.path.realpath(config_file)<|docstring|>Return full path to config file.<|endoftext|> |
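Relative includes resolve against the including file's directory; a quick self-contained check with hypothetical paths:

```python
import os

# Mirrors dp_config_path for a relative include: 'acls.yaml' referenced from
# /etc/faucet/faucet.yaml resolves inside /etc/faucet (POSIX paths assumed).
parent = '/etc/faucet/faucet.yaml'
resolved = os.path.realpath(os.path.join(os.path.dirname(parent), 'acls.yaml'))
print(resolved)  # /etc/faucet/acls.yaml
```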
49058b6442bf7c6268c32ddd4dc391c114d379ec013b36f00e803d46af2b26c8 | def dp_include(config_hashes, config_contents, config_file, logname, top_confs):
'Handles including additional config files'
logger = get_logger(logname)
if (not os.path.isfile(config_file)):
logger.warning('not a regular file or does not exist: %s', config_file)
return False
(conf, config_content) = read_config(config_file, logname)
if (not conf):
logger.warning('error loading config from file: %s', config_file)
return False
valid_conf_keys = set(top_confs.keys()).union({'include', 'include-optional', 'version'})
unknown_top_confs = (set(conf.keys()) - valid_conf_keys)
if unknown_top_confs:
logger.error('unknown top level config items: %s', unknown_top_confs)
return False
new_config_hashes = config_hashes.copy()
new_config_hashes[config_file] = config_hash_content(config_content)
new_config_contents = config_contents.copy()
new_config_contents[config_file] = config_content
new_top_confs = {}
for (conf_name, curr_conf) in top_confs.items():
new_top_confs[conf_name] = curr_conf.copy()
try:
new_top_confs[conf_name].update(conf.pop(conf_name, {}))
except (TypeError, ValueError):
logger.error('Invalid config for "%s"', conf_name)
return False
for (include_directive, file_required) in (('include', True), ('include-optional', False)):
include_values = conf.pop(include_directive, [])
if (not isinstance(include_values, list)):
logger.error('Include directive is not in a valid format')
return False
for include_file in include_values:
if (not isinstance(include_file, str)):
include_file = str(include_file)
include_path = dp_config_path(include_file, parent_file=config_file)
logger.info('including file: %s', include_path)
if (include_path in config_hashes):
logger.error('include file %s already loaded, include loop found in file: %s', include_path, config_file)
return False
if (not dp_include(new_config_hashes, config_contents, include_path, logname, new_top_confs)):
if file_required:
logger.error('unable to load required include file: %s', include_path)
return False
new_config_hashes[include_path] = None
logger.warning('skipping optional include file: %s', include_path)
config_hashes.update(new_config_hashes)
config_contents.update(new_config_contents)
for (conf_name, new_conf) in new_top_confs.items():
top_confs[conf_name].update(new_conf)
return True | Handles including additional config files | faucet/config_parser_util.py | dp_include | dangervon/faucet | 393 | python | def dp_include(config_hashes, config_contents, config_file, logname, top_confs):
logger = get_logger(logname)
if (not os.path.isfile(config_file)):
logger.warning('not a regular file or does not exist: %s', config_file)
return False
(conf, config_content) = read_config(config_file, logname)
if (not conf):
logger.warning('error loading config from file: %s', config_file)
return False
valid_conf_keys = set(top_confs.keys()).union({'include', 'include-optional', 'version'})
unknown_top_confs = (set(conf.keys()) - valid_conf_keys)
if unknown_top_confs:
logger.error('unknown top level config items: %s', unknown_top_confs)
return False
new_config_hashes = config_hashes.copy()
new_config_hashes[config_file] = config_hash_content(config_content)
new_config_contents = config_contents.copy()
new_config_contents[config_file] = config_content
new_top_confs = {}
for (conf_name, curr_conf) in top_confs.items():
new_top_confs[conf_name] = curr_conf.copy()
try:
new_top_confs[conf_name].update(conf.pop(conf_name, {}))
except (TypeError, ValueError):
logger.error('Invalid config for "%s"', conf_name)
return False
for (include_directive, file_required) in (('include', True), ('include-optional', False)):
include_values = conf.pop(include_directive, [])
if (not isinstance(include_values, list)):
logger.error('Include directive is not in a valid format')
return False
for include_file in include_values:
if (not isinstance(include_file, str)):
include_file = str(include_file)
include_path = dp_config_path(include_file, parent_file=config_file)
logger.info('including file: %s', include_path)
if (include_path in config_hashes):
logger.error('include file %s already loaded, include loop found in file: %s', include_path, config_file)
return False
if (not dp_include(new_config_hashes, config_contents, include_path, logname, new_top_confs)):
if file_required:
logger.error('unable to load required include file: %s', include_path)
return False
new_config_hashes[include_path] = None
logger.warning('skipping optional include file: %s', include_path)
config_hashes.update(new_config_hashes)
config_contents.update(new_config_contents)
for (conf_name, new_conf) in new_top_confs.items():
top_confs[conf_name].update(new_conf)
return True | def dp_include(config_hashes, config_contents, config_file, logname, top_confs):
logger = get_logger(logname)
if (not os.path.isfile(config_file)):
logger.warning('not a regular file or does not exist: %s', config_file)
return False
(conf, config_content) = read_config(config_file, logname)
if (not conf):
logger.warning('error loading config from file: %s', config_file)
return False
valid_conf_keys = set(top_confs.keys()).union({'include', 'include-optional', 'version'})
unknown_top_confs = (set(conf.keys()) - valid_conf_keys)
if unknown_top_confs:
logger.error('unknown top level config items: %s', unknown_top_confs)
return False
new_config_hashes = config_hashes.copy()
new_config_hashes[config_file] = config_hash_content(config_content)
new_config_contents = config_contents.copy()
new_config_contents[config_file] = config_content
new_top_confs = {}
for (conf_name, curr_conf) in top_confs.items():
new_top_confs[conf_name] = curr_conf.copy()
try:
new_top_confs[conf_name].update(conf.pop(conf_name, {}))
except (TypeError, ValueError):
logger.error('Invalid config for "%s"', conf_name)
return False
for (include_directive, file_required) in (('include', True), ('include-optional', False)):
include_values = conf.pop(include_directive, [])
if (not isinstance(include_values, list)):
logger.error('Include directive is not in a valid format')
return False
for include_file in include_values:
if (not isinstance(include_file, str)):
include_file = str(include_file)
include_path = dp_config_path(include_file, parent_file=config_file)
logger.info('including file: %s', include_path)
if (include_path in config_hashes):
logger.error('include file %s already loaded, include loop found in file: %s', include_path, config_file)
return False
if (not dp_include(new_config_hashes, config_contents, include_path, logname, new_top_confs)):
if file_required:
logger.error('unable to load required include file: %s', include_path)
return False
new_config_hashes[include_path] = None
logger.warning('skipping optional include file: %s', include_path)
config_hashes.update(new_config_hashes)
config_contents.update(new_config_contents)
for (conf_name, new_conf) in new_top_confs.items():
top_confs[conf_name].update(new_conf)
return True<|docstring|>Handles including additional config files<|endoftext|> |
c0b88f16e5d0cd26da0c4f86f4b5499ee10684c1f558be8ef2d796fb839b1c7e | def config_changed(top_config_file, new_top_config_file, config_hashes):
'Return True if configuration has changed.\n\n Args:\n top_config_file (str): name of FAUCET config file\n new_top_config_file (str): name, possibly new, of FAUCET config file.\n config_hashes (dict): map of config file/includes and hashes of contents.\n Returns:\n bool: True if the file, or any file it includes, has changed.\n '
if (new_top_config_file != top_config_file):
return True
if ((config_hashes is None) or (new_top_config_file is None)):
return False
for (config_file, config_hash) in config_hashes.items():
config_file_exists = os.path.isfile(config_file)
if ((config_hash is None) and config_file_exists):
return True
if (config_hash and (not config_file_exists)):
return True
if config_file_exists:
new_config_hash = config_file_hash(config_file)
if (new_config_hash != config_hash):
return True
return False | Return True if configuration has changed.
Args:
top_config_file (str): name of FAUCET config file
new_top_config_file (str): name, possibly new, of FAUCET config file.
config_hashes (dict): map of config file/includes and hashes of contents.
Returns:
bool: True if the file, or any file it includes, has changed. | faucet/config_parser_util.py | config_changed | dangervon/faucet | 393 | python | def config_changed(top_config_file, new_top_config_file, config_hashes):
'Return True if configuration has changed.\n\n Args:\n top_config_file (str): name of FAUCET config file\n new_top_config_file (str): name, possibly new, of FAUCET config file.\n config_hashes (dict): map of config file/includes and hashes of contents.\n Returns:\n bool: True if the file, or any file it includes, has changed.\n '
if (new_top_config_file != top_config_file):
return True
if ((config_hashes is None) or (new_top_config_file is None)):
return False
for (config_file, config_hash) in config_hashes.items():
config_file_exists = os.path.isfile(config_file)
if ((config_hash is None) and config_file_exists):
return True
if (config_hash and (not config_file_exists)):
return True
if config_file_exists:
new_config_hash = config_file_hash(config_file)
if (new_config_hash != config_hash):
return True
return False | def config_changed(top_config_file, new_top_config_file, config_hashes):
'Return True if configuration has changed.\n\n Args:\n top_config_file (str): name of FAUCET config file\n new_top_config_file (str): name, possibly new, of FAUCET config file.\n config_hashes (dict): map of config file/includes and hashes of contents.\n Returns:\n bool: True if the file, or any file it includes, has changed.\n '
if (new_top_config_file != top_config_file):
return True
if ((config_hashes is None) or (new_top_config_file is None)):
return False
for (config_file, config_hash) in config_hashes.items():
config_file_exists = os.path.isfile(config_file)
if ((config_hash is None) and config_file_exists):
return True
if (config_hash and (not config_file_exists)):
return True
if config_file_exists:
new_config_hash = config_file_hash(config_file)
if (new_config_hash != config_hash):
return True
return False<|docstring|>Return True if configuration has changed.
Args:
top_config_file (str): name of FAUCET config file
new_top_config_file (str): name, possibly new, of FAUCET config file.
config_hashes (dict): map of config file/includes and hashes of contents.
Returns:
bool: True if the file, or any file it includes, has changed.<|endoftext|> |
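A reload-check sketch built on `config_changed`; `config_hashes` is assumed to be the file-to-hash map produced by an earlier `dp_include` pass.

```python
# Hypothetical watcher step: config_hashes maps the top file plus every
# include to the content hash recorded at the last successful load.
if config_changed('/etc/faucet/faucet.yaml', '/etc/faucet/faucet.yaml', config_hashes):
    print('configuration changed, reload required')
```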
4c9b29cf12c9459d5619202ffed0ef0cbb2dab1329563de400b091eb8d81f4c1 | def _has_expired(self) -> bool:
'\n Evaluates whether the access token has expired.\n\n Returns\n -------\n has_expired : bool\n True if the access token has expired, otherwise False.\n\n '
if (time.time() > self._expiration):
return True
else:
return False | Evaluates whether the access token has expired.
Returns
-------
has_expired : bool
True if the access token has expired, otherwise False. | disruptive/authentication.py | _has_expired | friarswood/python-client | 8 | python | def _has_expired(self) -> bool:
'\n Evaluates whether the access token has expired.\n\n Returns\n -------\n has_expired : bool\n True if the access token has expired, otherwise False.\n\n '
if (time.time() > self._expiration):
return True
else:
return False | def _has_expired(self) -> bool:
'\n Evaluates whether the access token has expired.\n\n Returns\n -------\n has_expired : bool\n True if the access token has expired, otherwise False.\n\n '
if (time.time() > self._expiration):
return True
else:
return False<|docstring|>Evaluates whether the access token has expired.
Returns
-------
has_expired : bool
True if the access token has expired, otherwise False.<|endoftext|> |
447f40ad9330c459dd733d939ed1cd32c1c8e20b559186bb6c3e61c59c35ab0d | def get_token(self) -> str:
'\n Returns the access token.\n If the token has expired, renew it.\n\n Returns\n -------\n token : str\n Access token added to the request header.\n\n '
if self._has_expired():
self.refresh()
return self._token | Returns the access token.
If the token has expired, renew it.
Returns
-------
token : str
Access token added to the request header. | disruptive/authentication.py | get_token | friarswood/python-client | 8 | python | def get_token(self) -> str:
'\n Returns the access token.\n If the token has expired, renew it.\n\n Returns\n -------\n token : str\n Access token added to the request header.\n\n '
if self._has_expired():
self.refresh()
return self._token | def get_token(self) -> str:
'\n Returns the access token.\n If the token has expired, renew it.\n\n Returns\n -------\n token : str\n Access token added to the request header.\n\n '
if self._has_expired():
self.refresh()
return self._token<|docstring|>Returns the access token.
If the token has expired, renew it.
Returns
-------
token : str
Access token added to the request header.<|endoftext|> |
69390d895e3068f79802ffcae2f582b39fa3f9856ad1adfd6333cc605cc98b5a | def refresh(self) -> None:
'\n This function does nothing and is overwritten in all\n child classes. It only exists for consistency purposes\n as it is called in get_token().\n\n '
pass | This function does nothing and is overwritten in all
child classes. It only exists for consistency purposes
as it is called in get_token(). | disruptive/authentication.py | refresh | friarswood/python-client | 8 | python | def refresh(self) -> None:
'\n This function does nothing and is overwritten in all\n child classes. It only exists for consistency purposes\n as it is called in get_token().\n\n '
pass | def refresh(self) -> None:
'\n This function does nothing and is overwritten in all\n child classes. It only exists for consistency purposes\n as it is called in get_token().\n\n '
pass<|docstring|>This function does nothing and is overwritten in all
child classes. It only exists for consistency purposes
as it is called in get_token().<|endoftext|> |
abae14fb422a8dc80c3a3acf2d22651723adb36b69fb4038105ae2231e769526 | def refresh(self) -> None:
'\n If called, this function does nothing but raise an error as no\n authentication routine has been called to update the configuration\n variable, nor has an authentication object been provided.\n\n Raises\n ------\n Unauthorized\n If neither default_auth has been set nor the\n auth kwarg has been provided.\n\n '
msg = 'Missing Service Account credentials.\n\nEither set the following environment variables:\n\n DT_SERVICE_ACCOUNT_KEY_ID: Unique Service Account key ID.\n DT_SERVICE_ACCOUNT_SECRET: Unique Service Account secret.\n DT_SERVICE_ACCOUNT_EMAIL: Unique Service Account email.\n\nor provide them programmatically:\n\n import disruptive as dt\n\n dt.default_auth = dt.Auth.service_account(\n key_id="<SERVICE_ACCOUNT_KEY_ID>",\n secret="<SERVICE_ACCOUNT_SECRET>",\n email="<SERVICE_ACCOUNT_EMAIL>",\n )\n\nSee https://developer.d21s.com/api/libraries/python/client/authentication.html for more details.\n'
raise dterrors.Unauthorized(msg) | If called, this function does nothing but raise an error as no
authentication routine has been called to update the configuration
variable, nor has an authentication object been provided.
Raises
------
Unauthorized
If neither default_auth has been set nor the
auth kwarg has been provided. | disruptive/authentication.py | refresh | friarswood/python-client | 8 | python | def refresh(self) -> None:
'\n If called, this function does nothing but raise an error as no\n authentication routine has been called to update the configuration\n variable, nor has an authentication object been provided.\n\n Raises\n ------\n Unauthorized\n If neither default_auth has been set nor the\n auth kwarg has been provided.\n\n '
msg = 'Missing Service Account credentials.\n\nEither set the following environment variables:\n\n DT_SERVICE_ACCOUNT_KEY_ID: Unique Service Account key ID.\n DT_SERVICE_ACCOUNT_SECRET: Unique Service Account secret.\n DT_SERVICE_ACCOUNT_EMAIL: Unique Service Account email.\n\nor provide them programmatically:\n\n import disruptive as dt\n\n dt.default_auth = dt.Auth.service_account(\n key_id="<SERVICE_ACCOUNT_KEY_ID>",\n secret="<SERVICE_ACCOUNT_SECRET>",\n email="<SERVICE_ACCOUNT_EMAIL>",\n )\n\nSee https://developer.d21s.com/api/libraries/python/client/authentication.html for more details.\n'
raise dterrors.Unauthorized(msg) | def refresh(self) -> None:
'\n If called, this function does nothing but raise an error as no\n authentication routine has been called to update the configuration\n variable, nor has an authentication object been provided.\n\n Raises\n ------\n Unauthorized\n If neither default_auth has been set nor the\n auth kwarg has been provided.\n\n '
msg = 'Missing Service Account credentials.\n\nEither set the following environment variables:\n\n DT_SERVICE_ACCOUNT_KEY_ID: Unique Service Account key ID.\n DT_SERVICE_ACCOUNT_SECRET: Unique Service Account secret.\n DT_SERVICE_ACCOUNT_EMAIL: Unique Service Account email.\n\nor provide them programmatically:\n\n import disruptive as dt\n\n dt.default_auth = dt.Auth.service_account(\n key_id="<SERVICE_ACCOUNT_KEY_ID>",\n secret="<SERVICE_ACCOUNT_SECRET>",\n email="<SERVICE_ACCOUNT_EMAIL>",\n )\n\nSee https://developer.d21s.com/api/libraries/python/client/authentication.html for more details.\n'
raise dterrors.Unauthorized(msg)<|docstring|>If called, this function does nothing but raise an error as no
authentication routine has been called to update the configuration
variable, nor has an authentication object been provided.
Raises
------
Unauthorized
If neither default_auth has been set nor the
auth kwarg has been provided.<|endoftext|> |
70cdd5600c9773445a319bc415def095460d5382f4748f13f808cff90c401822 | def refresh(self) -> None:
'\n Refreshes the access token.\n\n This first exchanges the JWT for an access token, then updates\n the expiration and token attributes with the response.\n\n '
response: dict = self._get_access_token()
self._expiration = (time.time() + response['expires_in'])
self._token = 'Bearer {}'.format(response['access_token']) | Refreshes the access token.
This first exchanges the JWT for an access token, then updates
the expiration and token attributes with the response. | disruptive/authentication.py | refresh | friarswood/python-client | 8 | python | def refresh(self) -> None:
'\n Refreshes the access token.\n\n This first exchanges the JWT for an access token, then updates\n the expiration and token attributes with the response.\n\n '
response: dict = self._get_access_token()
self._expiration = (time.time() + response['expires_in'])
self._token = 'Bearer {}'.format(response['access_token']) | def refresh(self) -> None:
'\n Refreshes the access token.\n\n This first exchanges the JWT for an access token, then updates\n the expiration and token attributes with the response.\n\n '
response: dict = self._get_access_token()
self._expiration = (time.time() + response['expires_in'])
self._token = 'Bearer {}'.format(response['access_token'])<|docstring|>Refreshes the access token.
This first exchanges the JWT for an access token, then updates
the expiration and token attributes with the response.<|endoftext|> |
1bbe9eeac44855f0a4e3d6c10b4596add56cf56f0c9f795c1b7b8cc064935017 | def _get_access_token(self) -> dict:
'\n Constructs and exchanges the JWT for an access token.\n\n Returns\n -------\n response : dict\n Dictionary containing expiration and the token itself.\n\n Raises\n ------\n BadRequest\n If the provided credentials could not be used for authentication.\n\n '
jwt_headers: dict[(str, str)] = {'alg': 'HS256', 'kid': self.key_id}
jwt_payload: dict[(str, Any)] = {'iat': int(time.time()), 'exp': (int(time.time()) + 3600), 'aud': self.token_endpoint, 'iss': self.email}
encoded_jwt: str = jwt.encode(payload=jwt_payload, key=self.secret, algorithm='HS256', headers=jwt_headers)
request_data: str = urllib.parse.urlencode({'assertion': encoded_jwt, 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer'})
try:
access_token_response: dict = dtrequests.DTRequest.post(url='', base_url=self.token_endpoint, data=request_data, headers={'Content-Type': 'application/x-www-form-urlencoded'}, skip_auth=True)
except dterrors.BadRequest:
raise dterrors.Unauthorized('Could not authenticate with the provided credentials.\n\nRead more: https://developer.d21s.com/docs/authentication/oauth2#common-errors')
return access_token_response | Constructs and exchanges the JWT for an access token.
Returns
-------
response : dict
Dictionary containing expiration and the token itself.
Raises
------
BadRequest
If the provided credentials could not be used for authentication. | disruptive/authentication.py | _get_access_token | friarswood/python-client | 8 | python | def _get_access_token(self) -> dict:
'\n Constructs and exchanges the JWT for an access token.\n\n Returns\n -------\n response : dict\n Dictionary containing expiration and the token itself.\n\n Raises\n ------\n BadRequest\n If the provided credentials could not be used for authentication.\n\n '
jwt_headers: dict[(str, str)] = {'alg': 'HS256', 'kid': self.key_id}
jwt_payload: dict[(str, Any)] = {'iat': int(time.time()), 'exp': (int(time.time()) + 3600), 'aud': self.token_endpoint, 'iss': self.email}
encoded_jwt: str = jwt.encode(payload=jwt_payload, key=self.secret, algorithm='HS256', headers=jwt_headers)
request_data: str = urllib.parse.urlencode({'assertion': encoded_jwt, 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer'})
try:
access_token_response: dict = dtrequests.DTRequest.post(url='', base_url=self.token_endpoint, data=request_data, headers={'Content-Type': 'application/x-www-form-urlencoded'}, skip_auth=True)
except dterrors.BadRequest:
raise dterrors.Unauthorized('Could not authenticate with the provided credentials.\n\nRead more: https://developer.d21s.com/docs/authentication/oauth2#common-errors')
return access_token_response | def _get_access_token(self) -> dict:
'\n Constructs and exchanges the JWT for an access token.\n\n Returns\n -------\n response : dict\n Dictionary containing expiration and the token itself.\n\n Raises\n ------\n BadRequest\n If the provided credentials could not be used for authentication.\n\n '
jwt_headers: dict[(str, str)] = {'alg': 'HS256', 'kid': self.key_id}
jwt_payload: dict[(str, Any)] = {'iat': int(time.time()), 'exp': (int(time.time()) + 3600), 'aud': self.token_endpoint, 'iss': self.email}
encoded_jwt: str = jwt.encode(payload=jwt_payload, key=self.secret, algorithm='HS256', headers=jwt_headers)
request_data: str = urllib.parse.urlencode({'assertion': encoded_jwt, 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer'})
try:
access_token_response: dict = dtrequests.DTRequest.post(url='', base_url=self.token_endpoint, data=request_data, headers={'Content-Type': 'application/x-www-form-urlencoded'}, skip_auth=True)
except dterrors.BadRequest:
raise dterrors.Unauthorized('Could not authenticate with the provided credentials.\n\nRead more: https://developer.d21s.com/docs/authentication/oauth2#common-errors')
return access_token_response<|docstring|>Constructs and exchanges the JWT for an access token.
Returns
-------
response : dict
Dictionary containing expiration and the token itself.
Raises
------
BadRequest
If the provided credentials could not be used for authentication.<|endoftext|> |
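The exchange above is a standard OAuth2 JWT-bearer flow; below is a self-contained sketch of just the assertion construction with PyJWT, using hypothetical placeholder credentials.

```python
import time
import jwt  # PyJWT

# All values below are hypothetical placeholders; this mirrors the assertion
# built above before it is POSTed to the token endpoint as a jwt-bearer grant.
encoded = jwt.encode(
    payload={
        'iat': int(time.time()),
        'exp': int(time.time()) + 3600,
        'aud': 'https://<TOKEN_ENDPOINT>',
        'iss': '<SERVICE_ACCOUNT_EMAIL>',
    },
    key='<SERVICE_ACCOUNT_SECRET>',
    algorithm='HS256',
    headers={'kid': '<SERVICE_ACCOUNT_KEY_ID>'},
)
print(encoded[:20], '...')
```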
99ba1a21cbd1a739f120f7acb06ed3be5b11a0bae4fb7c115819d1d0ea0631ac | @classmethod
def service_account(cls, key_id: str, secret: str, email: str) -> ServiceAccountAuth:
'\n This method uses an OAuth2 authentication flow. With the provided\n credentials, a `JWT <https://jwt.io/>`_ is created and exchanged for\n an access token.\n\n Parameters\n ----------\n key_id : str\n Unique Service Account key ID.\n secret : str\n Service Account secret.\n email : str\n Unique Service Account email address.\n\n Returns\n -------\n auth : ServiceAccountAuth\n Object to initialize and maintain authentication to the REST API.\n\n Examples\n --------\n >>> # Authenticate using Service Account credentials.\n >>> dt.default_auth = dt.Auth.service_account(\n >>> key_id="<SERVICE_ACCOUNT_KEY_ID>",\n >>> secret="<SERVICE_ACCOUNT_SECRET>",\n >>> email="<SERVICE_ACCOUNT_EMAIL>",\n >>> )\n\n '
cls._verify_str_credentials({'key_id': key_id, 'secret': secret, 'email': email})
return ServiceAccountAuth(key_id, secret, email) | This method uses an OAuth2 authentication flow. With the provided
credentials, a `JWT <https://jwt.io/>`_ is created and exchanged for
an access token.
Parameters
----------
key_id : str
Unique Service Account key ID.
secret : str
Service Account secret.
email : str
Unique Service Account email address.
Returns
-------
auth : ServiceAccountAuth
Object to initialize and maintain authentication to the REST API.
Examples
--------
>>> # Authenticate using Service Account credentials.
>>> dt.default_auth = dt.Auth.service_account(
>>> key_id="<SERVICE_ACCOUNT_KEY_ID>",
>>> secret="<SERVICE_ACCOUNT_SECRET>",
>>> email="<SERVICE_ACCOUNT_EMAIL>",
>>> ) | disruptive/authentication.py | service_account | friarswood/python-client | 8 | python | @classmethod
def service_account(cls, key_id: str, secret: str, email: str) -> ServiceAccountAuth:
'\n This method uses an OAuth2 authentication flow. With the provided\n credentials, a `JWT <https://jwt.io/>`_ is created and exchanged for\n an access token.\n\n Parameters\n ----------\n key_id : str\n Unique Service Account key ID.\n secret : str\n Service Account secret.\n email : str\n Unique Service Account email address.\n\n Returns\n -------\n auth : ServiceAccountAuth\n Object to initialize and maintain authentication to the REST API.\n\n Examples\n --------\n >>> # Authenticate using Service Account credentials.\n >>> dt.default_auth = dt.Auth.service_account(\n >>> key_id="<SERVICE_ACCOUNT_KEY_ID>",\n >>> secret="<SERVICE_ACCOUNT_SECRET>",\n >>> email="<SERVICE_ACCOUNT_EMAIL>",\n >>> )\n\n '
cls._verify_str_credentials({'key_id': key_id, 'secret': secret, 'email': email})
return ServiceAccountAuth(key_id, secret, email) | @classmethod
def service_account(cls, key_id: str, secret: str, email: str) -> ServiceAccountAuth:
'\n This method uses an OAuth2 authentication flow. With the provided\n credentials, a `JWT <https://jwt.io/>`_ is created and exchanged for\n an access token.\n\n Parameters\n ----------\n key_id : str\n Unique Service Account key ID.\n secret : str\n Service Account secret.\n email : str\n Unique Service Account email address.\n\n Returns\n -------\n auth : ServiceAccountAuth\n Object to initialize and maintain authentication to the REST API.\n\n Examples\n --------\n >>> # Authenticate using Service Account credentials.\n >>> dt.default_auth = dt.Auth.service_account(\n >>> key_id="<SERVICE_ACCOUNT_KEY_ID>",\n >>> secret="<SERVICE_ACCOUNT_SECRET>",\n >>> email="<SERVICE_ACCOUNT_EMAIL>",\n >>> )\n\n '
cls._verify_str_credentials({'key_id': key_id, 'secret': secret, 'email': email})
return ServiceAccountAuth(key_id, secret, email)<|docstring|>This method uses an OAuth2 authentication flow. With the provided
credentials, a `JWT <https://jwt.io/>`_ is created and exchanged for
an access token.
Parameters
----------
key_id : str
Unique Service Account key ID.
secret : str
Service Account secret.
email : str
Unique Service Account email address.
Returns
-------
auth : ServiceAccountAuth
Object to initialize and maintain authentication to the REST API.
Examples
--------
>>> # Authenticate using Service Account credentials.
>>> dt.default_auth = dt.Auth.service_account(
>>> key_id="<SERVICE_ACCOUNT_KEY_ID>",
>>> secret="<SERVICE_ACCOUNT_KEY_ID>",
>>> email="<SERVICE_ACCOUNT_KEY_ID>",
>>> )<|endoftext|> |
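A short usage sketch that sources the credentials from environment variables; the variable names here are illustrative, not part of the library:

import os
import disruptive as dt

# Illustrative variable names -- any source of non-empty strings works.
dt.default_auth = dt.Auth.service_account(
    key_id=os.environ['DT_SERVICE_ACCOUNT_KEY_ID'],
    secret=os.environ['DT_SERVICE_ACCOUNT_SECRET'],
    email=os.environ['DT_SERVICE_ACCOUNT_EMAIL'],
)

Indexing os.environ directly raises KeyError for a missing variable instead of silently passing None through, which is exactly the failure mode _verify_str_credentials (next record) guards against.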
1983721c3527cd077737925680ff0716672456b50994729f5f87c96fb5bf0412 | @staticmethod
def _verify_str_credentials(credentials: dict) -> None:
"\n Verifies that the provided credentials are strings.\n\n This check is added as people use environment variables, but\n if for instance os.environ.get() does not find one, it silently\n returns None. It's better to just check for it early.\n\n Parameters\n ----------\n credentials : dict\n Credentials used to authenticate the REST API.\n\n "
for key in credentials:
if isinstance(credentials[key], str):
if (len(credentials[key]) == 0):
raise dterrors.ConfigurationError('Authentication credential <{}> is empty string.'.format(key))
else:
raise dterrors._raise_builtin(TypeError, 'Authentication credential <{}> got type <{}>. Expected <str>.'.format(key, type(credentials[key]).__name__)) | Verifies that the provided credentials are strings.
This check is added as people use environment variables, but
if for instance os.environ.get() does not find one, it silently
returns None. It's better to just check for it early.
Parameters
----------
credentials : dict
Credentials used to authenticate the REST API. | disruptive/authentication.py | _verify_str_credentials | friarswood/python-client | 8 | python | @staticmethod
def _verify_str_credentials(credentials: dict) -> None:
"\n Verifies that the provided credentials are strings.\n\n This check is added as people use environment variables, but\n if for instance os.environ.get() does not find one, it silently\n returns None. It's better to just check for it early.\n\n Parameters\n ----------\n credentials : dict\n Credentials used to authenticate the REST API.\n\n "
for key in credentials:
if isinstance(credentials[key], str):
if (len(credentials[key]) == 0):
raise dterrors.ConfigurationError('Authentication credential <{}> is empty string.'.format(key))
else:
raise dterrors._raise_builtin(TypeError, 'Authentication credential <{}> got type <{}>. Expected <str>.'.format(key, type(credentials[key]).__name__)) | @staticmethod
def _verify_str_credentials(credentials: dict) -> None:
"\n Verifies that the provided credentials are strings.\n\n This check is added as people use environment variables, but\n if for instance os.environ.get() does not find one, it silently\n returns None. It's better to just check for it early.\n\n Parameters\n ----------\n credentials : dict\n Credentials used to authenticate the REST API.\n\n "
for key in credentials:
if isinstance(credentials[key], str):
if (len(credentials[key]) == 0):
raise dterrors.ConfigurationError('Authentication credential <{}> is empty string.'.format(key))
else:
raise dterrors._raise_builtin(TypeError, 'Authentication credential <{}> got type <{}>. Expected <str>.'.format(key, type(credentials[key]).__name__))<|docstring|>Verifies that the provided credentials are strings.
This check is added as people use environment variables, but
if for instance os.environ.get() does not find one, it silently
returns None. It's better to just check for it early.
Parameters
----------
credentials : dict
Credentials used to authenticate the REST API.<|endoftext|> |
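The failure mode this check guards against, in two lines: os.environ.get silently yields None for a missing variable, which the isinstance check above converts into an immediate, descriptive error instead of a confusing downstream authentication failure:

import os

secret = os.environ.get('SOME_UNSET_VARIABLE')  # None, silently, if unset
# Passing {'secret': secret} to _verify_str_credentials would then raise:
# TypeError: Authentication credential <secret> got type <NoneType>. Expected <str>.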
8966e0727754c2b602020e44ed6039c24f50a3f429fe1f15f1271c4d0fc05984 | def _hermitian_matrix_solve(matrix, rhs, method='default'):
'Matrix_solve using various methods.'
if (method == 'cholesky'):
if (matrix.dtype == tf.float32):
return tf.cholesky_solve(tf.cholesky(matrix), rhs)
else:
matrix_realimag = _complex_to_realimag(matrix)
n = matrix.shape[(- 1)]
rhs_realimag = tf.concat([tf.real(rhs), tf.imag(rhs)], axis=(- 2))
lhs_realimag = tf.cholesky_solve(tf.cholesky(matrix_realimag), rhs_realimag)
return tf.complex(lhs_realimag[(..., :n, :)], lhs_realimag[(..., n:, :)])
elif (method == 'ls'):
return tf.matrix_solve_ls(matrix, rhs)
elif (method == 'default'):
return tf.matrix_solve(matrix, rhs)
else:
raise ValueError(f'Unknown matrix solve method {method}.') | Matrix_solve using various methods. | models/train/multichannel_filtering.py | _hermitian_matrix_solve | marciopuga/sound-separation | 412 | python | def _hermitian_matrix_solve(matrix, rhs, method='default'):
if (method == 'cholesky'):
if (matrix.dtype == tf.float32):
return tf.cholesky_solve(tf.cholesky(matrix), rhs)
else:
matrix_realimag = _complex_to_realimag(matrix)
n = matrix.shape[(- 1)]
rhs_realimag = tf.concat([tf.real(rhs), tf.imag(rhs)], axis=(- 2))
lhs_realimag = tf.cholesky_solve(tf.cholesky(matrix_realimag), rhs_realimag)
return tf.complex(lhs_realimag[(..., :n, :)], lhs_realimag[(..., n:, :)])
elif (method == 'ls'):
return tf.matrix_solve_ls(matrix, rhs)
elif (method == 'default'):
return tf.matrix_solve(matrix, rhs)
else:
raise ValueError(f'Unknown matrix solve method {method}.') | def _hermitian_matrix_solve(matrix, rhs, method='default'):
if (method == 'cholesky'):
if (matrix.dtype == tf.float32):
return tf.cholesky_solve(tf.cholesky(matrix), rhs)
else:
matrix_realimag = _complex_to_realimag(matrix)
n = matrix.shape[(- 1)]
rhs_realimag = tf.concat([tf.real(rhs), tf.imag(rhs)], axis=(- 2))
lhs_realimag = tf.cholesky_solve(tf.cholesky(matrix_realimag), rhs_realimag)
return tf.complex(lhs_realimag[(..., :n, :)], lhs_realimag[(..., n:, :)])
elif (method == 'ls'):
return tf.matrix_solve_ls(matrix, rhs)
elif (method == 'default'):
return tf.matrix_solve(matrix, rhs)
else:
raise ValueError(f'Unknown matrix solve method {method}.')<|docstring|>Matrix_solve using various methods.<|endoftext|> |
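The 'cholesky' branch for complex inputs relies on the standard real embedding of a Hermitian system: writing M = A + iB, the real matrix [[A, -B], [B, A]] is symmetric (and positive-definite when M is), and solving it against stacked real/imaginary right-hand sides recovers the complex solution. A NumPy sketch of that identity, assuming _complex_to_realimag (not shown above) builds exactly this block matrix:

import numpy as np

rng = np.random.default_rng(0)
n = 4
g = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
m = g @ g.conj().T + n * np.eye(n)                 # Hermitian positive-definite
rhs = rng.standard_normal((n, 1)) + 1j * rng.standard_normal((n, 1))

# Real symmetric positive-definite embedding of the complex system.
m_realimag = np.block([[m.real, -m.imag], [m.imag, m.real]])
rhs_realimag = np.concatenate([rhs.real, rhs.imag], axis=0)

lhs = np.linalg.solve(m_realimag, rhs_realimag)
x = lhs[:n] + 1j * lhs[n:]
assert np.allclose(m @ x, rhs)                     # matches the direct complex solve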
f0cd1b38541be2d701e76a559b49983401864abf7531fb7e74af94bdef67236b | def _add_diagonal_matrix(ryy, diagload=0.001, epsilon=1e-08, use_diagonal_of=None):
'Regularize matrix usually before taking its inverse.\n\n Update ryy matrix with ryy += diagload * diag(matrix) + epsilon * I\n where matrix is either equal to ryy or another matrix given by\n use_diagonal_of parameter and I is the identity matrix and diag(.) is the\n diagonal matrix obtained from its argument.\n\n Args:\n ryy: A [..., mic, mic] complex64/float32 tensor, covariance matrix.\n diagload: A float32 value.\n epsilon: A float32 value.\n use_diagonal_of: None or another tensor [..., mic, mic] whose diagonal\n is used. If None, diagonal of ryy is used.\n\n Returns:\n [..., mic, mic] tensor, ryy + diagload * diag(use_diagonal_of) + epsilon*I.\n '
mic = signal_util.static_or_dynamic_dim_size(ryy, (- 1))
if (use_diagonal_of is None):
use_diagonal_of = ryy
diagonal_matrix = (((diagload * use_diagonal_of) + epsilon) * tf.eye(mic, dtype=ryy.dtype))
return (ryy + diagonal_matrix) | Regularize matrix usually before taking its inverse.
Update ryy matrix with ryy += diagload * diag(matrix) + epsilon * I
where matrix is either equal to ryy or another matrix given by
use_diagonal_of parameter and I is the identity matrix and diag(.) is the
diagonal matrix obtained from its argument.
Args:
ryy: A [..., mic, mic] complex64/float32 tensor, covariance matrix.
diagload: A float32 value.
epsilon: A float32 value.
use_diagonal_of: None or another tensor [..., mic, mic] whose diagonal
is used. If None, diagonal of ryy is used.
Returns:
[..., mic, mic] tensor, ryy + diagload * diag(use_diagonal_of) + epsilon*I. | models/train/multichannel_filtering.py | _add_diagonal_matrix | marciopuga/sound-separation | 412 | python | def _add_diagonal_matrix(ryy, diagload=0.001, epsilon=1e-08, use_diagonal_of=None):
'Regularize matrix usually before taking its inverse.\n\n Update ryy matrix with ryy += diagload * diag(matrix) + epsilon * I\n where matrix is either equal to ryy or another matrix given by\n use_diagonal_of parameter and I is the identity matrix and diag(.) is the\n diagonal matrix obtained from its argument.\n\n Args:\n ryy: A [..., mic, mic] complex64/float32 tensor, covariance matrix.\n diagload: A float32 value.\n epsilon: A float32 value.\n use_diagonal_of: None or another tensor [..., mic, mic] whose diagonal\n is used. If None, diagonal of ryy is used.\n\n Returns:\n [..., mic, mic] tensor, ryy + diagload * diag(use_diagonal_of) + epsilon*I.\n '
mic = signal_util.static_or_dynamic_dim_size(ryy, (- 1))
if (use_diagonal_of is None):
use_diagonal_of = ryy
diagonal_matrix = (((diagload * use_diagonal_of) + epsilon) * tf.eye(mic, dtype=ryy.dtype))
return (ryy + diagonal_matrix) | def _add_diagonal_matrix(ryy, diagload=0.001, epsilon=1e-08, use_diagonal_of=None):
'Regularize matrix usually before taking its inverse.\n\n Update ryy matrix with ryy += diagload * diag(matrix) + epsilon * I\n where matrix is either equal to ryy or another matrix given by\n use_diagonal_of parameter and I is the identity matrix and diag(.) is the\n diagonal matrix obtained from its argument.\n\n Args:\n ryy: A [..., mic, mic] complex64/float32 tensor, covariance matrix.\n diagload: A float32 value.\n epsilon: A float32 value.\n use_diagonal_of: None or another tensor [..., mic, mic] whose diagonal\n is used. If None, diagonal of ryy is used.\n\n Returns:\n [..., mic, mic] tensor, ryy + diagload * diag(use_diagonal_of) + epsilon*I.\n '
mic = signal_util.static_or_dynamic_dim_size(ryy, (- 1))
if (use_diagonal_of is None):
use_diagonal_of = ryy
diagonal_matrix = (((diagload * use_diagonal_of) + epsilon) * tf.eye(mic, dtype=ryy.dtype))
return (ryy + diagonal_matrix)<|docstring|>Regularize matrix usually before taking its inverse.
Update ryy matrix with ryy += diagload * diag(matrix) + epsilon * I
where matrix is either equal to ryy or another matrix given by
use_diagonal_of parameter and I is the identity matrix and diag(.) is the
diagonal matrix obtained from its argument.
Args:
ryy: A [..., mic, mic] complex64/float32 tensor, covariance matrix.
diagload: A float32 value.
epsilon: A float32 value.
use_diagonal_of: None or another tensor [..., mic, mic] whose diagonal
is used. If None, diagonal of ryy is used.
Returns:
[..., mic, mic] tensor, ryy + diagload * diag(use_diagonal_of) + epsilon*I.<|endoftext|> |
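A NumPy sketch of the same regularization for a single matrix. Note that multiplying the full matrix elementwise by the identity keeps only its diagonal, which is how the TF expression above realizes diagload * diag(use_diagonal_of) + epsilon * I:

import numpy as np

def add_diagonal_matrix(ryy, diagload=1e-3, epsilon=1e-8, use_diagonal_of=None):
    # Minimal single-matrix mirror of the TF code above.
    if use_diagonal_of is None:
        use_diagonal_of = ryy
    mic = ryy.shape[-1]
    # Elementwise product with the identity zeroes the off-diagonal entries.
    return ryy + (diagload * use_diagonal_of + epsilon) * np.eye(mic, dtype=ryy.dtype)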
0159a9a2fa2807a20d37c0ddf7b4bac8088f1f80764ab379cca9a8bfb3b6c8e9 | def _get_beamformer_from_covariances(y_cov, t_cov, diagload=0.001, epsilon=1e-08, refmic=0, beamformer_type='wiener'):
"Calculates beamformers from full covariance estimates.\n\n Typically mixture signal covariance is estimated from the mixture signal and\n the target covariance is estimated using a mask-based covariance estimation.\n\n Args:\n y_cov: Mixture signal covariance of shape [..., mic, mic].\n t_cov: Source signal covariance estimate of shape [..., mic, mic, source].\n diagload: diagonal loading factor.\n epsilon: data-independent stabilizer for diagonal loading.\n refmic: Reference mic.\n beamformer_type: 'wiener' or 'mvdr' or 'mpdr'.\n Returns:\n beamformers w of shape [..., mic, source].\n "
y_cov_rank = tf.get_static_value(tf.rank(y_cov))
start = (y_cov_rank - 2)
prefix = list(range(start))
if (y_cov_rank < 2):
raise ValueError('Unsupported y_cov rank {}'.format(y_cov_rank))
if (beamformer_type == 'wiener'):
w = _hermitian_matrix_solve(_add_diagonal_matrix(y_cov, diagload, epsilon), t_cov[(..., refmic, :)])
elif beamformer_type.startswith('mvdr'):
mu = 0.0
t_cov = tf.transpose(t_cov, (prefix + [(start + 2), start, (start + 1)]))
nt_cov = (tf.reduce_sum(t_cov, axis=(- 3), keepdims=True) - t_cov)
y_cov = tf.expand_dims(y_cov, axis=(- 3))
nt_inv_t_matrix = _hermitian_matrix_solve(_add_diagonal_matrix(nt_cov, diagload=0.01, epsilon=epsilon, use_diagonal_of=y_cov), t_cov)
scale = tf.reciprocal(((mu + tf.linalg.trace(nt_inv_t_matrix)) + 1e-08))
scale = tf.expand_dims(scale, (- 1))
w = (scale * nt_inv_t_matrix[(..., refmic)])
w = tf.transpose(w, (prefix + [(start + 1), start]))
elif (beamformer_type == 'mpdr'):
t_cov = tf.transpose(t_cov, (prefix + [(start + 2), start, (start + 1)]))
y_cov = tf.expand_dims(y_cov, axis=(- 3))
y_cov = tf.broadcast_to(y_cov, tf.shape(t_cov))
y_inv_t_matrix = _hermitian_matrix_solve(_add_diagonal_matrix(y_cov, diagload, epsilon), t_cov)
scale = tf.reciprocal((tf.linalg.trace(y_inv_t_matrix) + 1e-08))
scale = tf.cast(tf.expand_dims(scale, (- 1)), dtype=y_cov.dtype)
w = (scale * y_inv_t_matrix[(..., refmic)])
w = tf.transpose(w, (prefix + [(start + 1), start]))
else:
raise ValueError('Unknown beamformer type {}.'.format(beamformer_type))
return w | Calculates beamformers from full covariance estimates.
Typically mixture signal covariance is estimated from the mixture signal and
the target covariance is estimated using a mask-based covariance estimation.
Args:
y_cov: Mixture signal covariance of shape [..., mic, mic].
t_cov: Source signal covariance estimate of shape [..., mic, mic, source].
diagload: diagonal loading factor.
epsilon: data-independent stabilizer for diagonal loading.
refmic: Reference mic.
beamformer_type: 'wiener' or 'mvdr' or 'mpdr'.
Returns:
beamformers w of shape [..., mic, source]. | models/train/multichannel_filtering.py | _get_beamformer_from_covariances | marciopuga/sound-separation | 412 | python | def _get_beamformer_from_covariances(y_cov, t_cov, diagload=0.001, epsilon=1e-08, refmic=0, beamformer_type='wiener'):
"Calculates beamformers from full covariance estimates.\n\n Typically mixture signal covariance is estimated from the mixture signal and\n the target covariance is estimated using a mask-based covariance estimation.\n\n Args:\n y_cov: Mixture signal covariance of shape [..., mic, mic].\n t_cov: Source signal covariance estimate of shape [..., mic, mic, source].\n diagload: diagonal loading factor.\n epsilon: data-independent stabilizer for diagonal loading.\n refmic: Reference mic.\n beamformer_type: 'wiener' or 'mvdr' or 'mpdr'.\n Returns:\n beamformers w of shape [..., mic, source].\n "
y_cov_rank = tf.get_static_value(tf.rank(y_cov))
start = (y_cov_rank - 2)
prefix = list(range(start))
if (y_cov_rank < 2):
raise ValueError('Unsupported y_cov rank {}'.format(y_cov_rank))
if (beamformer_type == 'wiener'):
w = _hermitian_matrix_solve(_add_diagonal_matrix(y_cov, diagload, epsilon), t_cov[(..., refmic, :)])
elif beamformer_type.startswith('mvdr'):
mu = 0.0
t_cov = tf.transpose(t_cov, (prefix + [(start + 2), start, (start + 1)]))
nt_cov = (tf.reduce_sum(t_cov, axis=(- 3), keepdims=True) - t_cov)
y_cov = tf.expand_dims(y_cov, axis=(- 3))
nt_inv_t_matrix = _hermitian_matrix_solve(_add_diagonal_matrix(nt_cov, diagload=0.01, epsilon=epsilon, use_diagonal_of=y_cov), t_cov)
scale = tf.reciprocal(((mu + tf.linalg.trace(nt_inv_t_matrix)) + 1e-08))
scale = tf.expand_dims(scale, (- 1))
w = (scale * nt_inv_t_matrix[(..., refmic)])
w = tf.transpose(w, (prefix + [(start + 1), start]))
elif (beamformer_type == 'mpdr'):
t_cov = tf.transpose(t_cov, (prefix + [(start + 2), start, (start + 1)]))
y_cov = tf.expand_dims(y_cov, axis=(- 3))
y_cov = tf.broadcast_to(y_cov, tf.shape(t_cov))
y_inv_t_matrix = _hermitian_matrix_solve(_add_diagonal_matrix(y_cov, diagload, epsilon), t_cov)
scale = tf.reciprocal((tf.linalg.trace(y_inv_t_matrix) + 1e-08))
scale = tf.cast(tf.expand_dims(scale, (- 1)), dtype=y_cov.dtype)
w = (scale * y_inv_t_matrix[(..., refmic)])
w = tf.transpose(w, (prefix + [(start + 1), start]))
else:
raise ValueError('Unknown beamformer type {}.'.format(beamformer_type))
return w | def _get_beamformer_from_covariances(y_cov, t_cov, diagload=0.001, epsilon=1e-08, refmic=0, beamformer_type='wiener'):
"Calculates beamformers from full covariance estimates.\n\n Typically mixture signal covariance is estimated from the mixture signal and\n the target covariance is estimated using a mask-based covariance estimation.\n\n Args:\n y_cov: Mixture signal covariance of shape [..., mic, mic].\n t_cov: Source signal covariance estimate of shape [..., mic, mic, source].\n diagload: diagonal loading factor.\n epsilon: data-independent stabilizer for diagonal loading.\n refmic: Reference mic.\n beamformer_type: 'wiener' or 'mvdr' or 'mpdr'.\n Returns:\n beamformers w of shape [..., mic, source].\n "
y_cov_rank = tf.get_static_value(tf.rank(y_cov))
start = (y_cov_rank - 2)
prefix = list(range(start))
if (y_cov_rank < 2):
raise ValueError('Unsupported y_cov rank {}'.format(y_cov_rank))
if (beamformer_type == 'wiener'):
w = _hermitian_matrix_solve(_add_diagonal_matrix(y_cov, diagload, epsilon), t_cov[(..., refmic, :)])
elif beamformer_type.startswith('mvdr'):
mu = 0.0
t_cov = tf.transpose(t_cov, (prefix + [(start + 2), start, (start + 1)]))
nt_cov = (tf.reduce_sum(t_cov, axis=(- 3), keepdims=True) - t_cov)
y_cov = tf.expand_dims(y_cov, axis=(- 3))
nt_inv_t_matrix = _hermitian_matrix_solve(_add_diagonal_matrix(nt_cov, diagload=0.01, epsilon=epsilon, use_diagonal_of=y_cov), t_cov)
scale = tf.reciprocal(((mu + tf.linalg.trace(nt_inv_t_matrix)) + 1e-08))
scale = tf.expand_dims(scale, (- 1))
w = (scale * nt_inv_t_matrix[(..., refmic)])
w = tf.transpose(w, (prefix + [(start + 1), start]))
elif (beamformer_type == 'mpdr'):
t_cov = tf.transpose(t_cov, (prefix + [(start + 2), start, (start + 1)]))
y_cov = tf.expand_dims(y_cov, axis=(- 3))
y_cov = tf.broadcast_to(y_cov, tf.shape(t_cov))
y_inv_t_matrix = _hermitian_matrix_solve(_add_diagonal_matrix(y_cov, diagload, epsilon), t_cov)
scale = tf.reciprocal((tf.linalg.trace(y_inv_t_matrix) + 1e-08))
scale = tf.cast(tf.expand_dims(scale, (- 1)), dtype=y_cov.dtype)
w = (scale * y_inv_t_matrix[(..., refmic)])
w = tf.transpose(w, (prefix + [(start + 1), start]))
else:
raise ValueError('Unknown beamformer type {}.'.format(beamformer_type))
return w<|docstring|>Calculates beamformers from full covariance estimates.
Typically mixture signal covariance is estimated from the mixture signal and
the target covariance is estimated using a mask-based covariance estimation.
Args:
y_cov: Mixture signal covariance of shape [..., mic, mic].
t_cov: Source signal covariance estimate of shape [..., mic, mic, source].
diagload: diagonal loading factor.
epsilon: data-independent stabilizer for diagonal loading.
refmic: Reference mic.
beamformer_type: 'wiener' or 'mvdr' or 'mpdr'.
Returns:
beamformers w of shape [..., mic, source].<|endoftext|> |
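For intuition, a single-bin NumPy version of the 'wiener' branch: with mixture covariance Phi_yy and one source's covariance Phi_ss, the multichannel Wiener filter toward the reference mic is w = Phi_yy^{-1} Phi_ss e_ref, with the same diagonal loading as above. This is a sketch for one frequency bin and one source, not the batched TF path:

import numpy as np

def wiener_beamformer(y_cov, t_cov, refmic=0, diagload=1e-3, epsilon=1e-8):
    # y_cov: [mic, mic] mixture covariance; t_cov: [mic, mic] one source's covariance.
    mic = y_cov.shape[-1]
    loaded = y_cov + (diagload * y_cov + epsilon) * np.eye(mic)
    return np.linalg.solve(loaded, t_cov[:, refmic])   # w: [mic]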
cb8017dd920e05b7cad9971c979d4a4596f7ef416f46fdd392299cc6427e6d91 | def _estimate_time_invariant_covariances(y, t, use_complex_mask=False, refmic=0):
'Find time-invariant covariance matrices from masks.\n\n The inputs are the mixture signal and source estimates.\n Args:\n y: Mixture signal with shape [batch, mic, frame, bin].\n t: Source estimates at reference mic [batch, source, frame, bin].\n use_complex_mask: If True, use a complex mask.\n refmic: Reference microphone index.\n Returns:\n y_ti_cov: time-invariant spatial covariance matrix for mixture signal of\n shape [batch, bin, mic, mic].\n t_ti_cov: time-invariant spatial covariance matrix for source signals of\n shape [batch, bin, mic, mic, source].\n '
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'bin'], ['batch', 'frame', 'bin', 'mic', 1])
t = tensor_shaper.change(t, ['batch', 'source', 'frame', 'bin'], ['batch', 'frame', 'bin', 'source'])
y_outprod = tf.matmul(y, y, adjoint_b=True)
tensor_shaper.register_axes(y_outprod, ['batch', 'frame', 'bin', 'mic', 'mic'])
y_ti_cov = tf.reduce_mean(y_outprod, axis=1)
tensor_shaper.register_axes(y_ti_cov, ['batch', 'bin', 'mic', 'mic'])
t_power = tf.square(tf.abs(t))
if use_complex_mask:
y_refmic = y[(:, :, :, refmic:(refmic + 1), 0)]
y_refmic_power = tf.square(tf.abs(y_refmic))
power_limit = 1e-08
est_masks = tf.where(tf.logical_and((y_refmic_power > power_limit), (t_power < (y_refmic_power * 3.0))), (t / (y_refmic + power_limit)), tf.zeros_like(t))
est_masks = tf.conj(est_masks)
else:
power_offset = 1e-08
t_power += power_offset
est_masks = (t_power / tf.reduce_sum(t_power, axis=(- 1), keepdims=True))
est_masks = tf.cast(est_masks, dtype=y_outprod.dtype)
est_masks = tensor_shaper.change(est_masks, ['batch', 'frame', 'bin', 'source'], ['batch', 'frame', 'bin', 1, 1, 'source'])
masked_y_outprod = (tf.expand_dims(y_outprod, axis=(- 1)) * est_masks)
tensor_shaper.register_axes(masked_y_outprod, ['batch', 'frame', 'bin', 'mic', 'mic', 'source'])
t_ti_cov = tf.reduce_mean(masked_y_outprod, axis=1)
tensor_shaper.register_axes(t_ti_cov, ['batch', 'bin', 'mic', 'mic', 'source'])
return (y_ti_cov, t_ti_cov) | Find time-invariant covariance matrices from masks.
The inputs are the mixture signal and source estimates.
Args:
y: Mixture signal with shape [batch, mic, frame, bin].
t: Source estimates at reference mic [batch, source, frame, bin].
use_complex_mask: If True, use a complex mask.
refmic: Reference microphone index.
Returns:
y_ti_cov: time-invariant spatial covariance matrix for mixture signal of
shape [batch, bin, mic, mic].
t_ti_cov: time-invariant spatial covariance matrix for source signals of
shape [batch, bin, mic, mic, source]. | models/train/multichannel_filtering.py | _estimate_time_invariant_covariances | marciopuga/sound-separation | 412 | python | def _estimate_time_invariant_covariances(y, t, use_complex_mask=False, refmic=0):
'Find time-invariant covariance matrices from masks.\n\n The inputs are the mixture signal and source estimates.\n Args:\n y: Mixture signal with shape [batch, mic, frame, bin].\n t: Source estimates at reference mic [batch, source, frame, bin].\n use_complex_mask: If True, use a complex mask.\n refmic: Reference microphone index.\n Returns:\n y_ti_cov: time-invariant spatial covariance matrix for mixture signal of\n shape [batch, bin, mic, mic].\n t_ti_cov: time-invariant spatial covariance matrix for source signals of\n shape [batch, bin, mic, mic, source].\n '
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'bin'], ['batch', 'frame', 'bin', 'mic', 1])
t = tensor_shaper.change(t, ['batch', 'source', 'frame', 'bin'], ['batch', 'frame', 'bin', 'source'])
y_outprod = tf.matmul(y, y, adjoint_b=True)
tensor_shaper.register_axes(y_outprod, ['batch', 'frame', 'bin', 'mic', 'mic'])
y_ti_cov = tf.reduce_mean(y_outprod, axis=1)
tensor_shaper.register_axes(y_ti_cov, ['batch', 'bin', 'mic', 'mic'])
t_power = tf.square(tf.abs(t))
if use_complex_mask:
y_refmic = y[(:, :, :, refmic:(refmic + 1), 0)]
y_refmic_power = tf.square(tf.abs(y_refmic))
power_limit = 1e-08
est_masks = tf.where(tf.logical_and((y_refmic_power > power_limit), (t_power < (y_refmic_power * 3.0))), (t / (y_refmic + power_limit)), tf.zeros_like(t))
est_masks = tf.conj(est_masks)
else:
power_offset = 1e-08
t_power += power_offset
est_masks = (t_power / tf.reduce_sum(t_power, axis=(- 1), keepdims=True))
est_masks = tf.cast(est_masks, dtype=y_outprod.dtype)
est_masks = tensor_shaper.change(est_masks, ['batch', 'frame', 'bin', 'source'], ['batch', 'frame', 'bin', 1, 1, 'source'])
masked_y_outprod = (tf.expand_dims(y_outprod, axis=(- 1)) * est_masks)
tensor_shaper.register_axes(masked_y_outprod, ['batch', 'frame', 'bin', 'mic', 'mic', 'source'])
t_ti_cov = tf.reduce_mean(masked_y_outprod, axis=1)
tensor_shaper.register_axes(t_ti_cov, ['batch', 'bin', 'mic', 'mic', 'source'])
return (y_ti_cov, t_ti_cov) | def _estimate_time_invariant_covariances(y, t, use_complex_mask=False, refmic=0):
'Find time-invariant covariance matrices from masks.\n\n The inputs are the mixture signal and source estimates.\n Args:\n y: Mixture signal with shape [batch, mic, frame, bin].\n t: Source estimates at reference mic [batch, source, frame, bin].\n use_complex_mask: If True, use a complex mask.\n refmic: Reference microphone index.\n Returns:\n y_ti_cov: time-invariant spatial covariance matrix for mixture signal of\n shape [batch, bin, mic, mic].\n t_ti_cov: time-invariant spatial covariance matrix for source signals of\n shape [batch, bin, mic, mic, source].\n '
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'bin'], ['batch', 'frame', 'bin', 'mic', 1])
t = tensor_shaper.change(t, ['batch', 'source', 'frame', 'bin'], ['batch', 'frame', 'bin', 'source'])
y_outprod = tf.matmul(y, y, adjoint_b=True)
tensor_shaper.register_axes(y_outprod, ['batch', 'frame', 'bin', 'mic', 'mic'])
y_ti_cov = tf.reduce_mean(y_outprod, axis=1)
tensor_shaper.register_axes(y_ti_cov, ['batch', 'bin', 'mic', 'mic'])
t_power = tf.square(tf.abs(t))
if use_complex_mask:
y_refmic = y[(:, :, :, refmic:(refmic + 1), 0)]
y_refmic_power = tf.square(tf.abs(y_refmic))
power_limit = 1e-08
est_masks = tf.where(tf.logical_and((y_refmic_power > power_limit), (t_power < (y_refmic_power * 3.0))), (t / (y_refmic + power_limit)), tf.zeros_like(t))
est_masks = tf.conj(est_masks)
else:
power_offset = 1e-08
t_power += power_offset
est_masks = (t_power / tf.reduce_sum(t_power, axis=(- 1), keepdims=True))
est_masks = tf.cast(est_masks, dtype=y_outprod.dtype)
est_masks = tensor_shaper.change(est_masks, ['batch', 'frame', 'bin', 'source'], ['batch', 'frame', 'bin', 1, 1, 'source'])
masked_y_outprod = (tf.expand_dims(y_outprod, axis=(- 1)) * est_masks)
tensor_shaper.register_axes(masked_y_outprod, ['batch', 'frame', 'bin', 'mic', 'mic', 'source'])
t_ti_cov = tf.reduce_mean(masked_y_outprod, axis=1)
tensor_shaper.register_axes(t_ti_cov, ['batch', 'bin', 'mic', 'mic', 'source'])
return (y_ti_cov, t_ti_cov)<|docstring|>Find time-invariant covariance matrices from masks.
The inputs are the mixture signal and source estimates.
Args:
y: Mixture signal with shape [batch, mic, frame, bin].
t: Source estimates at reference mic [batch, source, frame, bin].
use_complex_mask: If True, use a complex mask.
refmic: Reference microphone index.
Returns:
y_ti_cov: time-invariant spatial covariance matrix for mixture signal of
shape [batch, bin, mic, mic].
t_ti_cov: time-invariant spatial covariance matrix for source signals of
shape [batch, bin, mic, mic, source].<|endoftext|> |
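A single-bin NumPy sketch of the magnitude-mask path above: per-frame outer products of the mixture are weighted by power-normalized masks per source and averaged over frames. Illustration only; the complex-mask branch is omitted:

import numpy as np

def masked_covariances(y, t, power_offset=1e-8):
    # y: [frame, mic] mixture STFT at one bin; t: [frame, source] ref-mic estimates.
    outprod = np.einsum('fm,fn->fmn', y, y.conj())         # y_f y_f^H per frame
    y_cov = outprod.mean(axis=0)                           # [mic, mic]
    power = np.abs(t) ** 2 + power_offset
    masks = power / power.sum(axis=-1, keepdims=True)      # [frame, source]
    t_cov = np.einsum('fmn,fs->mns', outprod, masks) / len(y)
    return y_cov, t_cov                                    # [mic, mic], [mic, mic, source]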
491f98de4f486335fcb4a3adea6ef8974a840f2e2412aad62d8509464c2dffc7 | def time_invariant_multichannel_filtering(y, t, use_complex_mask=False, beamformer_type='wiener', refmic=0, diagload=0.001, epsilon=1e-08):
"Computes a multi-channel Wiener filter from time-invariant covariances.\n\n Args:\n y: [batch, mic, frame, bin], complex64, mixture spectrogram.\n t: [batch, source, frame, bin], complex64, estimated spectrogram.\n use_complex_mask: If True, use a complex mask.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n refmic: index of the reference mic.\n diagload: A float32 value, diagonal loading for the matrix inversion in\n beamforming.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n bf_y: [batch, source, frame, bin], complex64, beamformed spectrogram.\n w_H: [batch, bin, source, mic], complex64, beamformer coefficient conjugate.\n "
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
with tf.name_scope(None, 'time_invariant_multichannel_wiener_filter'):
(y_ti_cov, t_ti_cov) = _estimate_time_invariant_covariances(y, t, use_complex_mask, refmic)
tensor_shaper.register_axes(y_ti_cov, ['batch', 'bin', 'mic', 'mic'])
tensor_shaper.register_axes(t_ti_cov, ['batch', 'bin', 'mic', 'mic', 'source'])
w = _get_beamformer_from_covariances(y_ti_cov, t_ti_cov, diagload=diagload, epsilon=epsilon, refmic=refmic, beamformer_type=beamformer_type)
w_h = tf.conj(tensor_shaper.change(w, ['batch', 'bin', 'mic', 'source'], ['batch', 'bin', 'source', 'mic']))
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'bin'], ['batch', 'bin', 'mic', 'frame'])
w_h_y = tf.matmul(w_h, y)
bf_y = tensor_shaper.change(w_h_y, ['batch', 'bin', 'source', 'frame'], ['batch', 'source', 'frame', 'bin'])
return (bf_y, w_h) | Computes a multi-channel Wiener filter from time-invariant covariances.
Args:
y: [batch, mic, frame, bin], complex64, mixture spectrogram.
t: [batch, source, frame, bin], complex64, estimated spectrogram.
use_complex_mask: If True, use a complex mask.
beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'
or 'mpdr'.
refmic: index of the reference mic.
diagload: A float32 value, diagonal loading for the matrix inversion in
beamforming.
epsilon: A float32 value, data-independent stabilizer for diagonal loading.
Returns:
bf_y: [batch, source, frame, bin], complex64, beamformed spectrogram.
w_H: [batch, bin, source, mic], complex64, beamformer coefficient conjugate. | models/train/multichannel_filtering.py | time_invariant_multichannel_filtering | marciopuga/sound-separation | 412 | python | def time_invariant_multichannel_filtering(y, t, use_complex_mask=False, beamformer_type='wiener', refmic=0, diagload=0.001, epsilon=1e-08):
"Computes a multi-channel Wiener filter from time-invariant covariances.\n\n Args:\n y: [batch, mic, frame, bin], complex64, mixture spectrogram.\n t: [batch, source, frame, bin], complex64, estimated spectrogram.\n use_complex_mask: If True, use a complex mask.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n refmic: index of the reference mic.\n diagload: A float32 value, diagonal loading for the matrix inversion in\n beamforming.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n bf_y: [batch, source, frame, bin], complex64, beamformed spectrogram.\n w_H: [batch, bin, source, mic], complex64, beamformer coefficient conjugate.\n "
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
with tf.name_scope(None, 'time_invariant_multichannel_wiener_filter'):
(y_ti_cov, t_ti_cov) = _estimate_time_invariant_covariances(y, t, use_complex_mask, refmic)
tensor_shaper.register_axes(y_ti_cov, ['batch', 'bin', 'mic', 'mic'])
tensor_shaper.register_axes(t_ti_cov, ['batch', 'bin', 'mic', 'mic', 'source'])
w = _get_beamformer_from_covariances(y_ti_cov, t_ti_cov, diagload=diagload, epsilon=epsilon, refmic=refmic, beamformer_type=beamformer_type)
w_h = tf.conj(tensor_shaper.change(w, ['batch', 'bin', 'mic', 'source'], ['batch', 'bin', 'source', 'mic']))
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'bin'], ['batch', 'bin', 'mic', 'frame'])
w_h_y = tf.matmul(w_h, y)
bf_y = tensor_shaper.change(w_h_y, ['batch', 'bin', 'source', 'frame'], ['batch', 'source', 'frame', 'bin'])
return (bf_y, w_h) | def time_invariant_multichannel_filtering(y, t, use_complex_mask=False, beamformer_type='wiener', refmic=0, diagload=0.001, epsilon=1e-08):
"Computes a multi-channel Wiener filter from time-invariant covariances.\n\n Args:\n y: [batch, mic, frame, bin], complex64, mixture spectrogram.\n t: [batch, source, frame, bin], complex64, estimated spectrogram.\n use_complex_mask: If True, use a complex mask.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n refmic: index of the reference mic.\n diagload: A float32 value, diagonal loading for the matrix inversion in\n beamforming.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n bf_y: [batch, source, frame, bin], complex64, beamformed spectrogram.\n w_H: [batch, bin, source, mic], complex64, beamformer coefficient conjugate.\n "
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
with tf.name_scope(None, 'time_invariant_multichannel_wiener_filter'):
(y_ti_cov, t_ti_cov) = _estimate_time_invariant_covariances(y, t, use_complex_mask, refmic)
tensor_shaper.register_axes(y_ti_cov, ['batch', 'bin', 'mic', 'mic'])
tensor_shaper.register_axes(t_ti_cov, ['batch', 'bin', 'mic', 'mic', 'source'])
w = _get_beamformer_from_covariances(y_ti_cov, t_ti_cov, diagload=diagload, epsilon=epsilon, refmic=refmic, beamformer_type=beamformer_type)
w_h = tf.conj(tensor_shaper.change(w, ['batch', 'bin', 'mic', 'source'], ['batch', 'bin', 'source', 'mic']))
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'bin'], ['batch', 'bin', 'mic', 'frame'])
w_h_y = tf.matmul(w_h, y)
bf_y = tensor_shaper.change(w_h_y, ['batch', 'bin', 'source', 'frame'], ['batch', 'source', 'frame', 'bin'])
return (bf_y, w_h)<|docstring|>Computes a multi-channel Wiener filter from time-invariant covariances.
Args:
y: [batch, mic, frame, bin], complex64, mixture spectrogram.
t: [batch, source, frame, bin], complex64, estimated spectrogram.
use_complex_mask: If True, use a complex mask.
beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'
or 'mpdr'.
refmic: index of the reference mic.
diagload: A float32 value, diagonal loading for the matrix inversion in
beamforming.
epsilon: A float32 value, data-independent stabilizer for diagonal loading.
Returns:
bf_y: [batch, source, frame, bin], complex64, beamformed spectrogram.
w_H: [batch, bin, source, mic], complex64, beamformer coefficient conjugate.<|endoftext|> |
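The final projection in isolation: given the conjugated weights w_h returned above, beamforming reduces to a per-bin contraction over mics. A one-line NumPy equivalent of the matmul-and-transpose sequence, with shapes as in the docstring:

import numpy as np

def apply_beamformer(w_h, y):
    # w_h: [batch, bin, source, mic]; y: [batch, mic, frame, bin]
    # Returns [batch, source, frame, bin], i.e. bf_y = w^H y per bin.
    return np.einsum('bksm,bmfk->bsfk', w_h, y)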
c7b6545df07d1bc67370dcc0605a6e961c9965ed8e575ad2e29dba5dd9bbe052 | def compute_multichannel_filter(y, t, use_complex_mask=False, frame_context_length=1, frame_context_type='causal', beamformer_type='wiener', refmic=0, block_size_in_frames=(- 1), diagload=0.001, epsilon=1e-08):
"Computes a multi-channel Wiener filter from spectrogram-like inputs.\n\n Args:\n y: [batch, mic, frame, bin], complex64/float32, mixture spectrogram.\n t: [batch, source, frame, bin], complex64/float32, estimated spectrogram.\n use_complex_mask: If True, use a complex mask.\n frame_context_length: An integer value to specify the number of\n contextual frames used in beamforming.\n frame_context_type: 'causal' or 'centered'.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n refmic: index of the reference mic.\n block_size_in_frames: an int32 value, block size in frames.\n diagload: float32, diagonal loading value for the matrix inversion in\n beamforming. Note that this value is likely dependent on the energy level\n of the input mixture. The default value has been tuned based on the\n assumption that the time-domain RMS normalization is performed, and the\n covariance matrices are always divided by the number of frames.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n [batch, source, frame, bin], complex64/float32, beamformed y.\n "
y = tf.convert_to_tensor(y, name='y')
t = tf.convert_to_tensor(t, name='t')
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
tensor_shaper.register_axes(t, ['batch', 'source', 'frame', 'bin'])
batch = tensor_shaper.axis_sizes['batch']
n_frames = tensor_shaper.axis_sizes['frame']
if (frame_context_length > 1):
if (frame_context_type == 'causal'):
y = tf.pad(y, [(0, 0), (0, 0), ((frame_context_length - 1), 0), (0, 0)])
center_frame_index = (frame_context_length - 1)
elif (frame_context_type == 'centered'):
pad_end = ((frame_context_length - 1) // 2)
pad_begin = ((frame_context_length - 1) - pad_end)
y = tf.pad(y, [(0, 0), (0, 0), (pad_begin, pad_end), (0, 0)])
center_frame_index = pad_begin
else:
raise ValueError('Unknown frame context type {}'.format(frame_context_type))
y = tf.signal.frame(y, frame_context_length, 1, axis=2)
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'context', 'bin'], ['batch', ('mic', 'context'), 'frame', 'bin'])
refmic = ((refmic * frame_context_length) + center_frame_index)
if (block_size_in_frames < 0):
n_frames_in_block = n_frames
perform_blocking = False
else:
assert (block_size_in_frames > 0)
if tf.is_tensor(n_frames):
n_frames_in_block = tf.minimum(n_frames, block_size_in_frames)
else:
n_frames_in_block = min(n_frames, block_size_in_frames)
perform_blocking = True
if perform_blocking:
overlap_window = tf.cast(tf.signal.vorbis_window(n_frames_in_block), dtype=y.dtype)
def extract_blocks(tensor):
'Extract overlapping blocks from signals.'
half_size = (n_frames_in_block // 2)
tensor = tf.pad(tensor, [(0, 0), (0, 0), (half_size, 0), (0, 0)])
tensor = tf.signal.frame(tensor, n_frames_in_block, half_size, pad_end=True, axis=(- 2))
local_shaper = shaper.Shaper()
tensor = local_shaper.change(tensor, ['batch', 'chan', 'block', 'frame', 'bin'], [('batch', 'block'), 'chan', 'frame', 'bin'])
window_reshaped = tf.reshape(overlap_window, [1, 1, n_frames_in_block, 1])
tensor *= window_reshaped
return tensor
y = extract_blocks(y)
t = extract_blocks(t)
(bf_y, _) = time_invariant_multichannel_filtering(y, t, use_complex_mask=use_complex_mask, beamformer_type=beamformer_type, refmic=refmic, diagload=diagload, epsilon=epsilon)
if perform_blocking:
block_shaper = shaper.Shaper()
block_shaper.register_axes(bf_y, ['block_and_batch', 'source', 'frame_in_block', 'bin'])
half_size = (n_frames_in_block // 2)
n_blocks = (tf.shape(bf_y)[0] // batch)
tensor_shape = tf.concat([[n_blocks, batch], tf.shape(bf_y)[1:]], axis=0)
bf_y = tf.reshape(bf_y, tensor_shape)
block_shaper.register_axes(bf_y, ['block', 'batch', 'source', 'frame_in_block', 'bin'])
bf_y = block_shaper.change(bf_y, ['block', 'batch', 'source', 'frame_in_block', 'bin'], ['batch', 'source', 'bin', 'block', 'frame_in_block'])
window_reshaped = tf.reshape(overlap_window, [1, 1, 1, 1, n_frames_in_block])
bf_y *= window_reshaped
bf_y = tf.signal.overlap_and_add(bf_y, half_size)
bf_y = bf_y[(..., half_size:(half_size + n_frames))]
block_shaper.register_axes(bf_y, ['batch', 'source', 'bin', 'frame'])
bf_y = block_shaper.change(bf_y, ['batch', 'source', 'bin', 'frame'], ['batch', 'source', 'frame', 'bin'])
return bf_y | Computes a multi-channel Wiener filter from spectrogram-like inputs.
Args:
y: [batch, mic, frame, bin], complex64/float32, mixture spectrogram.
t: [batch, source, frame, bin], complex64/float32, estimated spectrogram.
use_complex_mask: If True, use a complex mask.
frame_context_length: An integer value to specify the number of
contextual frames used in beamforming.
frame_context_type: 'causal' or 'centered'.
beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'
or 'mpdr'.
refmic: index of the reference mic.
block_size_in_frames: an int32 value, block size in frames.
diagload: float32, diagonal loading value for the matrix inversion in
beamforming. Note that this value is likely dependent on the energy level
of the input mixture. The default value has been tuned based on the
assumption that the time-domain RMS normalization is performed, and the
covariance matrices are always divided by the number of frames.
epsilon: A float32 value, data-independent stabilizer for diagonal loading.
Returns:
[batch, source, frame, bin], complex64/float32, beamformed y. | models/train/multichannel_filtering.py | compute_multichannel_filter | marciopuga/sound-separation | 412 | python | def compute_multichannel_filter(y, t, use_complex_mask=False, frame_context_length=1, frame_context_type='causal', beamformer_type='wiener', refmic=0, block_size_in_frames=(- 1), diagload=0.001, epsilon=1e-08):
"Computes a multi-channel Wiener filter from spectrogram-like inputs.\n\n Args:\n y: [batch, mic, frame, bin], complex64/float32, mixture spectrogram.\n t: [batch, source, frame, bin], complex64/float32, estimated spectrogram.\n use_complex_mask: If True, use a complex mask.\n frame_context_length: An integer value to specify the number of\n contextual frames used in beamforming.\n frame_context_type: 'causal' or 'centered'.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n refmic: index of the reference mic.\n block_size_in_frames: an int32 value, block size in frames.\n diagload: float32, diagonal loading value for the matrix inversion in\n beamforming. Note that this value is likely dependent on the energy level\n of the input mixture. The default value has been tuned based on the\n assumption that the time-domain RMS normalization is performed, and the\n covariance matrices are always divided by the number of frames.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n [batch, source, frame, bin], complex64/float32, beamformed y.\n "
y = tf.convert_to_tensor(y, name='y')
t = tf.convert_to_tensor(t, name='t')
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
tensor_shaper.register_axes(t, ['batch', 'source', 'frame', 'bin'])
batch = tensor_shaper.axis_sizes['batch']
n_frames = tensor_shaper.axis_sizes['frame']
if (frame_context_length > 1):
if (frame_context_type == 'causal'):
y = tf.pad(y, [(0, 0), (0, 0), ((frame_context_length - 1), 0), (0, 0)])
center_frame_index = (frame_context_length - 1)
elif (frame_context_type == 'centered'):
pad_end = ((frame_context_length - 1) // 2)
pad_begin = ((frame_context_length - 1) - pad_end)
y = tf.pad(y, [(0, 0), (0, 0), (pad_begin, pad_end), (0, 0)])
center_frame_index = pad_begin
else:
raise ValueError('Unknown frame context type {}'.format(frame_context_type))
y = tf.signal.frame(y, frame_context_length, 1, axis=2)
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'context', 'bin'], ['batch', ('mic', 'context'), 'frame', 'bin'])
refmic = ((refmic * frame_context_length) + center_frame_index)
if (block_size_in_frames < 0):
n_frames_in_block = n_frames
perform_blocking = False
else:
assert (block_size_in_frames > 0)
if tf.is_tensor(n_frames):
n_frames_in_block = tf.minimum(n_frames, block_size_in_frames)
else:
n_frames_in_block = min(n_frames, block_size_in_frames)
perform_blocking = True
if perform_blocking:
overlap_window = tf.cast(tf.signal.vorbis_window(n_frames_in_block), dtype=y.dtype)
def extract_blocks(tensor):
'Extract overlapping blocks from signals.'
half_size = (n_frames_in_block // 2)
tensor = tf.pad(tensor, [(0, 0), (0, 0), (half_size, 0), (0, 0)])
tensor = tf.signal.frame(tensor, n_frames_in_block, half_size, pad_end=True, axis=(- 2))
local_shaper = shaper.Shaper()
tensor = local_shaper.change(tensor, ['batch', 'chan', 'block', 'frame', 'bin'], [('batch', 'block'), 'chan', 'frame', 'bin'])
window_reshaped = tf.reshape(overlap_window, [1, 1, n_frames_in_block, 1])
tensor *= window_reshaped
return tensor
y = extract_blocks(y)
t = extract_blocks(t)
(bf_y, _) = time_invariant_multichannel_filtering(y, t, use_complex_mask=use_complex_mask, beamformer_type=beamformer_type, refmic=refmic, diagload=diagload, epsilon=epsilon)
if perform_blocking:
block_shaper = shaper.Shaper()
block_shaper.register_axes(bf_y, ['block_and_batch', 'source', 'frame_in_block', 'bin'])
half_size = (n_frames_in_block // 2)
n_blocks = (tf.shape(bf_y)[0] // batch)
tensor_shape = tf.concat([[n_blocks, batch], tf.shape(bf_y)[1:]], axis=0)
bf_y = tf.reshape(bf_y, tensor_shape)
block_shaper.register_axes(bf_y, ['block', 'batch', 'source', 'frame_in_block', 'bin'])
bf_y = block_shaper.change(bf_y, ['block', 'batch', 'source', 'frame_in_block', 'bin'], ['batch', 'source', 'bin', 'block', 'frame_in_block'])
window_reshaped = tf.reshape(overlap_window, [1, 1, 1, 1, n_frames_in_block])
bf_y *= window_reshaped
bf_y = tf.signal.overlap_and_add(bf_y, half_size)
bf_y = bf_y[(..., half_size:(half_size + n_frames))]
block_shaper.register_axes(bf_y, ['batch', 'source', 'bin', 'frame'])
bf_y = block_shaper.change(bf_y, ['batch', 'source', 'bin', 'frame'], ['batch', 'source', 'frame', 'bin'])
return bf_y | def compute_multichannel_filter(y, t, use_complex_mask=False, frame_context_length=1, frame_context_type='causal', beamformer_type='wiener', refmic=0, block_size_in_frames=(- 1), diagload=0.001, epsilon=1e-08):
"Computes a multi-channel Wiener filter from spectrogram-like inputs.\n\n Args:\n y: [batch, mic, frame, bin], complex64/float32, mixture spectrogram.\n t: [batch, source, frame, bin], complex64/float32, estimated spectrogram.\n use_complex_mask: If True, use a complex mask.\n frame_context_length: An integer value to specify the number of\n contextual frames used in beamforming.\n frame_context_type: 'causal' or 'centered'.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n refmic: index of the reference mic.\n block_size_in_frames: an int32 value, block size in frames.\n diagload: float32, diagonal loading value for the matrix inversion in\n beamforming. Note that this value is likely dependent on the energy level\n of the input mixture. The default value has been tuned based on the\n assumption that the time-domain RMS normalization is performed, and the\n covariance matrices are always divided by the number of frames.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n [batch, source, frame, bin], complex64/float32, beamformed y.\n "
y = tf.convert_to_tensor(y, name='y')
t = tf.convert_to_tensor(t, name='t')
tensor_shaper = shaper.Shaper()
tensor_shaper.register_axes(y, ['batch', 'mic', 'frame', 'bin'])
tensor_shaper.register_axes(t, ['batch', 'source', 'frame', 'bin'])
batch = tensor_shaper.axis_sizes['batch']
n_frames = tensor_shaper.axis_sizes['frame']
if (frame_context_length > 1):
if (frame_context_type == 'causal'):
y = tf.pad(y, [(0, 0), (0, 0), ((frame_context_length - 1), 0), (0, 0)])
center_frame_index = (frame_context_length - 1)
elif (frame_context_type == 'centered'):
pad_end = ((frame_context_length - 1) // 2)
pad_begin = ((frame_context_length - 1) - pad_end)
y = tf.pad(y, [(0, 0), (0, 0), (pad_begin, pad_end), (0, 0)])
center_frame_index = pad_begin
else:
raise ValueError('Unknown frame context type {}'.format(frame_context_type))
y = tf.signal.frame(y, frame_context_length, 1, axis=2)
y = tensor_shaper.change(y, ['batch', 'mic', 'frame', 'context', 'bin'], ['batch', ('mic', 'context'), 'frame', 'bin'])
refmic = ((refmic * frame_context_length) + center_frame_index)
if (block_size_in_frames < 0):
n_frames_in_block = n_frames
perform_blocking = False
else:
assert (block_size_in_frames > 0)
if tf.is_tensor(n_frames):
n_frames_in_block = tf.minimum(n_frames, block_size_in_frames)
else:
n_frames_in_block = min(n_frames, block_size_in_frames)
perform_blocking = True
if perform_blocking:
overlap_window = tf.cast(tf.signal.vorbis_window(n_frames_in_block), dtype=y.dtype)
def extract_blocks(tensor):
'Extract overlapping blocks from signals.'
half_size = (n_frames_in_block // 2)
tensor = tf.pad(tensor, [(0, 0), (0, 0), (half_size, 0), (0, 0)])
tensor = tf.signal.frame(tensor, n_frames_in_block, half_size, pad_end=True, axis=(- 2))
local_shaper = shaper.Shaper()
tensor = local_shaper.change(tensor, ['batch', 'chan', 'block', 'frame', 'bin'], [('batch', 'block'), 'chan', 'frame', 'bin'])
window_reshaped = tf.reshape(overlap_window, [1, 1, n_frames_in_block, 1])
tensor *= window_reshaped
return tensor
y = extract_blocks(y)
t = extract_blocks(t)
(bf_y, _) = time_invariant_multichannel_filtering(y, t, use_complex_mask=use_complex_mask, beamformer_type=beamformer_type, refmic=refmic, diagload=diagload, epsilon=epsilon)
if perform_blocking:
block_shaper = shaper.Shaper()
block_shaper.register_axes(bf_y, ['block_and_batch', 'source', 'frame_in_block', 'bin'])
half_size = (n_frames_in_block // 2)
n_blocks = (tf.shape(bf_y)[0] // batch)
tensor_shape = tf.concat([[n_blocks, batch], tf.shape(bf_y)[1:]], axis=0)
bf_y = tf.reshape(bf_y, tensor_shape)
block_shaper.register_axes(bf_y, ['block', 'batch', 'source', 'frame_in_block', 'bin'])
bf_y = block_shaper.change(bf_y, ['block', 'batch', 'source', 'frame_in_block', 'bin'], ['batch', 'source', 'bin', 'block', 'frame_in_block'])
window_reshaped = tf.reshape(overlap_window, [1, 1, 1, 1, n_frames_in_block])
bf_y *= window_reshaped
bf_y = tf.signal.overlap_and_add(bf_y, half_size)
bf_y = bf_y[(..., half_size:(half_size + n_frames))]
block_shaper.register_axes(bf_y, ['batch', 'source', 'bin', 'frame'])
bf_y = block_shaper.change(bf_y, ['batch', 'source', 'bin', 'frame'], ['batch', 'source', 'frame', 'bin'])
return bf_y<|docstring|>Computes a multi-channel Wiener filter from spectrogram-like inputs.
Args:
y: [batch, mic, frame, bin], complex64/float32, mixture spectrogram.
t: [batch, source, frame, bin], complex64/float32, estimated spectrogram.
use_complex_mask: If True, use a complex mask.
frame_context_length: An integer value to specify the number of
contextual frames used in beamforming.
frame_context_type: 'causal' or 'centered'.
beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'
or 'mpdr'.
refmic: index of the reference mic.
block_size_in_frames: an int32 value, block size in frames.
diagload: float32, diagonal loading value for the matrix inversion in
beamforming. Note that this value is likely dependent on the energy level
of the input mixture. The default value has been tuned based on the
assumption that the time-domain RMS normalization is performed, and the
covariance matrices are always divided by the number of frames.
epsilon: A float32 value, data-independent stabilizer for diagonal loading.
Returns:
[batch, source, frame, bin], complex64/float32, beamformed y.<|endoftext|> |
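The blocking above is squared-window overlap-add along the frame axis: each half-overlapped block is tapered with a Vorbis window before per-block beamforming and again before reconstruction, and since the squared Vorbis window satisfies the Princen-Bradley condition at 50% overlap, the analysis/synthesis pair alone is an identity. A 1-D NumPy sketch of that round trip, with the per-block processing elided:

import numpy as np

def vorbis_window(n):
    return np.sin(0.5 * np.pi * np.sin(np.pi * (np.arange(n) + 0.5) / n) ** 2)

def block_roundtrip(x, block):
    hop = block // 2
    win = vorbis_window(block)
    x_pad = np.concatenate([np.zeros(hop), x, np.zeros(block)])
    out = np.zeros_like(x_pad)
    n_blocks = (len(x_pad) - block) // hop + 1
    for i in range(n_blocks):
        seg = x_pad[i * hop:i * hop + block] * win   # analysis taper
        # ... per-block beamforming would run here ...
        out[i * hop:i * hop + block] += seg * win    # synthesis taper + overlap-add
    return out[hop:hop + len(x)]

x = np.random.default_rng(0).standard_normal(1024)
assert np.allclose(block_roundtrip(x, 256), x)       # perfect reconstruction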
afce7ded5f2c9b64b63c28f87d6c7746199926f53363ed3768d2428234f90caf | def compute_multichannel_filter_from_signals(y, t, refmic=0, sample_rate=16000.0, ws=0.064, hs=0.032, frame_context_length=1, frame_context_type='causal', beamformer_type='wiener', block_size_in_seconds=(- 1), use_complex_mask=False, diagload=0.001, epsilon=1e-08):
"Computes a multichannel Wiener filter to estimate a target t from y.\n\n Args:\n y: [batch, mic, time], float32, mixture waveform.\n t: [batch, source, time], float32, estimated waveform.\n refmic: Index of the reference mic.\n sample_rate: Sampling rate of audio in Hz.\n ws: Window size in seconds.\n hs: Hop size in seconds.\n frame_context_length: An integer value to specify the number of\n contextual frames used in beamforming.\n frame_context_type: 'causal' or 'centered'.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n block_size_in_seconds: block size in seconds.\n use_complex_mask: If True, use a complex mask.\n diagload: float32, diagonal loading value for the matrix inversion in\n beamforming. Note that this value is likely dependent on the energy level\n of the input mixture. The default value has been tuned based on the\n assumption that the time-domain RMS normalization is performed, and the\n covariance matrices are always divided by the number of frames.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n [batch, source, time], float32, beamformed waveform y.\n "
noisy_length = signal_util.static_or_dynamic_dim_size(y, (- 1))
transformer = signal_transformer.SignalTransformer(sample_rate=sample_rate, window_time_seconds=ws, hop_time_seconds=hs, magnitude_offset=1e-08, zeropad_beginning=True)
y_spectrograms = transformer.forward(y)
t_spectrograms = transformer.forward(t)
block_size_in_frames = int(round((block_size_in_seconds / hs)))
beamformed_spectrograms = compute_multichannel_filter(y_spectrograms, t_spectrograms, frame_context_length=frame_context_length, frame_context_type=frame_context_type, beamformer_type=beamformer_type, refmic=refmic, block_size_in_frames=block_size_in_frames, use_complex_mask=use_complex_mask, diagload=diagload, epsilon=epsilon)
beamformed_waveforms = transformer.inverse(beamformed_spectrograms)[(..., :noisy_length)]
return beamformed_waveforms | Computes a multichannel Wiener filter to estimate a target t from y.
Args:
y: [batch, mic, time], float32, mixture waveform.
t: [batch, source, time], float32, estimated waveform.
refmic: Index of the reference mic.
sample_rate: Sampling rate of audio in Hz.
ws: Window size in seconds.
hs: Hop size in seconds.
frame_context_length: An integer value to specify the number of
contextual frames used in beamforming.
frame_context_type: 'causal' or 'centered'.
beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'
or 'mpdr'.
block_size_in_seconds: block size in seconds.
use_complex_mask: If True, use a complex mask.
diagload: float32, diagonal loading value for the matrix inversion in
beamforming. Note that this value is likely dependent on the energy level
of the input mixture. The default value has been tuned based on the
assumption that the time-domain RMS normalization is performed, and the
covariance matrices are always divided by the number of frames.
epsilon: A float32 value, data-independent stabilizer for diagonal loading.
Returns:
[batch, source, time], float32, beamformed waveform y. | models/train/multichannel_filtering.py | compute_multichannel_filter_from_signals | marciopuga/sound-separation | 412 | python | def compute_multichannel_filter_from_signals(y, t, refmic=0, sample_rate=16000.0, ws=0.064, hs=0.032, frame_context_length=1, frame_context_type='causal', beamformer_type='wiener', block_size_in_seconds=(- 1), use_complex_mask=False, diagload=0.001, epsilon=1e-08):
"Computes a multichannel Wiener filter to estimate a target t from y.\n\n Args:\n y: [batch, mic, time], float32, mixture waveform.\n t: [batch, source, time], float32, estimated waveform.\n refmic: Index of the reference mic.\n sample_rate: Sampling rate of audio in Hz.\n ws: Window size in seconds.\n hs: Hop size in seconds.\n frame_context_length: An integer value to specify the number of\n contextual frames used in beamforming.\n frame_context_type: 'causal' or 'centered'.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n block_size_in_seconds: block size in seconds.\n use_complex_mask: If True, use a complex mask.\n diagload: float32, diagonal loading value for the matrix inversion in\n beamforming. Note that this value is likely dependent on the energy level\n of the input mixture. The default value has been tuned based on the\n assumption that the time-domain RMS normalization is performed, and the\n covariance matrices are always divided by the number of frames.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n [batch, source, time], float32, beamformed waveform y.\n "
noisy_length = signal_util.static_or_dynamic_dim_size(y, (- 1))
transformer = signal_transformer.SignalTransformer(sample_rate=sample_rate, window_time_seconds=ws, hop_time_seconds=hs, magnitude_offset=1e-08, zeropad_beginning=True)
y_spectrograms = transformer.forward(y)
t_spectrograms = transformer.forward(t)
block_size_in_frames = int(round((block_size_in_seconds / hs)))
beamformed_spectrograms = compute_multichannel_filter(y_spectrograms, t_spectrograms, frame_context_length=frame_context_length, frame_context_type=frame_context_type, beamformer_type=beamformer_type, refmic=refmic, block_size_in_frames=block_size_in_frames, use_complex_mask=use_complex_mask, diagload=diagload, epsilon=epsilon)
beamformed_waveforms = transformer.inverse(beamformed_spectrograms)[..., :noisy_length]
return beamformed_waveforms | def compute_multichannel_filter_from_signals(y, t, refmic=0, sample_rate=16000.0, ws=0.064, hs=0.032, frame_context_length=1, frame_context_type='causal', beamformer_type='wiener', block_size_in_seconds=(- 1), use_complex_mask=False, diagload=0.001, epsilon=1e-08):
"Computes a multichannel Wiener filter to estimate a target t from y.\n\n Args:\n y: [batch, mic, time], float32, mixture waveform.\n t: [batch, source, time], float32, estimated waveform.\n refmic: Index of the reference mic.\n sample_rate: Sampling rate of audio in Hz.\n ws: Window size in seconds.\n hs: Hop size in seconds.\n frame_context_length: An integer value to specify the number of\n contextual frames used in beamforming.\n frame_context_type: 'causal' or 'centered'.\n beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'\n or 'mpdr'.\n block_size_in_seconds: block size in seconds.\n use_complex_mask: If True, use a complex mask.\n diagload: float32, diagonal loading value for the matrix inversion in\n beamforming. Note that this value is likely dependent on the energy level\n of the input mixture. The default value has been tuned based on the\n assumption that the time-domain RMS normalization is performed, and the\n covariance matrices are always divided by the number of frames.\n epsilon: A float32 value, data-independent stabilizer for diagonal loading.\n\n Returns:\n [batch, source, time], float32, beamformed waveform y.\n "
noisy_length = signal_util.static_or_dynamic_dim_size(y, (- 1))
transformer = signal_transformer.SignalTransformer(sample_rate=sample_rate, window_time_seconds=ws, hop_time_seconds=hs, magnitude_offset=1e-08, zeropad_beginning=True)
y_spectrograms = transformer.forward(y)
t_spectrograms = transformer.forward(t)
block_size_in_frames = int(round((block_size_in_seconds / hs)))
beamformed_spectrograms = compute_multichannel_filter(y_spectrograms, t_spectrograms, frame_context_length=frame_context_length, frame_context_type=frame_context_type, beamformer_type=beamformer_type, refmic=refmic, block_size_in_frames=block_size_in_frames, use_complex_mask=use_complex_mask, diagload=diagload, epsilon=epsilon)
beamformed_waveforms = transformer.inverse(beamformed_spectrograms)[..., :noisy_length]
return beamformed_waveforms<|docstring|>Computes a multichannel Wiener filter to estimate a target t from y.
Args:
y: [batch, mic, time], float32, mixture waveform.
t: [batch, source, time], float32, estimated waveform.
refmic: Index of the reference mic.
sample_rate: Sampling rate of audio in Hz.
ws: Window size in seconds.
hs: Hop size in seconds.
frame_context_length: An integer value to specify the number of
contextual frames used in beamforming.
frame_context_type: 'causal' or 'centered'.
beamformer_type: A string describing beamformer type. 'wiener', 'mvdr'
or 'mpdr'.
block_size_in_seconds: block size in seconds.
use_complex_mask: If True, use a complex mask.
diagload: float32, diagonal loading value for the matrix inversion in
beamforming. Note that this value is likely dependent on the energy level
of the input mixture. The default value has been tuned based on the
assumption that the time-domain RMS normalization is performed, and the
covariance matrices are always divided by the number of frames.
epsilon: A float32 value, data-independent stabilizer for diagonal loading.
Returns:
[batch, source, time], float32, beamformed waveform y.<|endoftext|> |
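A hedged usage sketch for the record above: the import path follows the repo's `models/train` layout and the tensor shapes follow the docstring, but treat this as illustrative rather than the repo's documented API.

```python
# Sketch only: assumes TensorFlow plus the sound-separation repo's
# models/train package are importable; shapes follow the docstring.
import tensorflow as tf
from models.train import multichannel_filtering

batch, mics, sources, samples = 1, 2, 2, 16000
y = tf.random.normal([batch, mics, samples])     # [batch, mic, time] mixture
t = tf.random.normal([batch, sources, samples])  # [batch, source, time] estimates
enhanced = multichannel_filtering.compute_multichannel_filter_from_signals(
    y, t, refmic=0, sample_rate=16000.0, ws=0.064, hs=0.032)
print(enhanced.shape)  # expected [1, 2, 16000] per the Returns section
```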
392a09b0fc3eaf347d98dc0a431f964eede2dbba09220215937e2c8c38233c4a | def extract_blocks(tensor):
'Extract overlapping blocks from signals.'
half_size = (n_frames_in_block // 2)
tensor = tf.pad(tensor, [(0, 0), (0, 0), (half_size, 0), (0, 0)])
tensor = tf.signal.frame(tensor, n_frames_in_block, half_size, pad_end=True, axis=(- 2))
local_shaper = shaper.Shaper()
tensor = local_shaper.change(tensor, ['batch', 'chan', 'block', 'frame', 'bin'], [('batch', 'block'), 'chan', 'frame', 'bin'])
window_reshaped = tf.reshape(overlap_window, [1, 1, n_frames_in_block, 1])
tensor *= window_reshaped
return tensor | Extract overlapping blocks from signals. | models/train/multichannel_filtering.py | extract_blocks | marciopuga/sound-separation | 412 | python | def extract_blocks(tensor):
half_size = (n_frames_in_block // 2)
tensor = tf.pad(tensor, [(0, 0), (0, 0), (half_size, 0), (0, 0)])
tensor = tf.signal.frame(tensor, n_frames_in_block, half_size, pad_end=True, axis=(- 2))
local_shaper = shaper.Shaper()
tensor = local_shaper.change(tensor, ['batch', 'chan', 'block', 'frame', 'bin'], [('batch', 'block'), 'chan', 'frame', 'bin'])
window_reshaped = tf.reshape(overlap_window, [1, 1, n_frames_in_block, 1])
tensor *= window_reshaped
return tensor | def extract_blocks(tensor):
half_size = (n_frames_in_block // 2)
tensor = tf.pad(tensor, [(0, 0), (0, 0), (half_size, 0), (0, 0)])
tensor = tf.signal.frame(tensor, n_frames_in_block, half_size, pad_end=True, axis=(- 2))
local_shaper = shaper.Shaper()
tensor = local_shaper.change(tensor, ['batch', 'chan', 'block', 'frame', 'bin'], [('batch', 'block'), 'chan', 'frame', 'bin'])
window_reshaped = tf.reshape(overlap_window, [1, 1, n_frames_in_block, 1])
tensor *= window_reshaped
return tensor<|docstring|>Extract overlapping blocks from signals.<|endoftext|> |
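The blocking trick above (pre-pad half a block, then frame with a half-block hop) can be reproduced standalone; this sketch uses only `tf.pad` and `tf.signal.frame` with illustrative sizes.

```python
# Standalone illustration of the 50%-overlap blocking used in extract_blocks.
import tensorflow as tf

n_frames_in_block = 4
half = n_frames_in_block // 2
x = tf.reshape(tf.range(8, dtype=tf.float32), [1, 1, 8, 1])  # [batch, chan, frame, bin]
x = tf.pad(x, [(0, 0), (0, 0), (half, 0), (0, 0)])           # pre-pad half a block
blocks = tf.signal.frame(x, n_frames_in_block, half, pad_end=True, axis=-2)
print(blocks.shape)  # (1, 1, 5, 4, 1): blocks of 4 frames advancing by 2
```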
47000a3b0e9a1df2a78a128f9d6693861e95c5e250396be2e33c95ad81968a0d | def has_object_destroy_permission(self, request):
'Currently refers only to delete action'
return request.user.is_superuser | Currently refers only to delete action | care/facility/models/patient.py | has_object_destroy_permission | Nikhil713/care | 0 | python | def has_object_destroy_permission(self, request):
return request.user.is_superuser | def has_object_destroy_permission(self, request):
return request.user.is_superuser<|docstring|>Currently refers only to delete action<|endoftext|> |
93ee366a9f934296bfa3bfa8018f6f9c9cc7eacaec184ed04ad8595f168b9a7a | def save(self, *args, **kwargs) -> None:
"\n While saving, if the local body is not null, then district will be local body's district\n Overriding save will help in a collision where the local body's district and district fields are different.\n\n It also creates/updates the PatientSearch model\n\n Parameters\n ----------\n args: list of args - not used\n kwargs: keyword args - not used\n\n Returns\n -------\n None\n "
if (self.local_body is not None):
self.district = self.local_body.district
if (self.district is not None):
self.state = self.district.state
self.year_of_birth = (self.date_of_birth.year if (self.date_of_birth is not None) else (datetime.datetime.now().year - self.age))
is_create = (self.pk is None)
super().save(*args, **kwargs)
if (is_create or (self.patient_search_id is None)):
ps = PatientSearch.objects.create(name=self.name, gender=self.gender, phone_number=self.phone_number, date_of_birth=self.date_of_birth, year_of_birth=self.year_of_birth, state_id=self.state_id, patient_id=self.pk)
self.patient_search_id = ps.pk
self.save()
else:
PatientSearch.objects.filter(pk=self.patient_search_id).update(name=self.name, gender=self.gender, phone_number=self.phone_number, date_of_birth=self.date_of_birth, year_of_birth=self.year_of_birth, state_id=self.state_id) | While saving, if the local body is not null, then district will be local body's district
Overriding save will help in a collision where the local body's district and district fields are different.
It also creates/updates the PatientSearch model
Parameters
----------
args: list of args - not used
kwargs: keyword args - not used
Returns
-------
None | care/facility/models/patient.py | save | Nikhil713/care | 0 | python | def save(self, *args, **kwargs) -> None:
"\n While saving, if the local body is not null, then district will be local body's district\n Overriding save will help in a collision where the local body's district and district fields are different.\n\n It also creates/updates the PatientSearch model\n\n Parameters\n ----------\n args: list of args - not used\n kwargs: keyword args - not used\n\n Returns\n -------\n None\n "
if (self.local_body is not None):
self.district = self.local_body.district
if (self.district is not None):
self.state = self.district.state
self.year_of_birth = (self.date_of_birth.year if (self.date_of_birth is not None) else (datetime.datetime.now().year - self.age))
is_create = (self.pk is None)
super().save(*args, **kwargs)
if (is_create or (self.patient_search_id is None)):
ps = PatientSearch.objects.create(name=self.name, gender=self.gender, phone_number=self.phone_number, date_of_birth=self.date_of_birth, year_of_birth=self.year_of_birth, state_id=self.state_id, patient_id=self.pk)
self.patient_search_id = ps.pk
self.save()
else:
PatientSearch.objects.filter(pk=self.patient_search_id).update(name=self.name, gender=self.gender, phone_number=self.phone_number, date_of_birth=self.date_of_birth, year_of_birth=self.year_of_birth, state_id=self.state_id) | def save(self, *args, **kwargs) -> None:
"\n While saving, if the local body is not null, then district will be local body's district\n Overriding save will help in a collision where the local body's district and district fields are different.\n\n It also creates/updates the PatientSearch model\n\n Parameters\n ----------\n args: list of args - not used\n kwargs: keyword args - not used\n\n Returns\n -------\n None\n "
if (self.local_body is not None):
self.district = self.local_body.district
if (self.district is not None):
self.state = self.district.state
self.year_of_birth = (self.date_of_birth.year if (self.date_of_birth is not None) else (datetime.datetime.now().year - self.age))
is_create = (self.pk is None)
super().save(*args, **kwargs)
if (is_create or (self.patient_search_id is None)):
ps = PatientSearch.objects.create(name=self.name, gender=self.gender, phone_number=self.phone_number, date_of_birth=self.date_of_birth, year_of_birth=self.year_of_birth, state_id=self.state_id, patient_id=self.pk)
self.patient_search_id = ps.pk
self.save()
else:
PatientSearch.objects.filter(pk=self.patient_search_id).update(name=self.name, gender=self.gender, phone_number=self.phone_number, date_of_birth=self.date_of_birth, year_of_birth=self.year_of_birth, state_id=self.state_id)<|docstring|>While saving, if the local body is not null, then district will be local body's district
Overriding save will help in a collision where the local body's district and district fields are different.
It also creates/updates the PatientSearch model
Parameters
----------
args: list of args - not used
kwargs: keyword args - not used
Returns
-------
None<|endoftext|> |
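The derived-field rule in `save()` is easy to isolate; this is a minimal sketch of the `year_of_birth` fallback without Django, with illustrative values.

```python
# Minimal sketch of the year_of_birth rule from save(); no Django needed.
import datetime

def derive_year_of_birth(date_of_birth, age):
    # Prefer the explicit date of birth; otherwise estimate from age.
    if date_of_birth is not None:
        return date_of_birth.year
    return datetime.datetime.now().year - age

print(derive_year_of_birth(datetime.date(1990, 5, 1), None))  # 1990
print(derive_year_of_birth(None, 30))  # current year minus 30
```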
1a7a8d42f35343b6bdaa63eb99a18f03a472e5bf5f2fdc28ab82a85cc8645393 | @staticmethod
def locate_msbuild():
'\n Attempts to find the msbuild executable in the local filesystem\n '
if (sys.platform == 'win32'):
msbuild_search_patterns = []
vs_msbuild_pattern = '\\Microsoft Visual Studio\\*\\Community\\MSBuild\\*\\Bin\\MSBuild.exe'
dotnet_msbuild_pattern = '\\Microsoft.NET\\Framework\\*\\MSBuild.exe'
if ('ProgramFiles' in os.environ):
msbuild_search_patterns.append((os.environ['ProgramFiles'] + vs_msbuild_pattern))
if ('ProgramFiles(x86)' in os.environ):
msbuild_search_patterns.append((os.environ['ProgramFiles(x86)'] + vs_msbuild_pattern))
if ('WINDIR' in os.environ):
msbuild_search_patterns.append((os.environ['WINDIR'] + dotnet_msbuild_pattern))
for pattern in msbuild_search_patterns:
locations = glob.glob(pattern)
for location in sorted(locations, reverse=True):
if MsBuildRunner.valid_msbuild_executable(location):
return location
if MsBuildRunner.valid_msbuild_executable('msbuild'):
return 'msbuild'
return None | Attempts to find the msbuild executable in the local filesystem | ugetcli/msbuild.py | locate_msbuild | AgeOfLearning/uget-cli | 1 | python | @staticmethod
def locate_msbuild():
'\n \n '
if (sys.platform == 'win32'):
msbuild_search_patterns = []
vs_msbuild_pattern = '\\Microsoft Visual Studio\\*\\Community\\MSBuild\\*\\Bin\\MSBuild.exe'
dotnet_msbuild_pattern = '\\Microsoft.NET\\Framework\\*\\MSBuild.exe'
if ('ProgramFiles' in os.environ):
msbuild_search_patterns.append((os.environ['ProgramFiles'] + vs_msbuild_pattern))
if ('ProgramFiles(x86)' in os.environ):
msbuild_search_patterns.append((os.environ['ProgramFiles(x86)'] + vs_msbuild_pattern))
if ('WINDIR' in os.environ):
msbuild_search_patterns.append((os.environ['WINDIR'] + dotnet_msbuild_pattern))
for pattern in msbuild_search_patterns:
locations = glob.glob(pattern)
for location in sorted(locations, reverse=True):
if MsBuildRunner.valid_msbuild_executable(location):
return location
if MsBuildRunner.valid_msbuild_executable('msbuild'):
return 'msbuild'
return None | @staticmethod
def locate_msbuild():
'\n \n '
if (sys.platform == 'win32'):
msbuild_search_patterns = []
vs_msbuild_pattern = '\\Microsoft Visual Studio\\*\\Community\\MSBuild\\*\\Bin\\MSBuild.exe'
dotnet_msbuild_pattern = '\\Microsoft.NET\\Framework\\*\\MSBuild.exe'
if ('ProgramFiles' in os.environ):
msbuild_search_patterns.append((os.environ['ProgramFiles'] + vs_msbuild_pattern))
if ('ProgramFiles(x86)' in os.environ):
msbuild_search_patterns.append((os.environ['ProgramFiles(x86)'] + vs_msbuild_pattern))
if ('WINDIR' in os.environ):
msbuild_search_patterns.append((os.environ['WINDIR'] + dotnet_msbuild_pattern))
for pattern in msbuild_search_patterns:
locations = glob.glob(pattern)
for location in sorted(locations, reverse=True):
if MsBuildRunner.valid_msbuild_executable(location):
return location
if MsBuildRunner.valid_msbuild_executable('msbuild'):
return 'msbuild'
return None<|docstring|>Attempts to find the msbuild executable in the local filesystem<|endoftext|>
7db17b6e05763083ab3e06b43a391687384fff88da1fb40f357321f554ac0579 | @staticmethod
def valid_msbuild_executable(msbuild_path):
'\n Returns True if path is a valid msbuild executable, otherwise False\n '
with open(os.devnull, 'w') as devnull:
try:
return (call((escape_exe_path(msbuild_path) + ' /?'), shell=True, stderr=devnull, stdout=devnull) == 0)
except IOError:
return False | Returns True if path is a valid msbuild executable, otherwise False | ugetcli/msbuild.py | valid_msbuild_executable | AgeOfLearning/uget-cli | 1 | python | @staticmethod
def valid_msbuild_executable(msbuild_path):
'\n \n '
with open(os.devnull, 'w') as devnull:
try:
return (call((escape_exe_path(msbuild_path) + ' /?'), shell=True, stderr=devnull, stdout=devnull) == 0)
except IOError:
return False | @staticmethod
def valid_msbuild_executable(msbuild_path):
'\n \n '
with open(os.devnull, 'w') as devnull:
try:
return (call((escape_exe_path(msbuild_path) + ' /?'), shell=True, stderr=devnull, stdout=devnull) == 0)
except IOError:
return False<|docstring|>Returns True if path is a valid msbuild executable, otherwise False<|endoftext|> |
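Assuming both static methods live on the `MsBuildRunner` class named in the metadata, discovery might be driven like this (hypothetical caller, not from the repo):

```python
# Hypothetical caller for the two helpers above.
msbuild_path = MsBuildRunner.locate_msbuild()
if msbuild_path is None:
    raise RuntimeError('MSBuild not found; install Visual Studio or .NET Framework')
print('Using MSBuild at:', msbuild_path)
```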
ddf813b78213e46d2b36707f288a8d638577fe4b2c5e55a491873872507dea21 | def cmd(command):
'This wait causes all executions to run in series. \n For parallelization, remove .wait() and instead delay the \n R script calls until all necessary data is created.'
return subprocess.Popen(command, shell=True).wait() | This wait causes all executions to run in series.
For parallelization, remove .wait() and instead delay the
R script calls until all necessary data is created. | Data/08.16.21_free_sym_rdsweep_mixediciv/simple_repeat.py | cmd | K-Johnson-Horrigan/Evolution-of-Endosymbiosis-Paper | 2 | python | def cmd(command):
'This wait causes all executions to run in series. \n For parallelization, remove .wait() and instead delay the \n R script calls until all necessary data is created.'
return subprocess.Popen(command, shell=True).wait() | def cmd(command):
'This wait causes all executions to run in series. \n For parallelization, remove .wait() and instead delay the \n R script calls until all necessary data is created.'
return subprocess.Popen(command, shell=True).wait()<|docstring|>This wait causes all executions to run in series.
For parallelization, remove .wait() and instead delay the
R script calls until all necessary data is created.<|endoftext|>
fa4dba7a15f9b1e4d64dbe119c02ff246fc7ed798eadde8142bb8eafffef2e18 | def silent_cmd(command):
'This wait causes all executions to run in series. \n For parallelization, remove .wait() and instead delay the \n R script calls until all necessary data is created.'
return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).wait() | This wait causes all executions to run in series.
For parallelization, remove .wait() and instead delay the
R script calls until all necessary data is created. | Data/08.16.21_free_sym_rdsweep_mixediciv/simple_repeat.py | silent_cmd | K-Johnson-Horrigan/Evolution-of-Endosymbiosis-Paper | 2 | python | def silent_cmd(command):
'This wait causes all executions to run in series. \n For parallelization, remove .wait() and instead delay the \n R script calls until all necessary data is created.'
return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).wait() | def silent_cmd(command):
'This wait causes all executions to run in series. \n For parallelization, remove .wait() and instead delay the \n R script calls until all necessary data is created.'
return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).wait()<|docstring|>This wait causes all executions to run in series.
For parallelization, remove .wait() and instead delay the
R script calls until all necessary data is created.<|endoftext|>
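Both docstrings describe a parallel variant: drop the per-call `.wait()` and wait only after every process has started. A sketch of that alternative (the function name is illustrative):

```python
# Sketch of the parallel variant the docstrings describe.
import subprocess

def run_parallel(commands):
    procs = [subprocess.Popen(c, shell=True) for c in commands]  # start all first
    return [p.wait() for p in procs]  # then collect exit codes

# run_parallel(['Rscript a.R', 'Rscript b.R'])  # illustrative commands
```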
3b10dbdbab029f138a76a34931ae2918d5b9ee69aa350313a597aac0a0b1f98a | def __init__(self):
'\n Constructor for the Wrapper Interface.\n Will hold the handlers set up in the environment in `self._handlers`, a dictionary keyed by name.\n Will hold a list containing the key names of handlers set up as the main ones.\n '
self._handlers = {}
self._main_handlers = []
self.initialized = False | Constructor for the Wrapper Interface.
Will hold the handlers set up in the environment in `self._handlers`, a dictionary keyed by name.
Will hold a list containing the key names of handlers set up as the main ones. | mlapp/handlers/wrappers/wrapper_interface.py | __init__ | zach-navina/mlapp | 33 | python | def __init__(self):
'\n Constructor for the Wrapper Interface.\n Will hold the handlers set up in the environment in `self._handlers`, a dictionary keyed by name.\n Will hold a list containing the key names of handlers set up as the main ones.\n '
self._handlers = {}
self._main_handlers = []
self.initialized = False | def __init__(self):
'\n Constructor for the Wrapper Interface.\n Will hold the handlers set up in the environment in `self._handlers`, a dictionary keyed by name.\n Will hold a list containing the key names of handlers set up as the main ones.\n '
self._handlers = {}
self._main_handlers = []
self.initialized = False<|docstring|>Constructor for the Wrapper Interface.
Will hold the handlers set up in the environment in `self._handlers`, a dictionary keyed by name.
Will hold a list containing the key names of handlers set up as the main ones.<|endoftext|>
7f0500865c13acab3a7f1cab407f8f157b58e43c82b4c144c3b67063b8b4e554 | @abstractmethod
def init(self, handler_type):
'\n Initialization, should be called once only\n Populates the `self._handlers` and `self._main_handlers` variables depending on the set environment\n :param handler_type: used for filtering services by the handler type\n '
if (not self.initialized):
for service_name in settings.get('services', []):
service_item = settings['services'][service_name]
if ('type' not in service_item):
raise Exception("'{}' service is missing 'type' key, must be filled in config.py with the one of the following: database/file_storage/database/spark".format(service_name))
if (service_item['type'] == handler_type):
try:
self._handlers[service_name] = service_item['handler'](service_item.get('settings', {}))
if service_item.get('main', False):
self._main_handlers.append(service_name)
except SkipServiceException as e:
pass
except Exception as e:
if (service_item['handler'] is None):
raise Exception("'{}' service of type '{}' is missing a python library installation.".format(service_name, service_item.get('type')))
else:
raise e
self.initialized = True | Initialization, should be called once only
Populates the `self._handlers` and `self._main_handlers` variables depending on the set environment
:param handler_type: used for filtering services by the handler type | mlapp/handlers/wrappers/wrapper_interface.py | init | zach-navina/mlapp | 33 | python | @abstractmethod
def init(self, handler_type):
'\n Initialization, should be called once only\n Populates the `self._handlers` and `self._main_handlers` variables depending on the set environment\n :param handler_type: used for filtering services by the handler type\n '
if (not self.initialized):
for service_name in settings.get('services', []):
service_item = settings['services'][service_name]
if ('type' not in service_item):
raise Exception("'{}' service is missing 'type' key, must be filled in config.py with the one of the following: database/file_storage/database/spark".format(service_name))
if (service_item['type'] == handler_type):
try:
self._handlers[service_name] = service_item['handler'](service_item.get('settings', {}))
if service_item.get('main', False):
self._main_handlers.append(service_name)
except SkipServiceException as e:
pass
except Exception as e:
if (service_item['handler'] is None):
raise Exception("'{}' service of type '{}' is missing a python library installation.".format(service_name, service_item.get('type')))
else:
raise e
self.initialized = True | @abstractmethod
def init(self, handler_type):
'\n Initialization, should be called once only\n Populates the `self._handlers` and `self._main_handlers` variables depending on the set environment\n :param handler_type: used for filtering services by the handler type\n '
if (not self.initialized):
for service_name in settings.get('services', []):
service_item = settings['services'][service_name]
if ('type' not in service_item):
raise Exception("'{}' service is missing 'type' key, must be filled in config.py with the one of the following: database/file_storage/database/spark".format(service_name))
if (service_item['type'] == handler_type):
try:
self._handlers[service_name] = service_item['handler'](service_item.get('settings', {}))
if service_item.get('main', False):
self._main_handlers.append(service_name)
except SkipServiceException as e:
pass
except Exception as e:
if (service_item['handler'] is None):
raise Exception("'{}' service of type '{}' is missing a python library installation.".format(service_name, service_item.get('type')))
else:
raise e
self.initialized = True<|docstring|>Initialization, should be called once only
Populates the `self._handlers` and `self._main_handlers` variables depending on the set environment
:param handler_type: used for filtering services by the handler type<|endoftext|> |
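`init()` implies a particular shape for `settings['services']`; the sketch below shows that shape with a dummy handler class. The key names come from the code above, but the class and values are invented.

```python
# Hypothetical settings layout consumed by init(); DummyHandler is invented.
class DummyHandler:
    def __init__(self, handler_settings):
        self.handler_settings = handler_settings

settings = {
    'services': {
        'my_db': {
            'type': 'database',       # compared against handler_type
            'handler': DummyHandler,  # class, instantiated with its 'settings' dict
            'settings': {'host': 'localhost'},
            'main': True,             # appends 'my_db' to _main_handlers
        },
    },
}
```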
5fa1ee8922d43acb5f3bb5d9e3dcefbb64a154ee07d7aff6ce0bee623489a01c | def get(self, handler_name):
'\n Get the handler instance by name\n :param handler_name: handler name string\n :return: Handler Instance\n '
return self._handlers.get(handler_name) | Get the handler instance by name
:param handler_name: handler name string
:return: Handler Instance | mlapp/handlers/wrappers/wrapper_interface.py | get | zach-navina/mlapp | 33 | python | def get(self, handler_name):
'\n Get the handler instance by name\n :param handler_name: handler name string\n :return: Handler Instance\n '
return self._handlers.get(handler_name) | def get(self, handler_name):
'\n Get the handler instance by name\n :param handler_name: handler name string\n :return: Handler Instance\n '
return self._handlers.get(handler_name)<|docstring|>Get the handler instance by name
:param handler_name: handler name string
:return: Handler Instance<|endoftext|> |
4a1be46b8f2a0eda1fca91a0e025359df6197359c14df6dc8a8f62745571287e | def empty(self):
'\n Checks if there are configured handlers as "main"\n '
return (len(self._main_handlers) == 0) | Checks if there are configured handlers as "main" | mlapp/handlers/wrappers/wrapper_interface.py | empty | zach-navina/mlapp | 33 | python | def empty(self):
'\n \n '
return (len(self._main_handlers) == 0) | def empty(self):
'\n \n '
return (len(self._main_handlers) == 0)<|docstring|>Checks if there are configured handlers as "main"<|endoftext|> |
c84fd9d03363f2dac6e84e56405be627528d933cb2db349f4010df23d73ef42b | def create_app(test_config: Optional[Dict[(str, Any)]]=None, *, with_db: bool=True):
'Application factory.'
app = Flask('main', static_url_path='', template_folder=cfg.TEMPLATES_DIR)
app.config.from_object(cfg)
if test_config:
app.config.update(test_config)
if app.debug:
app.logger.propagate = True
register_blueprints(app)
register_extensions(app, test_config=test_config)
register_route_checks(app)
register_custom_helpers(app)
app.jinja_env.lstrip_blocks = True
app.jinja_env.trim_blocks = True
if with_db:
init_db(app)
return app | Application factory. | lib/app_factory.py | create_app | pombredanne/vulncode-db | 592 | python | def create_app(test_config: Optional[Dict[(str, Any)]]=None, *, with_db: bool=True):
app = Flask('main', static_url_path='', template_folder=cfg.TEMPLATES_DIR)
app.config.from_object(cfg)
if test_config:
app.config.update(test_config)
if app.debug:
app.logger.propagate = True
register_blueprints(app)
register_extensions(app, test_config=test_config)
register_route_checks(app)
register_custom_helpers(app)
app.jinja_env.lstrip_blocks = True
app.jinja_env.trim_blocks = True
if with_db:
init_db(app)
return app | def create_app(test_config: Optional[Dict[(str, Any)]]=None, *, with_db: bool=True):
app = Flask('main', static_url_path='', template_folder=cfg.TEMPLATES_DIR)
app.config.from_object(cfg)
if test_config:
app.config.update(test_config)
if app.debug:
app.logger.propagate = True
register_blueprints(app)
register_extensions(app, test_config=test_config)
register_route_checks(app)
register_custom_helpers(app)
app.jinja_env.lstrip_blocks = True
app.jinja_env.trim_blocks = True
if with_db:
init_db(app)
return app<|docstring|>Application factory.<|endoftext|> |
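A plausible way to exercise the factory in tests, assuming standard Flask config keys; the keys below are illustrative:

```python
# Sketch: driving the factory with a test config and no database.
app = create_app({'TESTING': True, 'WTF_CSRF_ENABLED': False}, with_db=False)
with app.test_client() as client:
    print(client.get('/').status_code)
```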
88b9020d0436319236c376376bf360717ff97a67b4901e6d2219c4b88afd59d7 | def register_extensions(app, test_config=None):
'Register Flask extensions.'
Bootstrap(app)
public_paths = ['/favicon.ico', '/static/']
csrf = CSRFProtect()
csrf.init_app(app)
oauth.init_app(app)
if ((not cfg.IS_PROD) and (not test_config)):
DebugToolbarExtension(app)
csrf.exempt(debug_toolbar_bp)
public_paths.append('/_debug_toolbar/')
def always_authorize():
for path in public_paths:
if request.path.startswith(path):
logging.warning('Bypassing ACL check for %s (matches %s)', request.path, path)
request._authorized = True
return
app.before_request(always_authorize)
bouncer.init_app(app)
def check_or_404(response: Response):
if ((response.status_code // 100) != 2):
return response
try:
return bouncer.check_authorization(response)
except Forbidden:
logging.warning('Automatically denied access to response %d of %s', response.status_code, request.path)
raise
app.after_request(check_or_404) | Register Flask extensions. | lib/app_factory.py | register_extensions | pombredanne/vulncode-db | 592 | python | def register_extensions(app, test_config=None):
Bootstrap(app)
public_paths = ['/favicon.ico', '/static/']
csrf = CSRFProtect()
csrf.init_app(app)
oauth.init_app(app)
if ((not cfg.IS_PROD) and (not test_config)):
DebugToolbarExtension(app)
csrf.exempt(debug_toolbar_bp)
public_paths.append('/_debug_toolbar/')
def always_authorize():
for path in public_paths:
if request.path.startswith(path):
logging.warning('Bypassing ACL check for %s (matches %s)', request.path, path)
request._authorized = True
return
app.before_request(always_authorize)
bouncer.init_app(app)
def check_or_404(response: Response):
if ((response.status_code // 100) != 2):
return response
try:
return bouncer.check_authorization(response)
except Forbidden:
logging.warning('Automatically denied access to response %d of %s', response.status_code, request.path)
raise
app.after_request(check_or_404) | def register_extensions(app, test_config=None):
Bootstrap(app)
public_paths = ['/favicon.ico', '/static/']
csrf = CSRFProtect()
csrf.init_app(app)
oauth.init_app(app)
if ((not cfg.IS_PROD) and (not test_config)):
DebugToolbarExtension(app)
csrf.exempt(debug_toolbar_bp)
public_paths.append('/_debug_toolbar/')
def always_authorize():
for path in public_paths:
if request.path.startswith(path):
logging.warning('Bypassing ACL check for %s (matches %s)', request.path, path)
request._authorized = True
return
app.before_request(always_authorize)
bouncer.init_app(app)
def check_or_404(response: Response):
if ((response.status_code // 100) != 2):
return response
try:
return bouncer.check_authorization(response)
except Forbidden:
logging.warning('Automatically denied access to response %d of %s', response.status_code, request.path)
raise
app.after_request(check_or_404)<|docstring|>Register Flask extensions.<|endoftext|> |
67ad1bd21180718b95116b833f785c6a5a2cb62b014bc21f3981430578847efb | def register_blueprints(app):
'Register Flask blueprints.'
app.register_blueprint(admin_bp)
app.register_blueprint(auth_bp)
app.register_blueprint(api_bp)
app.register_blueprint(api_v1_bp)
app.register_blueprint(frontend_bp)
app.register_blueprint(product_bp)
app.register_blueprint(vcs_proxy_bp)
app.register_blueprint(vuln_bp)
app.register_blueprint(profile_bp)
app.register_blueprint(review_bp) | Register Flask blueprints. | lib/app_factory.py | register_blueprints | pombredanne/vulncode-db | 592 | python | def register_blueprints(app):
app.register_blueprint(admin_bp)
app.register_blueprint(auth_bp)
app.register_blueprint(api_bp)
app.register_blueprint(api_v1_bp)
app.register_blueprint(frontend_bp)
app.register_blueprint(product_bp)
app.register_blueprint(vcs_proxy_bp)
app.register_blueprint(vuln_bp)
app.register_blueprint(profile_bp)
app.register_blueprint(review_bp) | def register_blueprints(app):
app.register_blueprint(admin_bp)
app.register_blueprint(auth_bp)
app.register_blueprint(api_bp)
app.register_blueprint(api_v1_bp)
app.register_blueprint(frontend_bp)
app.register_blueprint(product_bp)
app.register_blueprint(vcs_proxy_bp)
app.register_blueprint(vuln_bp)
app.register_blueprint(profile_bp)
app.register_blueprint(review_bp)<|docstring|>Register Flask blueprints.<|endoftext|> |
aa21c16c9c36513a7f4ec34753abb30c5eba73d2d76aa24982f8d3fc80573ed4 | def cnRemainder(ms):
'Chinese remainder theorem.\n (moduli, residues) -> Either explanation or solution\n '
def go(ms, rs):
mp = numericProduct(ms)
cms = [(mp // x) for x in ms]
def possibleSoln(invs):
return Right((sum(map(mul, cms, map(mul, rs, invs))) % mp))
return bindLR(zipWithEither(modMultInv)(cms)(ms))(possibleSoln)
return (lambda rs: go(ms, rs)) | Chinese remainder theorem.
(moduli, residues) -> Either explanation or solution | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | cnRemainder | mullikine/RosettaCodeData | 1 | python | def cnRemainder(ms):
'Chinese remainder theorem.\n (moduli, residues) -> Either explanation or solution\n '
def go(ms, rs):
mp = numericProduct(ms)
cms = [(mp // x) for x in ms]
def possibleSoln(invs):
return Right((sum(map(mul, cms, map(mul, rs, invs))) % mp))
return bindLR(zipWithEither(modMultInv)(cms)(ms))(possibleSoln)
return (lambda rs: go(ms, rs)) | def cnRemainder(ms):
'Chinese remainder theorem.\n (moduli, residues) -> Either explanation or solution\n '
def go(ms, rs):
mp = numericProduct(ms)
cms = [(mp // x) for x in ms]
def possibleSoln(invs):
return Right((sum(map(mul, cms, map(mul, rs, invs))) % mp))
return bindLR(zipWithEither(modMultInv)(cms)(ms))(possibleSoln)
return (lambda rs: go(ms, rs))<|docstring|>Chinese remainder theorem.
(moduli, residues) -> Either explanation or solution<|endoftext|> |
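A worked instance of the classic system x = 2 (mod 3), x = 3 (mod 5), x = 2 (mod 7), whose smallest solution is 23:

```python
# The curried call returns a Right-wrapped solution modulo 3*5*7 = 105.
result = cnRemainder([3, 5, 7])([2, 3, 2])
print(result)  # {'type': 'Either', 'Left': None, 'Right': 23}
```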
8eecb336980150c2e57ac21944c865c47c82dd1d621986e961dcb1f2c53826fa | def modMultInv(a, b):
'Modular multiplicative inverse.'
(x, y) = eGcd(a, b)
return (Right(x) if (1 == ((a * x) + (b * y))) else Left(((('no modular inverse for ' + str(a)) + ' and ') + str(b)))) | Modular multiplicative inverse. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | modMultInv | mullikine/RosettaCodeData | 1 | python | def modMultInv(a, b):
(x, y) = eGcd(a, b)
return (Right(x) if (1 == ((a * x) + (b * y))) else Left(((('no modular inverse for ' + str(a)) + ' and ') + str(b)))) | def modMultInv(a, b):
(x, y) = eGcd(a, b)
return (Right(x) if (1 == ((a * x) + (b * y))) else Left(((('no modular inverse for ' + str(a)) + ' and ') + str(b))))<|docstring|>Modular multiplicative inverse.<|endoftext|> |
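Note that the inverse comes straight from the Bezout coefficient, so it may be negative; Python 3.8+'s built-in `pow` gives the canonical residue for comparison:

```python
inv = modMultInv(3, 7)
print(inv['Right'])   # -2, and -2 is congruent to 5 (mod 7)
print(pow(3, -1, 7))  # 5 (Python 3.8+ built-in modular inverse)
```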
6ec8bc5a3615884d80dde51a290ca142ec5f9bcebd30b9a3a3d6e76a9a3f5a26 | def eGcd(a, b):
'Extended greatest common divisor.'
def go(a, b):
if (0 == b):
return (1, 0)
else:
(q, r) = divmod(a, b)
(s, t) = go(b, r)
return (t, (s - (q * t)))
return go(a, b) | Extended greatest common divisor. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | eGcd | mullikine/RosettaCodeData | 1 | python | def eGcd(a, b):
def go(a, b):
if (0 == b):
return (1, 0)
else:
(q, r) = divmod(a, b)
(s, t) = go(b, r)
return (t, (s - (q * t)))
return go(a, b) | def eGcd(a, b):
def go(a, b):
if (0 == b):
return (1, 0)
else:
(q, r) = divmod(a, b)
(s, t) = go(b, r)
return (t, (s - (q * t)))
return go(a, b)<|docstring|>Extended greatest common divisor.<|endoftext|> |
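A quick Bezout-identity check on the implementation above:

```python
x, y = eGcd(240, 46)
print(x, y, 240 * x + 46 * y)  # -9 47 2, and gcd(240, 46) == 2
```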
4b7310571845c8fc023dc511fe5bc7a6ee7df8771a6fde325d7b7d9c57943fe3 | def main():
'Tests of soluble and insoluble cases.'
print(fTable(((__doc__ + ':\n\n (moduli, residues) -> ') + 'Either solution or explanation\n'))(repr)(either(compose(quoted("'"))(curry(add)('No solution: ')))(compose(quoted(' '))(repr)))(uncurry(cnRemainder))([([10, 4, 12], [11, 12, 13]), ([11, 12, 13], [10, 4, 12]), ([10, 4, 9], [11, 22, 19]), ([3, 5, 7], [2, 3, 2]), ([2, 3, 2], [3, 5, 7])])) | Tests of soluble and insoluble cases. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | main | mullikine/RosettaCodeData | 1 | python | def main():
print(fTable(((__doc__ + ':\n\n (moduli, residues) -> ') + 'Either solution or explanation\n'))(repr)(either(compose(quoted("'"))(curry(add)('No solution: ')))(compose(quoted(' '))(repr)))(uncurry(cnRemainder))([([10, 4, 12], [11, 12, 13]), ([11, 12, 13], [10, 4, 12]), ([10, 4, 9], [11, 22, 19]), ([3, 5, 7], [2, 3, 2]), ([2, 3, 2], [3, 5, 7])])) | def main():
print(fTable(((__doc__ + ':\n\n (moduli, residues) -> ') + 'Either solution or explanation\n'))(repr)(either(compose(quoted("'"))(curry(add)('No solution: ')))(compose(quoted(' '))(repr)))(uncurry(cnRemainder))([([10, 4, 12], [11, 12, 13]), ([11, 12, 13], [10, 4, 12]), ([10, 4, 9], [11, 22, 19]), ([3, 5, 7], [2, 3, 2]), ([2, 3, 2], [3, 5, 7])]))<|docstring|>Tests of soluble and insoluble cases.<|endoftext|> |
d93e2b27a14b4f1387f9d4fc110b8fc326281478d7cecbb41095b2a856991666 | def Left(x):
'Constructor for an empty Either (option type) value\n with an associated string.'
return {'type': 'Either', 'Right': None, 'Left': x} | Constructor for an empty Either (option type) value
with an associated string. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | Left | mullikine/RosettaCodeData | 1 | python | def Left(x):
'Constructor for an empty Either (option type) value\n with an associated string.'
return {'type': 'Either', 'Right': None, 'Left': x} | def Left(x):
'Constructor for an empty Either (option type) value\n with an associated string.'
return {'type': 'Either', 'Right': None, 'Left': x}<|docstring|>Constructor for an empty Either (option type) value
with an associated string.<|endoftext|> |
5a76031363ff580ceb740af29844f13b7037c76f90bb7cd3eb00b76f66d3a279 | def Right(x):
'Constructor for a populated Either (option type) value'
return {'type': 'Either', 'Left': None, 'Right': x} | Constructor for a populated Either (option type) value | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | Right | mullikine/RosettaCodeData | 1 | python | def Right(x):
return {'type': 'Either', 'Left': None, 'Right': x} | def Right(x):
return {'type': 'Either', 'Left': None, 'Right': x}<|docstring|>Constructor for a populated Either (option type) value<|endoftext|> |
42e2a5b896055160ff121e322a72d7517e6249687963d7134ce44f49363eee70 | def any_(p):
'True if p(x) holds for at least\n one item in xs.'
def go(xs):
for x in xs:
if p(x):
return True
return False
return (lambda xs: go(xs)) | True if p(x) holds for at least
one item in xs. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | any_ | mullikine/RosettaCodeData | 1 | python | def any_(p):
'True if p(x) holds for at least\n one item in xs.'
def go(xs):
for x in xs:
if p(x):
return True
return False
return (lambda xs: go(xs)) | def any_(p):
'True if p(x) holds for at least\n one item in xs.'
def go(xs):
for x in xs:
if p(x):
return True
return False
return (lambda xs: go(xs))<|docstring|>True if p(x) holds for at least
one item in xs.<|endoftext|> |
0c003c74418b0fb2cca75cc20e25cc5ea12138cb6055333cbf4d66b07d246b83 | def bindLR(m):
'Either monad injection operator.\n Two computations sequentially composed,\n with any value produced by the first\n passed as an argument to the second.'
return (lambda mf: (mf(m.get('Right')) if (None is m.get('Left')) else m)) | Either monad injection operator.
Two computations sequentially composed,
with any value produced by the first
passed as an argument to the second. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | bindLR | mullikine/RosettaCodeData | 1 | python | def bindLR(m):
'Either monad injection operator.\n Two computations sequentially composed,\n with any value produced by the first\n passed as an argument to the second.'
return (lambda mf: (mf(m.get('Right')) if (None is m.get('Left')) else m)) | def bindLR(m):
'Either monad injection operator.\n Two computations sequentially composed,\n with any value produced by the first\n passed as an argument to the second.'
return (lambda mf: (mf(m.get('Right')) if (None is m.get('Left')) else m))<|docstring|>Either monad injection operator.
Two computations sequentially composed,
with any value produced by the first
passed as an argument to the second.<|endoftext|> |
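A small chaining sketch using the dict-based Either values above; `safe_div` is illustrative:

```python
# Left values short-circuit; Right values feed the next computation.
def safe_div(n):
    return lambda d: Left('division by zero') if d == 0 else Right(n / d)

print(bindLR(Right(4))(safe_div(8)))           # Right(2.0) as an Either dict
print(bindLR(Left('bad input'))(safe_div(8)))  # the Left passes through unchanged
```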
db83133929783cffb42d3ca038fb5a28d9e7251f4a063d60f2fbd68d583f53b6 | def compose(g):
'Right to left function composition.'
return (lambda f: (lambda x: g(f(x)))) | Right to left function composition. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | compose | mullikine/RosettaCodeData | 1 | python | def compose(g):
return (lambda f: (lambda x: g(f(x)))) | def compose(g):
return (lambda f: (lambda x: g(f(x))))<|docstring|>Right to left function composition.<|endoftext|> |
ceccfb71a9cc9700367741548eee67037ffc94a91a0a0f9dca60b94db212e1db | def curry(f):
'A curried function derived\n from an uncurried function.'
return (lambda a: (lambda b: f(a, b))) | A curried function derived
from an uncurried function. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | curry | mullikine/RosettaCodeData | 1 | python | def curry(f):
'A curried function derived\n from an uncurried function.'
return (lambda a: (lambda b: f(a, b))) | def curry(f):
'A curried function derived\n from an uncurried function.'
return (lambda a: (lambda b: f(a, b)))<|docstring|>A curried function derived
from an uncurried function.<|endoftext|> |
7daf3a4f2b7d955e88ece75b92280982e4e0fcecf6935a17550472ce1be018b3 | def either(fl):
'The application of fl to e if e is a Left value,\n or the application of fr to e if e is a Right value.'
return (lambda fr: (lambda e: (fl(e['Left']) if (None is e['Right']) else fr(e['Right'])))) | The application of fl to e if e is a Left value,
or the application of fr to e if e is a Right value. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | either | mullikine/RosettaCodeData | 1 | python | def either(fl):
'The application of fl to e if e is a Left value,\n or the application of fr to e if e is a Right value.'
return (lambda fr: (lambda e: (fl(e['Left']) if (None is e['Right']) else fr(e['Right'])))) | def either(fl):
'The application of fl to e if e is a Left value,\n or the application of fr to e if e is a Right value.'
return (lambda fr: (lambda e: (fl(e['Left']) if (None is e['Right']) else fr(e['Right']))))<|docstring|>The application of fl to e if e is a Left value,
or the application of fr to e if e is a Right value.<|endoftext|> |
85c110da3241b035b35355ff5a2e5eec75c8f8f0e2ca8d3f2dde33b5ccc0c270 | def fTable(s):
'Heading -> x display function ->\n fx display function ->\n f -> value list -> tabular string.'
def go(xShow, fxShow, f, xs):
w = max(map(compose(len)(xShow), xs))
return ((s + '\n') + '\n'.join([((xShow(x).rjust(w, ' ') + ' -> ') + fxShow(f(x))) for x in xs]))
return (lambda xShow: (lambda fxShow: (lambda f: (lambda xs: go(xShow, fxShow, f, xs))))) | Heading -> x display function ->
fx display function ->
f -> value list -> tabular string. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | fTable | mullikine/RosettaCodeData | 1 | python | def fTable(s):
'Heading -> x display function ->\n fx display function ->\n f -> value list -> tabular string.'
def go(xShow, fxShow, f, xs):
w = max(map(compose(len)(xShow), xs))
return ((s + '\n') + '\n'.join([((xShow(x).rjust(w, ' ') + ' -> ') + fxShow(f(x))) for x in xs]))
return (lambda xShow: (lambda fxShow: (lambda f: (lambda xs: go(xShow, fxShow, f, xs))))) | def fTable(s):
'Heading -> x display function ->\n fx display function ->\n f -> value list -> tabular string.'
def go(xShow, fxShow, f, xs):
w = max(map(compose(len)(xShow), xs))
return ((s + '\n') + '\n'.join([((xShow(x).rjust(w, ' ') + ' -> ') + fxShow(f(x))) for x in xs]))
return (lambda xShow: (lambda fxShow: (lambda f: (lambda xs: go(xShow, fxShow, f, xs)))))<|docstring|>Heading -> x display function ->
fx display function ->
f -> value list -> tabular string.<|endoftext|> |
e1d80058368e312cb639d6b89261710f4b410b570aef48614d69d447ad3e8aba | def numericProduct(xs):
'The arithmetic product of all numbers in xs.'
return reduce(mul, xs, 1) | The arithmetic product of all numbers in xs. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | numericProduct | mullikine/RosettaCodeData | 1 | python | def numericProduct(xs):
return reduce(mul, xs, 1) | def numericProduct(xs):
return reduce(mul, xs, 1)<|docstring|>The arithmetic product of all numbers in xs.<|endoftext|> |
2b7392a54c2b34a5ad32475c249e61e8681d571275c56d558430ebae2afd46fa | def partitionEithers(lrs):
'A list of Either values partitioned into a tuple\n of two lists, with all Left elements extracted\n into the first list, and Right elements\n extracted into the second list.\n '
def go(a, x):
(ls, rs) = a
r = x.get('Right')
return (((ls + [x.get('Left')]), rs) if (None is r) else (ls, (rs + [r])))
return reduce(go, lrs, ([], [])) | A list of Either values partitioned into a tuple
of two lists, with all Left elements extracted
into the first list, and Right elements
extracted into the second list. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | partitionEithers | mullikine/RosettaCodeData | 1 | python | def partitionEithers(lrs):
'A list of Either values partitioned into a tuple\n of two lists, with all Left elements extracted\n into the first list, and Right elements\n extracted into the second list.\n '
def go(a, x):
(ls, rs) = a
r = x.get('Right')
return (((ls + [x.get('Left')]), rs) if (None is r) else (ls, (rs + [r])))
return reduce(go, lrs, ([], [])) | def partitionEithers(lrs):
'A list of Either values partitioned into a tuple\n of two lists, with all Left elements extracted\n into the first list, and Right elements\n extracted into the second list.\n '
def go(a, x):
(ls, rs) = a
r = x.get('Right')
return (((ls + [x.get('Left')]), rs) if (None is r) else (ls, (rs + [r])))
return reduce(go, lrs, ([], []))<|docstring|>A list of Either values partitioned into a tuple
of two lists, with all Left elements extracted
into the first list, and Right elements
extracted into the second list.<|endoftext|> |
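Splitting a mixed list with the function above:

```python
ls, rs = partitionEithers([Right(1), Left('oops'), Right(2)])
print(ls, rs)  # ['oops'] [1, 2]
```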
9b9aac0c21fa92e76717524d881408e32bcd52a9664d40d7c690daa469ea4442 | def quoted(c):
'A string flanked on both sides\n by a specified quote character.\n '
return (lambda s: ((c + s) + c)) | A string flanked on both sides
by a specified quote character. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | quoted | mullikine/RosettaCodeData | 1 | python | def quoted(c):
'A string flanked on both sides\n by a specified quote character.\n '
return (lambda s: ((c + s) + c)) | def quoted(c):
'A string flanked on both sides\n by a specified quote character.\n '
return (lambda s: ((c + s) + c))<|docstring|>A string flanked on both sides
by a specified quote character.<|endoftext|> |
9a1af0f9bfcb07e753aa2ddb5c8304bb31f634726917cf7e224e70ce90f296a3 | def uncurry(f):
'A function over a tuple,\n derived from a curried function.'
return (lambda xy: f(xy[0])(xy[1])) | A function over a tuple,
derived from a curried function. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | uncurry | mullikine/RosettaCodeData | 1 | python | def uncurry(f):
'A function over a tuple,\n derived from a curried function.'
return (lambda xy: f(xy[0])(xy[1])) | def uncurry(f):
'A function over a tuple,\n derived from a curried function.'
return (lambda xy: f(xy[0])(xy[1]))<|docstring|>A function over a tuple,
derived from a curried function.<|endoftext|> |
9fe5cdc0ede46c439de8a86f06fc21caa1d6c8f0d41a74b33a506e62ac2ee169 | def zipWithEither(f):
'Either a list of results if f succeeds with every pair\n in the zip of xs and ys, or an explanatory string\n if any application of f returns no result.\n '
def go(xs, ys):
(ls, rs) = partitionEithers(map(f, xs, ys))
return (Left(ls[0]) if ls else Right(rs))
return (lambda xs: (lambda ys: go(xs, ys))) | Either a list of results if f succeeds with every pair
in the zip of xs and ys, or an explanatory string
if any application of f returns no result. | Task/Chinese-remainder-theorem/Python/chinese-remainder-theorem-3.py | zipWithEither | mullikine/RosettaCodeData | 1 | python | def zipWithEither(f):
'Either a list of results if f succeeds with every pair\n in the zip of xs and ys, or an explanatory string\n if any application of f returns no result.\n '
def go(xs, ys):
(ls, rs) = partitionEithers(map(f, xs, ys))
return (Left(ls[0]) if ls else Right(rs))
return (lambda xs: (lambda ys: go(xs, ys))) | def zipWithEither(f):
'Either a list of results if f succeeds with every pair\n in the zip of xs and ys, or an explanatory string\n if any application of f returns no result.\n '
def go(xs, ys):
(ls, rs) = partitionEithers(map(f, xs, ys))
return (Left(ls[0]) if ls else Right(rs))
return (lambda xs: (lambda ys: go(xs, ys)))<|docstring|>Either a list of results if f succeeds with every pair
in the zip of xs and ys, or an explanatory string
if any application of f returns no result.<|endoftext|> |
13bdad0ac38a120865177590c77ae0f7231f15bb7de8e0f025b8f6836f154425 | @abstractproperty
def loop(self) -> asyncio.AbstractEventLoop:
'\n Get the stored event loop or return one from the environment\n Should be stored in `self._loop` in the child class\n ' | Get the stored event loop or return one from the environment
Should be stored in `self._loop` in the child class | portscanner/mixins/loop.py | loop | GoodiesHQ/portscanner | 0 | python | @abstractproperty
def loop(self) -> asyncio.AbstractEventLoop:
'\n Get the stored event loop or return one from the environment\n Should be stored in `self._loop` in the child class\n ' | @abstractproperty
def loop(self) -> asyncio.AbstractEventLoop:
'\n Get the stored event loop or return one from the environment\n Should be stored in `self._loop` in the child class\n '<|docstring|>Get the stored event loop or return one from the environment
Should be stored in `self._loop` in the child class<|endoftext|> |
271986193db4ee6a78b2202b63d5bd2a1c2a6623c1e4b27d7845eb275437657e | @abstractstaticmethod
def _get_loop() -> asyncio.AbstractEventLoop:
"\n Get the environment loop. It is up to the implementation\n if you'd like to raise an exception or create and set a new loop\n " | Get the environment loop. It is up to the implementation
if you'd like to raise an exception or create and set a new loop | portscanner/mixins/loop.py | _get_loop | GoodiesHQ/portscanner | 0 | python | @abstractstaticmethod
def _get_loop() -> asyncio.AbstractEventLoop:
"\n Get the environment loop. It is up to the implementation\n if you'd like to raise an exception or create and set a new loop\n " | @abstractstaticmethod
def _get_loop() -> asyncio.AbstractEventLoop:
"\n Get the environment loop. It is up to the implementation\n if you'd like to raise an exception or create and set a new loop\n "<|docstring|>Get the environment loop. It is up to the implementation
if you'd like to raise an exception or create and set a new loop<|endoftext|> |
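One plausible concrete implementation of the two abstract members; the fallback policy (create and set a fresh loop) is a choice on the implementor's part, not something the mixin mandates:

```python
# Illustrative concrete mixin implementation.
import asyncio

class EnvironmentLoopMixin:
    def __init__(self, loop=None):
        self._loop = loop

    @property
    def loop(self):
        # Prefer the stored loop, fall back to the environment.
        return self._loop or self._get_loop()

    @staticmethod
    def _get_loop():
        try:
            return asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()  # thread had no loop; make one
            asyncio.set_event_loop(loop)
            return loop
```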
07d25e3fc61cc46e3454cd91c0267be58ae58d742eba90505e6866d52bf37f56 | def __init__(self, product_name: str, recipes_list: list):
'\n Initialize instance with product name string and list of Recipe instances.\n '
self._product_name = product_name
self._recipes_list = recipes_list | Initialize instance with product name string and list of Recipe instances. | satisfy_calc/coproduct_recipes.py | __init__ | sedatDemiriz/satisfy-calc | 0 | python | def __init__(self, product_name: str, recipes_list: list):
'\n \n '
self._product_name = product_name
self._recipes_list = recipes_list | def __init__(self, product_name: str, recipes_list: list):
'\n \n '
self._product_name = product_name
self._recipes_list = recipes_list<|docstring|>Initialize instance with product name string and list of Recipe instances.<|endoftext|> |
33e0208c3012df01cfddcfd0dfbdfc3d4063f8995c991ac84d6e30efbe884d7a | def __str__(self):
'\n Return summary of instance using product name and number of recipes included.\n '
num_recipes = self.num_recipes
string = '{}: {} recipe'.format(self._product_name, num_recipes)
if (num_recipes > 1):
return (string + 's')
else:
return string | Return summary of instance using product name and number of recipes included. | satisfy_calc/coproduct_recipes.py | __str__ | sedatDemiriz/satisfy-calc | 0 | python | def __str__(self):
'\n \n '
num_recipes = self.num_recipes
string = '{}: {} recipe'.format(self._product_name, num_recipes)
if (num_recipes > 1):
return (string + 's')
else:
return string | def __str__(self):
'\n \n '
num_recipes = self.num_recipes
string = '{}: {} recipe'.format(self._product_name, num_recipes)
if (num_recipes > 1):
return (string + 's')
else:
return string<|docstring|>Return summary of instance using product name and number of recipes included.<|endoftext|> |
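Assuming the enclosing class is the `CoproductRecipes` suggested by the module name, the pluralizing branch behaves like this (plain strings stand in for Recipe instances):

```python
cr = CoproductRecipes('Fuel', ['recipe_a', 'recipe_b'])
print(cr)  # 'Fuel: 2 recipes' -- the plural branch appends the trailing 's'
```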
bfa6d95a1f18dcd8df7569c86b1aca31f269dd55f5ced35b2cc275965622804e | def __repr__(self):
'\n TODO\n '
return self.__str__() | TODO | satisfy_calc/coproduct_recipes.py | __repr__ | sedatDemiriz/satisfy-calc | 0 | python | def __repr__(self):
'\n \n '
return self.__str__() | def __repr__(self):
'\n \n '
return self.__str__()<|docstring|>TODO<|endoftext|> |
952f53e5f8fc1f6f98268040b56b65243b354bf1ab4d81fbaef8cd434d1e80a3 | def print_summary(self):
'\n Prints all Recipe instances contained within Coproduct Recipe instance.\n '
n = 1
for recipe in self.recipes:
print(str(n), '-', recipe.summary)
n += 1 | Prints all Recipe instances contained within Coproduct Recipe instance. | satisfy_calc/coproduct_recipes.py | print_summary | sedatDemiriz/satisfy-calc | 0 | python | def print_summary(self):
'\n \n '
n = 1
for recipe in self.recipes:
print(str(n), '-', recipe.summary)
n += 1 | def print_summary(self):
'\n \n '
n = 1
for recipe in self.recipes:
print(str(n), '-', recipe.summary)
n += 1<|docstring|>Prints all Recipe instances contained within Coproduct Recipe instance.<|endoftext|> |
bf44becddf120bf85ec1d841051320799601d9b934aec9ae980797d4a3319260 | @property
def product(self):
'\n Return product name string.\n '
return self._product_name | Return product name string. | satisfy_calc/coproduct_recipes.py | product | sedatDemiriz/satisfy-calc | 0 | python | @property
def product(self):
'\n \n '
return self._product_name | @property
def product(self):
'\n \n '
return self._product_name<|docstring|>Return product name string.<|endoftext|> |
d118a2d6028a62ea2d105a94941e70abd625a2d89151f226d17807e6056c5f3f | @product.setter
def product(self, product):
'\n Product name property setter.\n '
self._product_name = product | Product name property setter. | satisfy_calc/coproduct_recipes.py | product | sedatDemiriz/satisfy-calc | 0 | python | @product.setter
def product(self, product):
'\n \n '
self._product_name = product | @product.setter
def product(self, product):
'\n \n '
self._product_name = product<|docstring|>Product name property setter.<|endoftext|> |
0001e74f9f27fb7ab66aadb376b47184e604a3d9e7248171bb4514bf0e2e1364 | @property
def recipes(self):
'\n Return list of all included Recipe instances.\n '
return self._recipes_list | Return list of all included Recipe instances. | satisfy_calc/coproduct_recipes.py | recipes | sedatDemiriz/satisfy-calc | 0 | python | @property
def recipes(self):
'\n \n '
return self._recipes_list | @property
def recipes(self):
'\n \n '
return self._recipes_list<|docstring|>Return list of all included Recipe instances.<|endoftext|> |
4044677e3f53e99f4c51766162cb89d0479a47a229a54ecbb9c3e5e6c6018275 | @recipes.setter
def recipes(self, recipes):
'\n Recipes property setter.\n '
self._recipes_list = recipes | Recipes property setter. | satisfy_calc/coproduct_recipes.py | recipes | sedatDemiriz/satisfy-calc | 0 | python | @recipes.setter
def recipes(self, recipes):
'\n \n '
self._recipes_list = recipes | @recipes.setter
def recipes(self, recipes):
'\n \n '
self._recipes_list = recipes<|docstring|>Recipes property setter.<|endoftext|>
5fb210f66a8d21e31a931e521a9da2b498d10ed2d94a1118f41595a288ec3ce1 | @property
def num_recipes(self):
'\n Return number of included Recipe instances.\n '
return len(self._recipes_list) | Return number of included Recipe instances. | satisfy_calc/coproduct_recipes.py | num_recipes | sedatDemiriz/satisfy-calc | 0 | python | @property
def num_recipes(self):
'\n \n '
return len(self._recipes_list) | @property
def num_recipes(self):
'\n \n '
return len(self._recipes_list)<|docstring|>Return number of included Recipe instances.<|endoftext|> |
43f0b7d0e6abc91298c54dc1cb811442454e0eeb9357cde1e4ffac7cf57e1628 | @property
def is_raw(self):
'\n Return True if material is raw.\n '
return (self._recipes_list == []) | Return True if material is raw. | satisfy_calc/coproduct_recipes.py | is_raw | sedatDemiriz/satisfy-calc | 0 | python | @property
def is_raw(self):
'\n \n '
return (self._recipes_list == []) | @property
def is_raw(self):
'\n \n '
return (self._recipes_list == [])<|docstring|>Return True if material is raw.<|endoftext|> |
79a0728174148fe34dd25926ba78713d84ef089cd8ed92e2d1f3760e5049c354 | def __init__(self, *args, **kwargs):
' Initialize a wxProperCheckBox.\n\n *args, **kwargs\n The positional and keyword arguments required to initialize\n a wx.CheckBox.\n\n '
super(wxProperCheckBox, self).__init__(*args, **kwargs)
self._in_click = False
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_CHECKBOX, self.OnToggled) | Initialize a wxProperCheckBox.
*args, **kwargs
The positional and keyword arguments required to initialize
a wx.CheckBox. | enaml/wx/wx_check_box.py | __init__ | pberkes/enaml | 11 | python | def __init__(self, *args, **kwargs):
' Initialize a wxProperCheckBox.\n\n *args, **kwargs\n The positional and keyword arguments required to initialize\n a wx.CheckBox.\n\n '
super(wxProperCheckBox, self).__init__(*args, **kwargs)
self._in_click = False
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_CHECKBOX, self.OnToggled) | def __init__(self, *args, **kwargs):
' Initialize a wxProperCheckBox.\n\n *args, **kwargs\n The positional and keyword arguments required to initialize\n a wx.CheckBox.\n\n '
super(wxProperCheckBox, self).__init__(*args, **kwargs)
self._in_click = False
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_CHECKBOX, self.OnToggled)<|docstring|>Initialize a wxProperCheckBox.
*args, **kwargs
The positional and keyword arguments required to initialize
a wx.CheckBox.<|endoftext|>
05d56452d611d91f196a779a034ecdb79b40d13028d88d4a12684e34eed0a399 | def OnLeftDown(self, event):
' Handles the left down mouse event for the check box.\n\n This is the first part of generating a click event.\n\n '
event.Skip()
self._in_click = True | Handles the left down mouse event for the check box.
This is the first part of generating a click event. | enaml/wx/wx_check_box.py | OnLeftDown | pberkes/enaml | 11 | python | def OnLeftDown(self, event):
' Handles the left down mouse event for the check box.\n\n This is the first part of generating a click event.\n\n '
event.Skip()
self._in_click = True | def OnLeftDown(self, event):
' Handles the left down mouse event for the check box.\n\n This is the first part of generating a click event.\n\n '
event.Skip()
self._in_click = True<|docstring|>Handles the left down mouse event for the check box.
This is the first part of generating a click event.<|endoftext|>
c52bec0cecd7929faa48f1b4d99a022a398204ace91a80f8ec72ee22a0a1babb | def OnLeftUp(self, event):
' Handles the left up mouse event for the check box.\n\n This is the second part of generating a click event.\n\n '
event.Skip()
if self._in_click:
self._in_click = False
event = wxCheckBoxClicked()
wx.PostEvent(self, event) | Handles the left up mouse event for the check box.
This is the second part of generating a click event. | enaml/wx/wx_check_box.py | OnLeftUp | pberkes/enaml | 11 | python | def OnLeftUp(self, event):
' Handles the left up mouse event for the check box.\n\n This is the second part of generating a click event.\n\n '
event.Skip()
if self._in_click:
self._in_click = False
event = wxCheckBoxClicked()
wx.PostEvent(self, event) | def OnLeftUp(self, event):
' Handles the left up mouse event for the check box.\n\n This is the second part of generating a click event.\n\n '
event.Skip()
if self._in_click:
self._in_click = False
event = wxCheckBoxClicked()
wx.PostEvent(self, event)<|docstring|>Handles the left up mouse event for the check box.
This is the second part of generating a click event.<|endoftext|> |
0ef048fc569c61809f6e9287b1434d81d2e83462a59359676f3a59781ac13b93 | def OnToggled(self, event):
' Handles the standard toggle event and emits the custom\n toggle event for the check box.\n\n '
event = wxCheckBoxToggled()
wx.PostEvent(self, event) | Handles the standard toggle event and emits the custom
toggle event for the check box. | enaml/wx/wx_check_box.py | OnToggled | pberkes/enaml | 11 | python | def OnToggled(self, event):
' Handles the standard toggle event and emits the custom\n toggle event for the check box.\n\n '
event = wxCheckBoxToggled()
wx.PostEvent(self, event) | def OnToggled(self, event):
' Handles the standard toggle event and emits the custom\n toggle event for the check box.\n\n '
event = wxCheckBoxToggled()
wx.PostEvent(self, event)<|docstring|>Handles the standard toggle event and emits the custom
toggle event for the check box.<|endoftext|> |
cdb69e9930ba3d1a559c00f6bfa4231d72675d9c6a8f0891e350f54afe73019a | def SetValue(self, val):
' Overrides the default SetValue method to emit proper events.\n\n '
old = self.GetValue()
if (old != val):
super(wxProperCheckBox, self).SetValue(val)
self._last = val
event = wxCheckBoxToggled()
wx.PostEvent(self, event) | Overrides the default SetValue method to emit proper events. | enaml/wx/wx_check_box.py | SetValue | pberkes/enaml | 11 | python | def SetValue(self, val):
' \n\n '
old = self.GetValue()
if (old != val):
super(wxProperCheckBox, self).SetValue(val)
self._last = val
event = wxCheckBoxToggled()
wx.PostEvent(self, event) | def SetValue(self, val):
' \n\n '
old = self.GetValue()
if (old != val):
super(wxProperCheckBox, self).SetValue(val)
self._last = val
event = wxCheckBoxToggled()
wx.PostEvent(self, event)<|docstring|>Overrides the default SetValue method to emit proper events.<|endoftext|> |
770e64213f8b9010ddecc18786827821ad3ad51d38f39fe5c86fa31b6510f062 | def create_widget(self):
' Create the underlying check box widget.\n\n '
self.widget = wxProperCheckBox(self.parent_widget()) | Create the underlying check box widget. | enaml/wx/wx_check_box.py | create_widget | pberkes/enaml | 11 | python | def create_widget(self):
' \n\n '
self.widget = wxProperCheckBox(self.parent_widget()) | def create_widget(self):
' \n\n '
self.widget = wxProperCheckBox(self.parent_widget())<|docstring|>Create the underlying check box widget.<|endoftext|> |
7f72dd53d73575330ec9593fa95e93e128ba31099c3b16d679e15f791a50f91b | def init_widget(self):
' Create and initialize the check box control.\n\n '
super(WxCheckBox, self).init_widget()
widget = self.widget
widget.Bind(EVT_CHECKBOX_CLICKED, self.on_clicked)
widget.Bind(EVT_CHECKBOX_TOGGLED, self.on_toggled) | Create and initialize the check box control. | enaml/wx/wx_check_box.py | init_widget | pberkes/enaml | 11 | python | def init_widget(self):
' \n\n '
super(WxCheckBox, self).init_widget()
widget = self.widget
widget.Bind(EVT_CHECKBOX_CLICKED, self.on_clicked)
widget.Bind(EVT_CHECKBOX_TOGGLED, self.on_toggled) | def init_widget(self):
' \n\n '
super(WxCheckBox, self).init_widget()
widget = self.widget
widget.Bind(EVT_CHECKBOX_CLICKED, self.on_clicked)
widget.Bind(EVT_CHECKBOX_TOGGLED, self.on_toggled)<|docstring|>Create and initialize the check box control.<|endoftext|> |
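The custom event machinery used above (wxCheckBoxClicked, wxCheckBoxToggled, EVT_CHECKBOX_CLICKED, EVT_CHECKBOX_TOGGLED) is not defined in these rows. A plausible definition plus a consumer-side sketch, assuming the events are built with wx.lib.newevent; treat the details as stand-ins for whatever enaml actually declares.

import wx
import wx.lib.newevent

# Assumed definitions: NewEvent() returns an (event class, binder) pair.
wxCheckBoxClicked, EVT_CHECKBOX_CLICKED = wx.lib.newevent.NewEvent()
wxCheckBoxToggled, EVT_CHECKBOX_TOGGLED = wx.lib.newevent.NewEvent()

def bind_demo(parent):
    # Bind the synthesized events the same way init_widget does above.
    box = wxProperCheckBox(parent, label='enabled')
    box.Bind(EVT_CHECKBOX_CLICKED, lambda evt: print('clicked'))
    box.Bind(EVT_CHECKBOX_TOGGLED, lambda evt: print('toggled ->', box.GetValue()))
    return box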
ea9f06150aaa57a95ad4931616fd8332ffd0d33f70ba71b8be3c63dbad8d21d4 | def set_checkable(self, checkable):
' Sets whether or not the widget is checkable.\n\n This is not supported in Wx.\n\n '
pass | Sets whether or not the widget is checkable.
This is not supported in Wx. | enaml/wx/wx_check_box.py | set_checkable | pberkes/enaml | 11 | python | def set_checkable(self, checkable):
' Sets whether or not the widget is checkable.\n\n This is not supported in Wx.\n\n '
pass | def set_checkable(self, checkable):
' Sets whether or not the widget is checkable.\n\n This is not supported in Wx.\n\n '
pass<|docstring|>Sets whether or not the widget is checkable.
This is not supported in Wx.<|endoftext|> |
810e88e67d90f2623978aed8d96120304affc3aa1fd6c6d9fda781d7d9422cfd | def get_checked(self):
' Returns the checked state of the widget.\n\n '
return self.widget.GetValue() | Returns the checked state of the widget. | enaml/wx/wx_check_box.py | get_checked | pberkes/enaml | 11 | python | def get_checked(self):
' \n\n '
return self.widget.GetValue() | def get_checked(self):
' \n\n '
return self.widget.GetValue()<|docstring|>Returns the checked state of the widget.<|endoftext|> |
5e1a42bcc2172abf5683182fc0f7285e171e1214b0b86119f6933aad50251b01 | def set_checked(self, checked):
" Sets the widget's checked state with the provided value.\n\n "
self._guard |= CHECKED_GUARD
try:
self.widget.SetValue(checked)
finally:
self._guard &= (~ CHECKED_GUARD) | Sets the widget's checked state with the provided value. | enaml/wx/wx_check_box.py | set_checked | pberkes/enaml | 11 | python | def set_checked(self, checked):
" \n\n "
self._guard |= CHECKED_GUARD
try:
self.widget.SetValue(checked)
finally:
self._guard &= (~ CHECKED_GUARD) | def set_checked(self, checked):
" \n\n "
self._guard |= CHECKED_GUARD
try:
self.widget.SetValue(checked)
finally:
self._guard &= (~ CHECKED_GUARD)<|docstring|>Sets the widget's checked state with the provided value.<|endoftext|> |
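set_checked above wraps the programmatic write in a bitmask guard so that the toggled event SetValue itself posts is not mistaken for a user change. A minimal, widget-free sketch of that re-entrancy pattern; the flag value is an assumption, only its use as a bit matters.

CHECKED_GUARD = 0x1  # assumed value

class GuardDemo:
    def __init__(self):
        self._guard = 0

    def set_checked(self, checked):
        self._guard |= CHECKED_GUARD         # enter guarded section
        try:
            self._apply(checked)             # may re-enter on_toggled
        finally:
            self._guard &= ~CHECKED_GUARD    # always release, even on error

    def on_toggled(self, event):
        if self._guard & CHECKED_GUARD:
            return                           # echo of our own write: ignore
        print('user toggled')

    def _apply(self, checked):
        self.on_toggled(None)                # simulate the widget echoing back

GuardDemo().set_checked(True)  # prints nothing: the echo is suppressed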
795a44f7e0637992266ad525b27b4f96550006dc8fa104e66dca9a9a754d8314 | def cheat(self):
'\n Returns False if there is not enough information to cheat, otherwise median predictions\n of current participants that ranked top tier in the last completed period.\n '
participants = self.prediction_market.get_current_participants()
if (len(participants) == 0):
return False
others_predictions = {participant: {'new': self.prediction_market.get_predictions_for_agent(participant)} for participant in participants}
for participant in participants:
try:
old_preds = self.prediction_market.get_predictions_for_agent(participant, 2)
others_predictions[participant]['old'] = old_preds
except Exception:
del others_predictions[participant]
participants = list(others_predictions.keys())
if (len(participants) == 0):
return False
for participant in participants:
mae = mean_absolute_error(self.aggregate_history[(- NUM_PREDICTIONS):], others_predictions[participant]['old'])
if (mae > TOP_TIER_THRESHOLD):
del others_predictions[participant]
participants = list(others_predictions.keys())
if (len(participants) == 0):
return False
others_predictions_new = list(map((lambda entry: entry.get('new')), others_predictions.values()))
predictions = list(map(np.median, zip(*others_predictions_new)))
return (list(map(int, predictions)), list(others_predictions.keys())) | Returns False if there is not enough information to cheat, otherwise median predictions
of current participants that ranked top tier in the last completed period. | agent/agents/cheating_agent.py | cheat | rampopat/charje | 1 | python | def cheat(self):
'\n Returns False if there is not enough information to cheat, otherwise median predictions\n of current participants that ranked top tier in the last completed period.\n '
participants = self.prediction_market.get_current_participants()
if (len(participants) == 0):
return False
others_predictions = {participant: {'new': self.prediction_market.get_predictions_for_agent(participant)} for participant in participants}
for participant in participants:
try:
old_preds = self.prediction_market.get_predictions_for_agent(participant, 2)
others_predictions[participant]['old'] = old_preds
except Exception:
del others_predictions[participant]
participants = list(others_predictions.keys())
if (len(participants) == 0):
return False
for participant in participants:
mae = mean_absolute_error(self.aggregate_history[(- NUM_PREDICTIONS):], others_predictions[participant]['old'])
if (mae > TOP_TIER_THRESHOLD):
del others_predictions[participant]
participants = list(others_predictions.keys())
if (len(participants) == 0):
return False
others_predictions_new = list(map((lambda entry: entry.get('new')), others_predictions.values()))
predictions = list(map(np.median, zip(*others_predictions_new)))
return (list(map(int, predictions)), list(others_predictions.keys())) | def cheat(self):
'\n Returns False if there is not enough information to cheat, otherwise median predictions\n of current participants that ranked top tier in the last completed period.\n '
participants = self.prediction_market.get_current_participants()
if (len(participants) == 0):
return False
others_predictions = {participant: {'new': self.prediction_market.get_predictions_for_agent(participant)} for participant in participants}
for participant in participants:
try:
old_preds = self.prediction_market.get_predictions_for_agent(participant, 2)
others_predictions[participant]['old'] = old_preds
except Exception:
del others_predictions[participant]
participants = list(others_predictions.keys())
if (len(participants) == 0):
return False
for participant in participants:
mae = mean_absolute_error(self.aggregate_history[(- NUM_PREDICTIONS):], others_predictions[participant]['old'])
if (mae > TOP_TIER_THRESHOLD):
del others_predictions[participant]
participants = list(others_predictions.keys())
if (len(participants) == 0):
return False
others_predictions_new = list(map((lambda entry: entry.get('new')), others_predictions.values()))
predictions = list(map(np.median, zip(*others_predictions_new)))
return (list(map(int, predictions)), list(others_predictions.keys()))<|docstring|>Returns False if there is not enough information to cheat, otherwise median predictions
of current participants that ranked top tier in the last completed period.<|endoftext|>
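After the two filtering passes, cheat() reduces to an element-wise median across the surviving agents' prediction vectors. A self-contained check of that core step, with invented numbers:

import numpy as np

# Hypothetical predictions from three surviving top-tier agents.
others_predictions_new = [
    [100, 110, 120],
    [ 98, 115, 118],
    [105, 108, 125],
]
# One median per future time step, taken across agents.
predictions = list(map(np.median, zip(*others_predictions_new)))
print(list(map(int, predictions)))  # -> [100, 110, 120]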
cb31f02d360b64f166398ebfaca54fb2e6df00f6450577eea4c70f9ba1091e6d | def __initialize__(self):
"\n This function initializes the distance matrix and uploads some of the relevant information.\n It adds 4 new parameters to the class:\n - self.output: a text to print when running the algorithm.\n - self.song_list_indexed: a list of pairs '(index_song, song)' where 'song' is the name of the file and 'index_song' is its index.\n - self.n_songs: the number of songs.\n - self.dists: an array of size (n_songs x n_songs) corresponding to the distance between two songs. \n "
if (self.initialize_distances or (not osp.exists(osp.join(self.res_dir, 'song_list.txt')))):
self.output = 'Song list and distance matrix initialized'
self.song_list_indexed = [(i, s[:(- 4)]) for (i, s) in enumerate(os.listdir(self.mat_dir))]
self.n_songs = len(self.song_list_indexed)
self.dists = np.zeros((self.n_songs, self.n_songs))
with open(osp.join(self.res_dir, 'song_list.txt'), 'w') as song_list:
for (index_song, song) in self.song_list_indexed:
song_list.write((((str(index_song) + '\t') + song) + '\n'))
song_list.close()
else:
self.song_list_indexed = []
with open(osp.join(self.res_dir, 'song_list.txt'), 'r') as song_list:
for line in song_list:
(index_song, song) = line.split('\n')[0].split('\t')
index_song = int(index_song)
self.song_list_indexed.append((index_song, song))
self.n_songs = len(self.song_list_indexed)
if osp.exists(osp.join(self.res_dir, 'dists.txt')):
self.output = 'Song list and distance matrix uploaded'
self.dists = np.loadtxt(osp.join(self.res_dir, 'dists.txt'), delimiter='\t')
else:
self.output = 'Song list uploaded and distance matrix created'
self.dists = np.zeros((self.n_songs, self.n_songs)) | This function initializes the distance matrix and uploads some of the relevant information.
It adds 4 new parameters to the class:
- self.output: a text to print when running the algorithm.
- self.song_list_indexed: a list of pairs '(index_song, song)' where 'song' is the name of the file and 'index_song' is its index.
- self.n_songs: the number of songs.
- self.dists: an array of size (n_songs x n_songs) corresponding to the distance between two songs. | measures.py | __initialize__ | BenoitCorsini/music-patterns | 1 | python | def __initialize__(self):
"\n This function initializes the distance matrix and uploads some of the relevant information.\n It adds 4 new parameters to the class:\n - self.output: a text to print when running the algorithm.\n - self.song_list_indexed: a list of pairs '(index_song, song)' where 'song' is the name of the file and 'index_song' is its index.\n - self.n_songs: the number of songs.\n - self.dists: an array of size (n_songs x n_songs) corresponding to the distance between two songs. \n "
if (self.initialize_distances or (not osp.exists(osp.join(self.res_dir, 'song_list.txt')))):
self.output = 'Song list and distance matrix initialized'
self.song_list_indexed = [(i, s[:(- 4)]) for (i, s) in enumerate(os.listdir(self.mat_dir))]
self.n_songs = len(self.song_list_indexed)
self.dists = np.zeros((self.n_songs, self.n_songs))
with open(osp.join(self.res_dir, 'song_list.txt'), 'w') as song_list:
for (index_song, song) in self.song_list_indexed:
song_list.write((((str(index_song) + '\t') + song) + '\n'))
song_list.close()
else:
self.song_list_indexed = []
with open(osp.join(self.res_dir, 'song_list.txt'), 'r') as song_list:
for line in song_list:
(index_song, song) = line.split('\n')[0].split('\t')
index_song = int(index_song)
self.song_list_indexed.append((index_song, song))
self.n_songs = len(self.song_list_indexed)
if osp.exists(osp.join(self.res_dir, 'dists.txt')):
self.output = 'Song list and distance matrix uploaded'
self.dists = np.loadtxt(osp.join(self.res_dir, 'dists.txt'), delimiter='\t')
else:
self.output = 'Song list uploaded and distance matrix created'
self.dists = np.zeros((self.n_songs, self.n_songs)) | def __initialize__(self):
"\n This function initializes the distance matrix and uploads some of the relevant information.\n It adds 4 new parameters to the class:\n - self.output: a text to print when running the algorithm.\n - self.song_list_indexed: a list of pairs '(index_song, song)' where 'song' is the name of the file and 'index_song' is its index.\n - self.n_songs: the number of songs.\n - self.dists: an array of size (n_songs x n_songs) corresponding to the distance between two songs. \n "
if (self.initialize_distances or (not osp.exists(osp.join(self.res_dir, 'song_list.txt')))):
self.output = 'Song list and distance matrix initialized'
self.song_list_indexed = [(i, s[:(- 4)]) for (i, s) in enumerate(os.listdir(self.mat_dir))]
self.n_songs = len(self.song_list_indexed)
self.dists = np.zeros((self.n_songs, self.n_songs))
with open(osp.join(self.res_dir, 'song_list.txt'), 'w') as song_list:
for (index_song, song) in self.song_list_indexed:
song_list.write((((str(index_song) + '\t') + song) + '\n'))
song_list.close()
else:
self.song_list_indexed = []
with open(osp.join(self.res_dir, 'song_list.txt'), 'r') as song_list:
for line in song_list:
(index_song, song) = line.split('\n')[0].split('\t')
index_song = int(index_song)
self.song_list_indexed.append((index_song, song))
self.n_songs = len(self.song_list_indexed)
if osp.exists(osp.join(self.res_dir, 'dists.txt')):
self.output = 'Song list and distance matrix uploaded'
self.dists = np.loadtxt(osp.join(self.res_dir, 'dists.txt'), delimiter='\t')
else:
self.output = 'Song list uploaded and distance matrix created'
self.dists = np.zeros((self.n_songs, self.n_songs))<|docstring|>This function initializes the distance matrix and uploads some of the relevant information.
It adds 4 new parameters to the class:
- self.output: a text to print when running the algorithm.
- self.song_list_indexed: a list of pairs '(index_song, song)' where 'song' is the name of the file and 'index_song' is its index.
- self.n_songs: the number of songs.
- self.dists: an array of size (n_songs x n_songs) corresponding to the distance between two songs.<|endoftext|> |
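__initialize__ persists the song index as a two-column, tab-separated text file and later re-reads it. A round-trip sketch of just that file format, independent of the class:

import os.path as osp

def save_song_list(res_dir, songs):
    # One "<index>\t<name>" line per song, matching the writer above.
    with open(osp.join(res_dir, 'song_list.txt'), 'w') as f:
        for i, name in enumerate(songs):
            f.write(f'{i}\t{name}\n')

def load_song_list(res_dir):
    # Inverse of save_song_list, matching the reader above.
    pairs = []
    with open(osp.join(res_dir, 'song_list.txt')) as f:
        for line in f:
            index_song, song = line.split('\n')[0].split('\t')
            pairs.append((int(index_song), song))
    return pairs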
8ef172617d56b41221d15b4f53bedf32f7272db07f33392e1cd2f48f82a9f5c3 | def distance(self, pat_mat1, pat_mat2):
'\n This function defines the distance we use between two pattern matrices.\n '
return (np.mean((np.abs((pat_mat1 - pat_mat2)) ** self.p_norm)) ** (1 / self.p_norm)) | This function defines the distance we use between two pattern matrices. | measures.py | distance | BenoitCorsini/music-patterns | 1 | python | def distance(self, pat_mat1, pat_mat2):
'\n \n '
return (np.mean((np.abs((pat_mat1 - pat_mat2)) ** self.p_norm)) ** (1 / self.p_norm)) | def distance(self, pat_mat1, pat_mat2):
'\n \n '
return (np.mean((np.abs((pat_mat1 - pat_mat2)) ** self.p_norm)) ** (1 / self.p_norm))<|docstring|>This function defines the distance we use between two pattern matrices.<|endoftext|> |
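distance() is a normalized p-norm, d(A, B) = (mean(|A - B|**p))**(1/p), so it stays within [0, 1] whenever the matrix entries do. A quick numeric check, with p_norm assumed to be 2:

import numpy as np

p_norm = 2  # assumed; the method reads it from self.p_norm
a = np.array([[0.0, 1.0], [1.0, 0.0]])
b = np.zeros((2, 2))
d = np.mean(np.abs(a - b) ** p_norm) ** (1 / p_norm)
print(d)  # sqrt(mean([0, 1, 1, 0])) = sqrt(0.5) ~ 0.7071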
5921109c2c0e4b53fb0c4704fc95e3c522af1cc2dc75d1d1ec32bd46ea2f75d3 | def compute_batch(self):
"\n This function computes the distances of a single batch.\n It starts by normalizing all the matrices of the batch in the list 'batch_pat_mat'.\n Then it computes the distance between the songs of the batch and all the other songs.\n This process computes 'dists' by columns.\n "
start_time = time()
uncomputed_columns = np.all((self.dists == 0), axis=0)
if np.any(uncomputed_columns):
start_index = np.where(uncomputed_columns)[0][0]
indices = np.arange(0, self.normalized_size, dtype=int)
indices = np.reshape(indices, (1, self.normalized_size))
indices = np.repeat(indices, self.normalized_size, axis=0)
dists_to_compute = self.song_list_indexed[start_index:(start_index + self.batch_size)]
batch_pat_mat = []
for (index_song, song) in dists_to_compute:
pat_mat = process(np.loadtxt(osp.join(self.mat_dir, (song + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat = pat_mat[(norm_indices, norm_indices.T)]
batch_pat_mat.append((index_song, pat_mat))
time_spent = time_to_string((time() - start_time))
print('Batch ready, start computing the distance ({})'.format(time_spent))
start_time = time()
index_song = 1
for (index_song1, song1) in self.song_list_indexed[:start_index]:
pat_mat1 = process(np.loadtxt(osp.join(self.mat_dir, (song1 + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat1, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat1 = pat_mat1[(norm_indices, norm_indices.T)]
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
for (index_song1, pat_mat1) in batch_pat_mat:
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
for (index_song1, song1) in self.song_list_indexed[(start_index + self.batch_size):]:
pat_mat1 = process(np.loadtxt(osp.join(self.mat_dir, (song1 + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat1, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat1 = pat_mat1[(norm_indices, norm_indices.T)]
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
sys.stdout.write('\x1b[F\x1b[K')
np.savetxt(osp.join(self.res_dir, 'dists.txt'), self.dists, delimiter='\t')
return time_to_string((time() - start_time)) | This function computes the distances of a single batch.
It starts by normalizing all the matrices of the batch in the list 'batch_pat_mat'.
Then it computes the distance between the songs of the batch and all the other songs.
This process computes 'dists' by columns. | measures.py | compute_batch | BenoitCorsini/music-patterns | 1 | python | def compute_batch(self):
"\n This function computes the distances of a single batch.\n It starts by normalizing all the matrices of the batch in the list 'batch_pat_mat'.\n Then it computes the distance between the songs of the batch and all the other songs.\n This process computes 'dists' by columns.\n "
start_time = time()
uncomputed_columns = np.all((self.dists == 0), axis=0)
if np.any(uncomputed_columns):
start_index = np.where(uncomputed_columns)[0][0]
indices = np.arange(0, self.normalized_size, dtype=int)
indices = np.reshape(indices, (1, self.normalized_size))
indices = np.repeat(indices, self.normalized_size, axis=0)
dists_to_compute = self.song_list_indexed[start_index:(start_index + self.batch_size)]
batch_pat_mat = []
for (index_song, song) in dists_to_compute:
pat_mat = process(np.loadtxt(osp.join(self.mat_dir, (song + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat = pat_mat[(norm_indices, norm_indices.T)]
batch_pat_mat.append((index_song, pat_mat))
time_spent = time_to_string((time() - start_time))
print('Batch ready, start computing the distance ({})'.format(time_spent))
start_time = time()
index_song = 1
for (index_song1, song1) in self.song_list_indexed[:start_index]:
pat_mat1 = process(np.loadtxt(osp.join(self.mat_dir, (song1 + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat1, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat1 = pat_mat1[(norm_indices, norm_indices.T)]
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
for (index_song1, pat_mat1) in batch_pat_mat:
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
for (index_song1, song1) in self.song_list_indexed[(start_index + self.batch_size):]:
pat_mat1 = process(np.loadtxt(osp.join(self.mat_dir, (song1 + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat1, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat1 = pat_mat1[(norm_indices, norm_indices.T)]
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
sys.stdout.write('\x1b[F\x1b[K')
np.savetxt(osp.join(self.res_dir, 'dists.txt'), self.dists, delimiter='\t')
return time_to_string((time() - start_time)) | def compute_batch(self):
"\n This function computes the distances of a single batch.\n It starts by normalizing all the matrices of the batch in the list 'batch_pat_mat'.\n Then it computes the distance between the songs of the batch and all the other songs.\n This process computes 'dists' by columns.\n "
start_time = time()
uncomputed_columns = np.all((self.dists == 0), axis=0)
if np.any(uncomputed_columns):
start_index = np.where(uncomputed_columns)[0][0]
indices = np.arange(0, self.normalized_size, dtype=int)
indices = np.reshape(indices, (1, self.normalized_size))
indices = np.repeat(indices, self.normalized_size, axis=0)
dists_to_compute = self.song_list_indexed[start_index:(start_index + self.batch_size)]
batch_pat_mat = []
for (index_song, song) in dists_to_compute:
pat_mat = process(np.loadtxt(osp.join(self.mat_dir, (song + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat = pat_mat[(norm_indices, norm_indices.T)]
batch_pat_mat.append((index_song, pat_mat))
time_spent = time_to_string((time() - start_time))
print('Batch ready, start computing the distance ({})'.format(time_spent))
start_time = time()
index_song = 1
for (index_song1, song1) in self.song_list_indexed[:start_index]:
pat_mat1 = process(np.loadtxt(osp.join(self.mat_dir, (song1 + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat1, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat1 = pat_mat1[(norm_indices, norm_indices.T)]
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
for (index_song1, pat_mat1) in batch_pat_mat:
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
for (index_song1, song1) in self.song_list_indexed[(start_index + self.batch_size):]:
pat_mat1 = process(np.loadtxt(osp.join(self.mat_dir, (song1 + '.txt')), delimiter='\t'))
n_measures = np.size(pat_mat1, axis=0)
norm_indices = np.floor(((indices * n_measures) / self.normalized_size)).astype(int)
pat_mat1 = pat_mat1[(norm_indices, norm_indices.T)]
for (index_song2, pat_mat2) in batch_pat_mat:
self.dists[(index_song1, index_song2)] = self.distance(pat_mat1, pat_mat2)
time_spent = time_to_string((time() - start_time))
perc = int(((100.0 * index_song) / self.n_songs))
sys.stdout.write('\x1b[F\x1b[K')
print('{}% of the distance computed ({})'.format(perc, time_spent))
index_song += 1
sys.stdout.write('\x1b[F\x1b[K')
np.savetxt(osp.join(self.res_dir, 'dists.txt'), self.dists, delimiter='\t')
return time_to_string((time() - start_time))<|docstring|>This function computes the distances of a single batch.
It starts by normalizing all the matrices of the batch in the list 'batch_pat_mat'.
Then it computes the distance between the songs of the batch and all the other songs.
This process computes 'dists' by columns.<|endoftext|> |
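The index arithmetic repeated throughout compute_batch() is a nearest-neighbor resample of an n_measures x n_measures pattern matrix onto a fixed normalized_size grid. Isolated, with a small grid for illustration:

import numpy as np

normalized_size = 4  # assumed small value; the class reads self.normalized_size
pat_mat = np.arange(36, dtype=float).reshape(6, 6)  # stand-in 6x6 pattern matrix

indices = np.arange(normalized_size).reshape(1, -1)
indices = np.repeat(indices, normalized_size, axis=0)

n_measures = pat_mat.shape[0]
norm_indices = np.floor(indices * n_measures / normalized_size).astype(int)
resized = pat_mat[norm_indices, norm_indices.T]
print(resized.shape)  # (4, 4), sampled at source rows/cols [0, 1, 3, 4]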
f5ef3501134404f13bad6d704784c04390d1a129ff73427895293156c490aa13 | def compute(self):
"\n This function computes the distance matrix one batch a time.\n Note that if 'batch_size' x 'n_batch' < 'n_songs' then the distance matrix will not be filled.\n In this situation, the algorithm can be started from the previous stopping time by setting 'intialize_distances' to False\n "
start_time = time()
print('Distance Matrix starting...')
print(self.output)
for i in range(self.n_batch):
time_spent = time_to_string((time() - start_time))
print('Batch {} of {} starting... ({})'.format((i + 1), self.n_batch, time_spent))
batch_time = self.compute_batch()
sys.stdout.write('\x1b[F\x1b[K')
print('Batch {} of {} done ({})'.format((i + 1), self.n_batch, batch_time))
time_algorithm = time_to_string((time() - start_time))
print('Distance Matrix executed in {}'.format(time_algorithm))
print("Matrix available as '{}'".format(osp.join(self.res_dir, 'dists.txt')))
self.check() | This function computes the distance matrix one batch at a time.
Note that if 'batch_size' x 'n_batch' < 'n_songs' then the distance matrix will not be filled.
In this situation, the algorithm can be started from the previous stopping time by setting 'initialize_distances' to False | measures.py | compute | BenoitCorsini/music-patterns | 1 | python | def compute(self):
"\n This function computes the distance matrix one batch a time.\n Note that if 'batch_size' x 'n_batch' < 'n_songs' then the distance matrix will not be filled.\n In this situation, the algorithm can be started from the previous stopping time by setting 'intialize_distances' to False\n "
start_time = time()
print('Distance Matrix starting...')
print(self.output)
for i in range(self.n_batch):
time_spent = time_to_string((time() - start_time))
print('Batch {} of {} starting... ({})'.format((i + 1), self.n_batch, time_spent))
batch_time = self.compute_batch()
sys.stdout.write('\x1b[F\x1b[K')
print('Batch {} of {} done ({})'.format((i + 1), self.n_batch, batch_time))
time_algorithm = time_to_string((time() - start_time))
print('Distance Matrix executed in {}'.format(time_algorithm))
print("Matrix available as '{}'".format(osp.join(self.res_dir, 'dists.txt')))
self.check() | def compute(self):
"\n This function computes the distance matrix one batch a time.\n Note that if 'batch_size' x 'n_batch' < 'n_songs' then the distance matrix will not be filled.\n In this situation, the algorithm can be started from the previous stopping time by setting 'intialize_distances' to False\n "
start_time = time()
print('Distance Matrix starting...')
print(self.output)
for i in range(self.n_batch):
time_spent = time_to_string((time() - start_time))
print('Batch {} of {} starting... ({})'.format((i + 1), self.n_batch, time_spent))
batch_time = self.compute_batch()
sys.stdout.write('\x1b[F\x1b[K')
print('Batch {} of {} done ({})'.format((i + 1), self.n_batch, batch_time))
time_algorithm = time_to_string((time() - start_time))
print('Distance Matrix executed in {}'.format(time_algorithm))
print("Matrix available as '{}'".format(osp.join(self.res_dir, 'dists.txt')))
self.check()<|docstring|>This function computes the distance matrix one batch at a time.
Note that if 'batch_size' x 'n_batch' < 'n_songs' then the distance matrix will not be filled.
In this situation, the algorithm can be started from the previous stopping time by setting 'initialize_distances' to False<|endoftext|>
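Resuming after an interruption works because each call to compute_batch() scans for the first all-zero column of 'dists' before doing any work. The detection step on its own:

import numpy as np

dists = np.zeros((5, 5))
dists[:, :2] = 0.3  # pretend two batch columns were computed in a previous run

uncomputed_columns = np.all(dists == 0, axis=0)
if np.any(uncomputed_columns):
    start_index = np.where(uncomputed_columns)[0][0]
    print('resume at column', start_index)  # -> resume at column 2
else:
    print('matrix complete')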
efa92f0a437b518dd484a238524dd4b1cd2e921825999f0cd1f4239ad01928e1 | def check_completion(self):
"\n This function checks if all the columns of 'dists' are computed.\n "
uncomputed_columns = np.all((self.dists == 0), axis=0)
if np.any(uncomputed_columns):
print('\x1b[1;37;46m!!!\x1b[0;38;40m The matrix is not fully computed \x1b[1;37;46m!!!\x1b[0;38;40m')
return False
else:
return True | This function checks if all the columns of 'dists' are computed. | measures.py | check_completion | BenoitCorsini/music-patterns | 1 | python | def check_completion(self):
"\n \n "
uncomputed_columns = np.all((self.dists == 0), axis=0)
if np.any(uncomputed_columns):
print('\x1b[1;37;46m!!!\x1b[0;38;40m The matrix is not fully computed \x1b[1;37;46m!!!\x1b[0;38;40m')
return False
else:
return True | def check_completion(self):
"\n \n "
uncomputed_columns = np.all((self.dists == 0), axis=0)
if np.any(uncomputed_columns):
print('\x1b[1;37;46m!!!\x1b[0;38;40m The matrix is not fully computed \x1b[1;37;46m!!!\x1b[0;38;40m')
return False
else:
return True<|docstring|>This function checks if all the columns of 'dists' are computed.<|endoftext|> |
2af286d1aa78068d4ea0a05ac63dfffd41162eb61e20d3d5609ce7c58e64156a | def check_values(self):
"\n This function checks if all the entries of 'dists' are between 0 and 1.\n "
check_values = ((self.dists >= 0) & (self.dists < 1))
if np.all(check_values):
print('\x1b[1;37;42mCheck\x1b[0;38;40m no values outside of the range')
else:
for (index_song1, song1) in self.song_list_indexed:
for (index_song2, song2) in self.song_list_indexed:
if (index_song1 <= index_song2):
if (not check_values[(index_song1, index_song2)]):
print("\x1b[1;31;43mERROR!\x1b[0;38;40m The distance between '{}' and '{}' is {}".format(song1, song2, self.dists[(index_song1, index_song2)])) | This function checks if all the entries of 'dists' are between 0 and 1. | measures.py | check_values | BenoitCorsini/music-patterns | 1 | python | def check_values(self):
"\n \n "
check_values = ((self.dists >= 0) & (self.dists < 1))
if np.all(check_values):
print('\x1b[1;37;42mCheck\x1b[0;38;40m no values outside of the range')
else:
for (index_song1, song1) in self.song_list_indexed:
for (index_song2, song2) in self.song_list_indexed:
if (index_song1 <= index_song2):
if (not check_values[(index_song1, index_song2)]):
print("\x1b[1;31;43mERROR!\x1b[0;38;40m The distance between '{}' and '{}' is {}".format(song1, song2, self.dists[(index_song1, index_song2)])) | def check_values(self):
"\n \n "
check_values = ((self.dists >= 0) & (self.dists < 1))
if np.all(check_values):
print('\x1b[1;37;42mCheck\x1b[0;38;40m no values outside of the range')
else:
for (index_song1, song1) in self.song_list_indexed:
for (index_song2, song2) in self.song_list_indexed:
if (index_song1 <= index_song2):
if (not check_values[(index_song1, index_song2)]):
print("\x1b[1;31;43mERROR!\x1b[0;38;40m The distance between '{}' and '{}' is {}".format(song1, song2, self.dists[(index_song1, index_song2)]))<|docstring|>This function checks if all the entries of 'dists' are between 0 and 1.<|endoftext|> |
f71f94f8568fb9a4d3ece5be3e7b88afa1b381ba9e6517d1bf3ef9615b133551 | def check_symmetry(self):
"\n This function checks if the matrix 'dists' is symmetric.\n "
check_symmetry = (self.dists == self.dists.T)
if np.all(check_symmetry):
print('\x1b[1;37;42mCheck\x1b[0;38;40m the matrix is symmetric')
else:
for (index_song1, song1) in self.song_list_indexed:
for (index_song2, song2) in self.song_list_indexed:
if (index_song1 <= index_song2):
if (not check_symmetry[(index_song1, index_song2)]):
print("\x1b[1;31;43mERROR!\x1b[0;38;40m There is an asymetry between '{}' and '{}' : {} and {}".format(song1, song2, self.dists[(index_song1, index_song2)], self.dists[(index_song2, index_song1)])) | This function checks if the matrix 'dists' is symmetric. | measures.py | check_symmetry | BenoitCorsini/music-patterns | 1 | python | def check_symmetry(self):
"\n \n "
check_symmetry = (self.dists == self.dists.T)
if np.all(check_symmetry):
print('\x1b[1;37;42mCheck\x1b[0;38;40m the matrix is symmetric')
else:
for (index_song1, song1) in self.song_list_indexed:
for (index_song2, song2) in self.song_list_indexed:
if (index_song1 <= index_song2):
if (not check_symmetry[(index_song1, index_song2)]):
print("\x1b[1;31;43mERROR!\x1b[0;38;40m There is an asymetry between '{}' and '{}' : {} and {}".format(song1, song2, self.dists[(index_song1, index_song2)], self.dists[(index_song2, index_song1)])) | def check_symmetry(self):
"\n \n "
check_symmetry = (self.dists == self.dists.T)
if np.all(check_symmetry):
print('\x1b[1;37;42mCheck\x1b[0;38;40m the matrix is symmetric')
else:
for (index_song1, song1) in self.song_list_indexed:
for (index_song2, song2) in self.song_list_indexed:
if (index_song1 <= index_song2):
if (not check_symmetry[(index_song1, index_song2)]):
print("\x1b[1;31;43mERROR!\x1b[0;38;40m There is an asymetry between '{}' and '{}' : {} and {}".format(song1, song2, self.dists[(index_song1, index_song2)], self.dists[(index_song2, index_song1)]))<|docstring|>This function checks if the matrix 'dists' is symmetric.<|endoftext|> |
a09e1f6d32c703f12429fb47ce74e862a285338a8d531116635e4f1c2c934f37 | def check(self):
'\n This function runs the different checks.\n '
if self.check_completion():
self.check_values()
self.check_symmetry() | This function runs the different checks. | measures.py | check | BenoitCorsini/music-patterns | 1 | python | def check(self):
'\n \n '
if self.check_completion():
self.check_values()
self.check_symmetry() | def check(self):
'\n \n '
if self.check_completion():
self.check_values()
self.check_symmetry()<|docstring|>This function runs the different checks.<|endoftext|> |
01ccf75b16f4554e62ffd89a76fd4225528b1c16963154ebdaaecb36ad172cf9 | def __init__(self, initial_state=None):
'State is an abstract representation of the state\n of the world, and seq is the list of actions required\n to get to a particular state from the initial state(root).'
self.state = initial_state
self.seq = [] | State is an abstract representation of the state
of the world, and seq is the list of actions required
to get to a particular state from the initial state(root). | part_a_archive/artificial_idiot/agent.py | __init__ | Dovermore/artificial_intelligence_project | 0 | python | def __init__(self, initial_state=None):
'State is an abstract representation of the state\n of the world, and seq is the list of actions required\n to get to a particular state from the initial state(root).'
self.state = initial_state
self.seq = [] | def __init__(self, initial_state=None):
'State is an abstract representation of the state\n of the world, and seq is the list of actions required\n to get to a particular state from the initial state(root).'
self.state = initial_state
self.seq = []<|docstring|>State is an abstract representation of the state
of the world, and seq is the list of actions required
to get to a particular state from the initial state(root).<|endoftext|> |
4a6f390d7766e924ded4d8116f158b761d89a6d9ec5dfeffac651b09bcb99ac8 | def __call__(self, percept):
'[Figure 3.1] Formulate a goal and problem, then\n search for a sequence of actions to solve it.'
self.state = self.update_state(self.state, percept)
if (not self.seq):
goal = self.formulate_goal(self.state)
problem = self.formulate_problem(self.state, goal)
self.seq = self.search(problem)
if (not self.seq):
return None
return self.seq.pop(0) | [Figure 3.1] Formulate a goal and problem, then
search for a sequence of actions to solve it. | part_a_archive/artificial_idiot/agent.py | __call__ | Dovermore/artificial_intelligence_project | 0 | python | def __call__(self, percept):
'[Figure 3.1] Formulate a goal and problem, then\n search for a sequence of actions to solve it.'
self.state = self.update_state(self.state, percept)
if (not self.seq):
goal = self.formulate_goal(self.state)
problem = self.formulate_problem(self.state, goal)
self.seq = self.search(problem)
if (not self.seq):
return None
return self.seq.pop(0) | def __call__(self, percept):
'[Figure 3.1] Formulate a goal and problem, then\n search for a sequence of actions to solve it.'
self.state = self.update_state(self.state, percept)
if (not self.seq):
goal = self.formulate_goal(self.state)
problem = self.formulate_problem(self.state, goal)
self.seq = self.search(problem)
if (not self.seq):
return None
return self.seq.pop(0)<|docstring|>[Figure 3.1] Formulate a goal and problem, then
search for a sequence of actions to solve it.<|endoftext|> |
1d65680c45c72bafbb05c660b4a255f903992893312f7f1cfdfeb3836c1d417a | def get_all_query():
'Get URL parameters (also known as "query strings" or "URL query parameters") as a dict'
query = eval_js('Object.fromEntries(new URLSearchParams(window.location.search))')
return query | Get URL parameters (also known as "query strings" or "URL query parameters") as a dict | pywebio_battery/web.py | get_all_query | pywebio/pywebio-battery | 2 | python | def get_all_query():
query = eval_js('Object.fromEntries(new URLSearchParams(window.location.search))')
return query | def get_all_query():
query = eval_js('Object.fromEntries(new URLSearchParams(window.location.search))')
return query<|docstring|>Get URL parameters (also known as "query strings" or "URL query parameters") as a dict<|endoftext|>
a2944252cc767a227a6e29c60eaae05a7dbfc95ecd27d6684030b76ac1eadb70 | def get_query(name):
'Get URL parameter value'
query = eval_js('new URLSearchParams(window.location.search).get(n)', n=name)
return query | Get URL parameter value | pywebio_battery/web.py | get_query | pywebio/pywebio-battery | 2 | python | def get_query(name):
query = eval_js('new URLSearchParams(window.location.search).get(n)', n=name)
return query | def get_query(name):
query = eval_js('new URLSearchParams(window.location.search).get(n)', n=name)
return query<|docstring|>Get URL parameter value<|endoftext|> |
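Both helpers above delegate to pywebio's eval_js, so they only work inside a running PyWebIO session. A hypothetical app reading the query string of a URL such as http://localhost:8080/?user=alice&page=2:

from pywebio import start_server
from pywebio.output import put_text

def app():
    put_text('user =', get_query('user'))    # -> alice (None when absent)
    put_text('all =', str(get_all_query()))  # -> {'user': 'alice', 'page': '2'}

if __name__ == '__main__':
    start_server(app, port=8080)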
c4f1a860806cdecb362fc720f5d7c6e8b1cce0f556f6cd5eb6d6bd37e2d7eaaf | def set_localstorage(key, value):
"Save data to user's web browser\n\n The data is specific to the origin (protocol+domain+port) of the app.\n Different origins use different web browser local storage.\n\n :param str key: the key you want to create/update.\n :param str value: the value you want to give the key you are creating/updating.\n\n You can read the value by using :func:`get_localstorage(key) <get_localstorage>`\n "
run_js('localStorage.setItem(key, value)', key=key, value=value) | Save data to user's web browser
The data is specific to the origin (protocol+domain+port) of the app.
Different origins use different web browser local storage.
:param str key: the key you want to create/update.
:param str value: the value you want to give the key you are creating/updating.
You can read the value by using :func:`get_localstorage(key) <get_localstorage>` | pywebio_battery/web.py | set_localstorage | pywebio/pywebio-battery | 2 | python | def set_localstorage(key, value):
"Save data to user's web browser\n\n The data is specific to the origin (protocol+domain+port) of the app.\n Different origins use different web browser local storage.\n\n :param str key: the key you want to create/update.\n :param str value: the value you want to give the key you are creating/updating.\n\n You can read the value by using :func:`get_localstorage(key) <get_localstorage>`\n "
run_js('localStorage.setItem(key, value)', key=key, value=value) | def set_localstorage(key, value):
"Save data to user's web browser\n\n The data is specific to the origin (protocol+domain+port) of the app.\n Different origins use different web browser local storage.\n\n :param str key: the key you want to create/update.\n :param str value: the value you want to give the key you are creating/updating.\n\n You can read the value by using :func:`get_localstorage(key) <get_localstorage>`\n "
run_js('localStorage.setItem(key, value)', key=key, value=value)<|docstring|>Save data to user's web browser
The data is specific to the origin (protocol+domain+port) of the app.
Different origins use different web browser local storage.
:param str key: the key you want to create/update.
:param str value: the value you want to give the key you are creating/updating.
You can read the value by using :func:`get_localstorage(key) <get_localstorage>`<|endoftext|> |
6278ed116dd6506ba9e580fab95e2c4fbd3676f4b66da53e43005a399a2e1f68 | def get_localstorage(key) -> str:
"Get the key's value in user's web browser local storage"
return eval_js('localStorage.getItem(key)', key=key) | Get the key's value in user's web browser local storage | pywebio_battery/web.py | get_localstorage | pywebio/pywebio-battery | 2 | python | def get_localstorage(key) -> str:
return eval_js('localStorage.getItem(key)', key=key) | def get_localstorage(key) -> str:
return eval_js('localStorage.getItem(key)', key=key)<|docstring|>Get the key's value in user's web browser local storage<|endoftext|> |
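A round-trip sketch for the two storage helpers, again assuming a running PyWebIO session; values persist per browser origin and come back as strings (or None when unset).

def remember_theme():
    theme = get_localstorage('theme')
    if theme is None:
        theme = 'dark'                   # hypothetical default
        set_localstorage('theme', theme)
    return theme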