cleanup_guardduty(root_credentials, security_credentials, root_region, security_account_name, all_stacks)
cleanup_macie(root_credentials, security_credentials, root_region, security_account_name, all_stacks)
cleanup_cwl(all_stacks)
cleanup_parameter_store(all_stacks)
def cleanup_macie(root_credentials, security_credentials, root_region, security_account_name, all_stacks):
print("Cleaning up Macie")
try:
security_account_id = None
for a in all_stacks["Accounts"]:
if a["AccountName"] == security_account_name:
security_account_id = a["AccountId"]
macie_root = boto3.client("macie2",
region_name=root_region,
aws_access_key_id=root_credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=root_credentials["Credentials"]["SecretAccessKey"],
aws_session_token=root_credentials["Credentials"]["SessionToken"]
)
macie = boto3.client("macie2",
region_name=root_region,
aws_access_key_id=security_credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=security_credentials["Credentials"]["SecretAccessKey"],
aws_session_token=security_credentials["Credentials"]["SessionToken"]
)
for region in all_stacks["Regions"]:
try:
macie_r = boto3.client("macie2",
region_name=region,
aws_access_key_id=security_credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=security_credentials["Credentials"]["SecretAccessKey"],
aws_session_token=security_credentials["Credentials"]["SessionToken"]
)
member_accounts = macie_r.list_members()
for member in member_accounts["members"]:
memberId = member["accountId"]
print("Disassociate Member {} {}".format(region, memberId))
macie_r.disassociate_member(id=memberId)
print("Delete Member {} {}".format(region, memberId))
macie_r.delete_member(id=memberId)
except botocore.exceptions.ClientError as err:
print('Error Message: {} - {}'.format(err.response['Error']['Message'], region))
threads = list()
try:
print("Waiting for all Macie cleanup threads to finish...")
for account in all_stacks["Accounts"]:
for region in all_stacks["Regions"]:
t = threading.Thread(target=thread_macie_delete, args=(region, account["AdminRoleArn"], account["AccountId"]))
threads.append(t)
t.start()
finally:
for index, thread in enumerate(threads):
thread.join()
print("Done. All Macie cleanup threads finished.")
try:
macie_root.disable_organization_admin_account(
adminAccountId=security_account_id
)
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
def thread_macie_delete(region, admin_role, accountId):
sts = boto3.client("sts")
try:
credentials = sts.assume_role(
RoleArn=admin_role,
RoleSessionName="AcceleratorCleanupScript"
)
macie = boto3.client("macie2",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
try:
print("Disabling macie in {} for {}".format(region, accountId))
macie.disable_macie()
except botocore.exceptions.ClientError as err:
print('Error Message: {} - {} - {}'.format(err.response['Error']['Message'], accountId, region))
except botocore.exceptions.ClientError as err:
print('Disabling macie in {} for {}. Error Message: {}'.format(region, accountId, err.response['Error']['Message']))
def cleanup_guardduty(root_credentials, security_credentials, root_region, security_account_name, all_stacks):
print("Cleaning up GuardDuty")
try:
security_account_id = None
for a in all_stacks["Accounts"]:
if a["AccountName"] == security_account_name:
security_account_id = a["AccountId"]
guardduty_root = boto3.client("guardduty",
region_name=root_region,
aws_access_key_id=root_credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=root_credentials["Credentials"]["SecretAccessKey"],
aws_session_token=root_credentials["Credentials"]["SessionToken"]
)
guardduty = boto3.client("guardduty",
region_name=root_region,
aws_access_key_id=security_credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=security_credentials["Credentials"]["SecretAccessKey"],
aws_session_token=security_credentials["Credentials"]["SessionToken"]
)
for region in all_stacks["Regions"]:
try:
guardduty_r = boto3.client("guardduty",
region_name=region,
aws_access_key_id=security_credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=security_credentials["Credentials"]["SecretAccessKey"],
aws_session_token=security_credentials["Credentials"]["SessionToken"]
)
detectorIds = guardduty_r.list_detectors()
print("GuardDuty Detectors {} {}".format(region, detectorIds["DetectorIds"]))
for dId in detectorIds["DetectorIds"]:
member_accounts = guardduty_r.list_members(DetectorId=dId)
member_account_ids = list(map(lambda x: x["AccountId"], member_accounts["Members"]))
if len(member_account_ids) > 0:
print("GuardDuty Members {} {}".format(region, member_account_ids))
try:
guardduty_r.disassociate_members(
DetectorId=dId,
AccountIds=member_account_ids
)
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
try:
guardduty_r.delete_members(
DetectorId=dId,
AccountIds=member_account_ids
)
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
guardduty_root_r = boto3.client("guardduty",
region_name=region,
aws_access_key_id=root_credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=root_credentials["Credentials"]["SecretAccessKey"],
aws_session_token=root_credentials["Credentials"]["SessionToken"]
)
try:
print("Disabling organization admin account")
guardduty_root_r.disable_organization_admin_account(
AdminAccountId=security_account_id
)
print("Done. Disabling organization admin account")
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
print("Disabling guardduty in {} for {}".format(region, security_account_id))
try:
guardduty_r.delete_detector(DetectorId=dId)
print("Done. Disabling guardduty in {} for {}".format(region, security_account_id))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
threads = list()
try:
print("Waiting for all GuardDuty cleanup threads to finish...")
for account in all_stacks["Accounts"]:
for region in all_stacks["Regions"]:
t = threading.Thread(target=thread_guardduty_delete, args=(region, account["AdminRoleArn"], account["AccountId"]))
threads.append(t)
t.start()
finally:
for index, thread in enumerate(threads):
thread.join()
print("Done. All GuardDuty cleanup threads finished.")
try:
print("Disabling organization admin account")
guardduty_root.disable_organization_admin_account(
AdminAccountId=security_account_id
)
print("Done. Disabling organization admin account")
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
def thread_guardduty_delete(region, admin_role, accountId):
sts = boto3.client("sts")
try:
credentials = sts.assume_role(
RoleArn=admin_role,
RoleSessionName="AcceleratorCleanupScript"
)
guardduty = boto3.client("guardduty",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
print("Disabling guardduty in {} for {}".format(region, accountId))
try:
detectorIds = guardduty.list_detectors()
for dId in detectorIds["DetectorIds"]:
guardduty.delete_detector(DetectorId=dId)
print("Done. Disabling guardduty in {} for {}".format(region, accountId))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
except botocore.exceptions.ClientError as err:
        print('Disabling guardduty in {} for {}. Error Message: {}'.format(region, accountId, err.response['Error']['Message']))
def cleanup_cwl(all_stacks):
print("Cleaning up CloudWatch Logs")
threads = list()
try:
print("Waiting for all CloudWatch Logs threads to finish...")
for account in all_stacks["Accounts"]:
for region in all_stacks["Regions"]:
t = threading.Thread(target=thread_cwl_cleanup, args=(region, account["AdminRoleArn"], account["AccountId"]))
threads.append(t)
t.start()
finally:
for index, thread in enumerate(threads):
thread.join()
print("Done. All CloudWatch Logs threads finished.")
def thread_cwl_cleanup(region, admin_role_arn, accountId):
sts = boto3.client("sts")
credentials = sts.assume_role(
RoleArn=admin_role_arn,
RoleSessionName="AcceleratorCleanupScript"
)
cwl = boto3.client("logs",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
log_groups = cwl.describe_log_groups()
while True:
for log_group in log_groups["logGroups"]:
if AcceleratorPrefix in log_group["logGroupName"]:
print("Deleting log group '{}' in {} for {}".format(log_group["logGroupName"], region, accountId))
cwl.delete_log_group(logGroupName=log_group["logGroupName"])
print("Deleted log group '{}' in {} for {}".format(log_group["logGroupName"], region, accountId))
if "nextToken" in log_groups and log_groups["nextToken"] is not None:
log_groups = cwl.describe_log_groups(nextToken=log_groups["nextToken"])
else:
break
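# Illustrative alternative (not called anywhere in this script): the same log-group
# cleanup can be expressed with boto3's built-in paginator instead of the manual
# nextToken loop above. The helper name and the reuse of AcceleratorPrefix are
# assumptions for this sketch.
def thread_cwl_cleanup_paginated(region, admin_role_arn, accountId):
    sts = boto3.client("sts")
    credentials = sts.assume_role(
        RoleArn=admin_role_arn,
        RoleSessionName="AcceleratorCleanupScript"
    )
    cwl = boto3.client("logs",
        region_name=region,
        aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
        aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
        aws_session_token=credentials["Credentials"]["SessionToken"]
    )
    # The paginator transparently follows nextToken until every page has been consumed.
    paginator = cwl.get_paginator("describe_log_groups")
    for page in paginator.paginate():
        for log_group in page["logGroups"]:
            if AcceleratorPrefix in log_group["logGroupName"]:
                print("Deleting log group '{}' in {} for {}".format(log_group["logGroupName"], region, accountId))
                cwl.delete_log_group(logGroupName=log_group["logGroupName"])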
def cleanup_parameter_store(all_stacks):
print("Cleanup SSM Parameters")
threads = list()
try:
print("Waiting for all SSM Parameter cleanup threads to finish...")
for account in all_stacks["Accounts"]:
for region in all_stacks["Regions"]:
t = threading.Thread(target=thread_parameter_store, args=(region, account["AdminRoleArn"], account["AccountId"]))
threads.append(t)
t.start()
finally:
for index, thread in enumerate(threads):
thread.join()
print("Done. All SSM Parameter cleanup threads finished.")
# todo cleanup the version
def thread_parameter_store(region, admin_role_arn, accountId):
sts = boto3.client("sts")
credentials = sts.assume_role(
RoleArn=admin_role_arn,
RoleSessionName="AcceleratorCleanupScript"
)
ssm = boto3.client("ssm",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
paginator = ssm.get_paginator('get_parameters_by_path')
page_iterator = paginator.paginate(Path="/{}/".format(AcceleratorPrefix), Recursive=True)
for ssm_parameters in page_iterator:
for ssm_parameter in ssm_parameters["Parameters"]:
print("Deleting ssm parameter '{}' in {} for {}".format(ssm_parameter["Name"], region, accountId))
ssm.delete_parameter(Name=ssm_parameter["Name"])
print("Deletedlog group '{}' in {} for {}".format(ssm_parameter["Name"], region, accountId))
def cleanup_route53_resolver(credentials, region):
print("cleanup_route53_resolver")
client = boto3.client("route53resolver",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
associations = client.list_resolver_rule_associations()
for association in associations["ResolverRuleAssociations"]:
if "Name" in association and association["Name"] == "System Rule Association":
continue
try:
print("Disassociating ResolverRule '{}' for VPC '{}'".format(association["ResolverRuleId"], association["VPCId"]))
client.disassociate_resolver_rule(
VPCId=association["VPCId"],
ResolverRuleId=association["ResolverRuleId"]
)
print("Done. Disassociating ResolverRule '{}' for VPC '{}'".format(association["ResolverRuleId"], association["VPCId"]))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
resolver_rules = client.list_resolver_rules()
    for resolver_rule in resolver_rules["ResolverRules"]:
if resolver_rule["OwnerId"] != "Route 53 Resolver":
try:
print("Deleting ResolverRule '{}'".format(resolver_rule["Id"]))
client.delete_resolver_rule(
ResolverRuleId=resolver_rule["Id"]
)
print("Done. Deleting ResolverRule '{}'".format(resolver_rule["Id"]))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
print("Done. cleanup_route53_resolver")
def cleanup_directory_sharing(credentials, region, mad_dns_domain):
client = boto3.client("ds",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
directories = client.describe_directories()
for directory in directories["DirectoryDescriptions"]:
if directory["Name"] == mad_dns_domain:
shared_directories = client.describe_shared_directories(
OwnerDirectoryId=directory["DirectoryId"]
)
for shared_directory in shared_directories["SharedDirectories"]:
try:
print("Unsharing directory {} to {}".format(directory["DirectoryId"], shared_directory["SharedAccountId"]))
client.unshare_directory(
DirectoryId=directory["DirectoryId"],
UnshareTarget={
'Id': shared_directory["SharedAccountId"],
'Type': 'ACCOUNT'
}
)
print("Done. Unsharing directory {} to {}".format(directory["DirectoryId"], shared_directory["SharedAccountId"]))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
def cleanup_directory_sharing_load_config():
print("cleanup_directory_sharing")
mad_account = ""
admin_role = ""
root_region = ""
mad_dns_domain = ""
with open('config.json') as json_file:
config = json.load(json_file)
admin_role = config["global-options"]["organization-admin-role"]
root_region = config["global-options"]["aws-org-management"]["region"]
if root_region == "${HOME_REGION}":
my_session = boto3.session.Session()
root_region = my_session.region_name
print("Setting region to '{}'".format(root_region))
mad_account_name = config["global-options"]["central-operations-services"]["account"]
mad_account = config["mandatory-account-configs"][mad_account_name]["account-name"]
if "mad" not in config["mandatory-account-configs"][mad_account_name]["deployments"]:
return "mad not configured"
elif config["mandatory-account-configs"][mad_account_name]["deployments"]["mad"] == False:
return "mad not configured"
mad_dns_domain = config["mandatory-account-configs"][mad_account_name]["deployments"]["mad"]["dns-domain"]
accounts = get_accounts()
    # find the MAD account
mad_account_id = None
for account in accounts:
if account["Name"] == mad_account:
mad_account_id = account["Id"]
break
if mad_account_id is not None:
mad_account_creds = sts_credentials(mad_account_id, admin_role)
cleanup_directory_sharing(mad_account_creds, root_region, mad_dns_domain)
#Cleanup AD Connector in root account
cleanup_ad_connectors(root_region, mad_dns_domain)
print("Done. cleanup_directory_sharing")
def cleanup_ad_connectors(region, mad_dns_domain):
client = boto3.client("ds",
region_name=region
)
directories = client.describe_directories()
for directory in directories["DirectoryDescriptions"]:
if directory["Name"] == mad_dns_domain:
print("Cleaning up {}".format(directory["Name"]))
client.delete_directory(
DirectoryId=directory["DirectoryId"]
)
print("Done.Cleaning up {}".format(directory["Name"]))
def cleanup_route53_resolver_load_config():
central_resolver_rule_account = ""
admin_role = ""
root_region = ""
with open('config.json') as json_file:
config = json.load(json_file)
admin_role = config["global-options"]["organization-admin-role"]
root_region = config["global-options"]["aws-org-management"]["region"]
central_account_name = config["global-options"]["central-operations-services"]["account"]
if "mad" not in config["mandatory-account-configs"][central_account_name]["deployments"]:
return "mad not configured"
elif config["mandatory-account-configs"][central_account_name]["deployments"]["mad"] == False:
return "mad not configured"
central_resolver_rule_account = config["mandatory-account-configs"][central_account_name]["deployments"]["mad"]["central-resolver-rule-account"]
accounts = get_accounts()
    # find the central_resolver_rule_account
central_resolver_rule_account_id = None
for account in accounts:
if account["Name"] == central_resolver_rule_account:
print("Found {}".format(central_resolver_rule_account))
central_resolver_rule_account_id = account["Id"]
break
if central_resolver_rule_account_id is not None:
central_resolver_rule_account_creds = sts_credentials(central_resolver_rule_account_id, admin_role)
cleanup_route53_resolver(central_resolver_rule_account_creds, root_region)
def cleanup_ecr(credentials, region):
print("Cleaning up ECR")
client = boto3.client("ecr",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
def cleanup_dynamodb(credentials, region):
print("Cleaning up DynamoDB")
client = boto3.client("dynamodb",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
tables = client.list_tables()
for tableName in tables["TableNames"]:
if tableName.startswith(AcceleratorPrefix):
print("Deleting DynamoDB Table '{}'".format(tableName))
client.delete_table(TableName=tableName)
print("Deleted DynamoDB Table '{}'".format(tableName))
def cleanup_secrets(credentials, region):
print("Cleaning up")
def cleanup_config_aggregators(credentials, region):
print("Cleaning up config aggregators")
def sts_credentials(accountId, roleName):
role_arn = "arn:aws:iam::{accountId}:role/{roleName}".format(accountId=accountId, roleName=roleName)
sts = boto3.client("sts")
credentials = sts.assume_role(
RoleArn=role_arn,
RoleSessionName="AcceleratorCleanupScript"
)
return credentials
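# Convenience sketch (hypothetical helper, not used above): the same three credential
# fields are unpacked every time a regional client is built in this script, so the
# pattern can be factored out like this.
def assumed_client(service, region, credentials):
    return boto3.client(service,
        region_name=region,
        aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
        aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
        aws_session_token=credentials["Credentials"]["SessionToken"]
    )
# Example (hypothetical values): ddb = assumed_client("dynamodb", region, sts_credentials(account_id, role_name))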
def backup_config():
cc = boto3.client('codecommit')
repos = cc.list_repositories()
backed_up = False
if not path.exists("config.json"):
print("Backing up config.json from CodeCommit...")
try:
for repo in repos["repositories"]:
if AcceleratorPrefix != 'ASEA':
# ichooseyou_sep.py
import pandas
import numpy
# Formats data from the PMx export and extracts the relevant founder data
def format_pmx_list(data):
All_living_founder_list = []
Founders = []
Founder_contrib = []
founder_list = data["MyFounders"]
for every_founder in founder_list:
remove_format = every_founder.replace("|", ",").strip("[]")
formatted_founder_list = remove_format.split(",")
Founders.append(formatted_founder_list)
for every_founder in formatted_founder_list:
if every_founder not in All_living_founder_list:
All_living_founder_list.append(every_founder)
founder_contrib_list = data["MyFounderContribs"]
for every_contrib in founder_contrib_list:
remove_format_contrib = every_contrib.replace("|", ",").strip("[]")
formatted_contrib_list = remove_format_contrib.split(",")
Founder_contrib.append(formatted_contrib_list)
data["MyFounders"] = Founders
data["MyFounderContribs"] = Founder_contrib
All_living_founder_list.append('Unk')
return(All_living_founder_list, data)
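# Minimal usage sketch (hypothetical two-row PMx export; the "[A|B]" string layout is an
# assumption inferred from the parsing above). Not called on import.
def _example_format_pmx_list():
    demo = pandas.DataFrame({
        "MyFounders": ["[F1|F2]", "[F2|F3]"],
        "MyFounderContribs": ["[0.5|0.5]", "[0.25|0.75]"],
    })
    living_founders, formatted = format_pmx_list(demo)
    # living_founders == ['F1', 'F2', 'F3', 'Unk']; both columns now hold Python lists.
    return living_founders, formatted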
#counts number of founders
def count_founders(data):
Number_founders = []
for founder_list in data["MyFounders"]:
Number_founders.append(len(founder_list))
data["Number of founders"] = Number_founders
return(data)
#converts proportion of founders to percentage
def convert_founder_percentage(data):
myfound_list = data["MyFounders"]
    mycontrib_list = data["MyFounderContribs"] # the proportion each founder contributes to that individual
unk_proportion = []
unk_founder = []
percentage_contrib = []
founder_genome = []
for x in range(0,len(myfound_list)):
individual = mycontrib_list[x] # take list of proportions for that individual
percentage = [float(prop) * 100 for prop in individual]
percentage = [round(value,2) for value in percentage] # round each percentage in list
fi = founder_genome_equiv(individual)
founder_genome.append(fi)
if sum(percentage) >= 100:
percentage_contrib.append(percentage) # append list of percentages to percentage contrib
found_list = myfound_list[x] # the list of founders is the list of founders for that individual
            unk_founder.append(found_list) # add the list to unk_founder (kept under the same variable name for later use)
            unk_proportion.append(individual) # see comment above
#found_list = ", ".join(found_list)
else:
            unk_contrib = 100 - sum(percentage) # unknown contribution equals 100 minus the sum of the percentages
            unk_prop = unk_contrib/100 # creates the unknown proportion (needed for Fe)
            individual.append(unk_prop) # add the unknown proportion to the list
            unk_proportion.append(individual) # create a list of lists, one per individual
            percentage.append(round(float(unk_contrib), 3)) # append this number to percentages as a rounded float
percentage_contrib.append(percentage) # append list of percentages to percentage contrib
found_list = myfound_list[x] # the list of founders is the list of founders for that individual
found_list.append("Unk")# append Unk to list of founders
unk_founder.append(found_list)
#found_list = ", ".join(found_list)
data["MyFounderContribs"] = unk_proportion
data["FounderContribution(%)"] = percentage_contrib # new column percentage contribution = percentage contribtution
data["MyFounders"] = unk_founder # new column MyFounders = unk founders
data["Fe"] = founder_genome
data = data.round(3)# round all data
return(data)
# Creates dataset for use with R with unique ID and founder info
def create_dataset(data_frame):
empty_array = ([['UniqueID'],["FounderContribution(%)"], ['Founder']])
for index, row in data_frame.iterrows():
ID = []
Perc_contrib = []
Founder_name = []
Ind, Percent, Founder = (row['UniqueID'], row["FounderContribution(%)"], row['MyFounders'])
[Perc_contrib.append(x) for x in Percent]
[Founder_name.append(y) for y in Founder]
[ID.append(Ind) for x in Percent]
stacked = numpy.vstack((ID, Perc_contrib, Founder_name))
empty_array = numpy.hstack((empty_array, stacked))
df1 = pandas.DataFrame(empty_array, index = ['UniqueID',"FounderContribution(%)",'Founder'])
df = df1.transpose()
df = df.drop(df.index[0])
return(df)
# Calculates Fe (founder genome equivalents)
def founder_genome_equiv(proportions):
values_squared = [float(value)**2 for value in proportions]
sum_values = sum(values_squared)
fe = 1/sum_values
fe = round(fe, 3)
return(fe)
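# Worked example (illustrative): for founder proportions [0.5, 0.25, 0.25] the squared
# values sum to 0.25 + 0.0625 + 0.0625 = 0.375, so
# founder_genome_equiv([0.5, 0.25, 0.25]) == round(1 / 0.375, 3) == 2.667.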
#formats kinship matrix from pmx download
def format_matrix_from_studbook(csv_file):
column = []
kinship_matrix = pandas.read_csv(csv_file)
kinship_matrix.drop(index = 0, inplace = True)
kinship_matrix.drop(kinship_matrix.columns[1], axis = 1, inplace = True)
kinship_matrix['UniqueID'] = kinship_matrix['UniqueID'].str.strip()
kinship_matrix.set_index('UniqueID', inplace = True)
columns = list(kinship_matrix.columns.values)
for name in columns:
name = name.strip()
name = str(name)
column.append(name)
kinship_matrix.columns = column
return(kinship_matrix)
# Makes UniqueID the index and returns the ranked list of IDs
def change_index_to_ID(data):
data['UniqueID'] = data['UniqueID'].astype(str)
new_data = data.set_index('UniqueID', inplace = False, drop = True)
new_data.rename(index=str, inplace=True)
ranked_birds = list(new_data.index.values)
return(new_data, ranked_birds)
#deletes individuals found in undesirable list from data
def delete_too_related(data, undesirable_list):
for bird in undesirable_list:
if bird in list(data.index.values):
            data.drop(index = bird, inplace = True) # both must be strings: unique IDs are numbers, and mixing types here would make the drop fail
else:
continue
return(data)
#any individual more than threshold gets removed from KM
def remove_related_from_kinship_matrix(kinship_matrix, undesirable_list):
KM = delete_too_related(kinship_matrix, undesirable_list)
for bird in undesirable_list:
KM.drop(columns = bird , inplace = True)
return(KM)
# Checks pairwise relatedness using the kinship matrix; any individual more related than the threshold is added to a list to be removed from the data and the KM
def checking_relatedness(threshold, ID, kinship_matrix):
delete_individuals = []
ID = str(ID)
kin_coeff = kinship_matrix.loc[ID]
column_list = list(kin_coeff)
for x in range(0, len(kinship_matrix)):
cell_value = column_list[x]
value = float(cell_value)
column_name = kinship_matrix.columns[x]
if value >= threshold:
delete_individuals.append(column_name)
number_unsuitable = len(delete_individuals)
return(delete_individuals, number_unsuitable)
#MAIN ALGORITHM chooses individuals that satisfy requirements - number female, number male and less than relatedness threshold
#if any filtering needed then this is not used but rather chosen_animals_with_prior
def chosen_animals(threshold, number_males, number_females, data, kinship_matrix):
wanted_num = {'Male': number_males, 'Female': number_females}
total_wanted = number_males + number_females
counters = {'Male': 0, 'Female': 0}
the_chosen_ones = []
all_data, ranked_birds = change_index_to_ID(data)
# While we still have some individuals to find...
while (sum(counters.values()) < total_wanted and not all_data.empty):
# Get the next individual
individual = all_data.index.values[0]
# Check its sex.
indsex = all_data.at[individual, 'Sex']
# If we already have enough of this sex of individual, skip
if counters[indsex] < wanted_num[indsex]:
undesirable_list, number = checking_relatedness(threshold, individual, kinship_matrix)
the_chosen_ones.append(individual)
all_data = delete_too_related(all_data, undesirable_list)
kinship_matrix = remove_related_from_kinship_matrix(kinship_matrix, undesirable_list)
counters[indsex] += 1
else:
all_data.drop(index = individual, inplace = True)
ranked_data, ranked_birds = change_index_to_ID(data)
chosen_ones_tables = ranked_data.loc[the_chosen_ones]
chosen_ones_tables.reset_index(level=0, inplace=True)
return(the_chosen_ones, chosen_ones_tables)
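# Usage sketch (hypothetical file names and threshold): selecting two males and two
# females with pairwise kinship below 0.125 could look like
#   data = create_data_file("studbook_export.csv")
#   km = format_matrix_from_studbook("kinship_matrix.csv")
#   ids, table = chosen_animals(0.125, 2, 2, data, km)
# which is what chosen_ones_tables() below wraps into a single call.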
# Formats studbook data to create a dataframe containing only living individuals,
# then sorts the dataframe by Fe (descending) and MK (ascending)
def create_data_file(datafile):
panda = pandas.read_csv(datafile, index_col = None, usecols=["UniqueID", "Location", "Sex", "F", "MK", "AgeYears", "MyFounders", "MyFounderContribs", "Alive"])
all_live_found_list, data = format_pmx_list(panda)
data.query("Alive == True", inplace = True)
data.drop(columns = "Alive", inplace = True)
data.reset_index(inplace = True, drop = True)
data = count_founders(data)
data = convert_founder_percentage(data)
data = data.sort_values(["Fe","MK"], ascending=[False,True]) #"Fe", False
data['Rank'] = list(range(1, len(data) + 1))
return(data)
# This function is the same as "female_data_format", except it gets the
# table from R, rather than reading it in itself.
def female_data_format_df(df):
female = df.query("Sex == 'Female'")
female_data = female.sort_values(["Fe","MK"], ascending = [False,True]) #False, "Fe",
return(female_data)
# This function is the same as "male_data_format", except it gets the
# table from R, rather than reading it in itself.
def male_data_format_df(df):
male = df.query("Sex == 'Male'")
male_data = male.sort_values(["Fe","MK"], ascending=[False,True]) # "Fe", False,
return(male_data)
def female_data_format(datafile):
data = create_data_file(datafile)
female = data.query("Sex == 'Female'")
female_data = female.sort_values(["Fe","MK"], ascending=[False, True]) # "Fe" False
return(female_data)
def male_data_format(datafile):
data = create_data_file(datafile)
male = data.query("Sex == 'Male'")
male_data = male.sort_values(["Fe","MK"], ascending=[False,True]) # "Fe", False,
return(male_data)
# Runs functions and produces the table of individuals chosen by ICY (the table can be empty if all requirements cannot be satisfied)
def chosen_ones_tables(datafile, kinship_matrix, threshold, number_males, number_females):
data = create_data_file(datafile)
kinship_matrix = format_matrix_from_studbook(kinship_matrix)
the_chosen_ones, the_chosen_ones_table = chosen_animals(threshold, number_males, number_females, data, kinship_matrix)
return(the_chosen_ones_table)
# Ensures the input to the Python function is a list
def coerce_to_list(value):
if value is None:
return []
if isinstance(value, list):
return value
else:
return [value]
# MAIN ALGORITHM: chooses individuals that satisfy the requirements (number of females, number of males, and pairwise
# relatedness below the threshold), but also ensures no pairwise relatedness above the threshold with previously
# released individuals, or with any named individuals that no others should be related to above the threshold
def chosen_animals_with_prior(threshold, number_males, number_females, data, kinship_matrix, age, prior_list, location_list):
# Possible None's prior_list and location_list.
# If prior_list is not None, then it is either a scalar string or a list of strings. Coerce so is always a list of strings.
prior_list = coerce_to_list(prior_list)
location_list = coerce_to_list(location_list)
wanted_num = {'Male': number_males, 'Female': number_females}
total_wanted = number_males + number_females
counters = {'Male': 0, 'Female': 0}
the_chosen_ones = []
all_data, ranked_birds = change_index_to_ID(data)
filter_all_data = filter_data(all_data, age, location_list)
# While we still have some individuals to find...
while (sum(counters.values()) < total_wanted and not filter_all_data.empty):
# Get the next individual
individual = filter_all_data.index.values[0]
print(individual)
check_if_unsuitable = checking_relatedness_named_ind(threshold, individual, kinship_matrix, prior_list)
if check_if_unsuitable:
all_data = delete_too_related(filter_all_data, check_if_unsuitable)
else:
# Check its sex.
indsex = filter_all_data.at[individual, 'Sex']
# If we already have enough of this sex of individual, skip
if counters[indsex] < wanted_num[indsex]:
undesirable_list, number = checking_relatedness(threshold, individual, kinship_matrix)
the_chosen_ones.append(individual)
all_data = delete_too_related(filter_all_data, undesirable_list)
kinship_matrix = remove_related_from_kinship_matrix(kinship_matrix, undesirable_list)
counters[indsex] += 1
else:
filter_all_data.drop(index = individual, inplace = True)
ranked_data, ranked_birds = change_index_to_ID(data)
chosen_ones_tables = ranked_data.loc[the_chosen_ones]
chosen_ones_tables.reset_index(level=0, inplace=True)
return(the_chosen_ones, chosen_ones_tables)
# Runs functions and produces the table of individuals chosen by ICY, taking prior info into account (the table can be empty if all requirements cannot be satisfied)
def chosen_table_with_prior(datafile, kinship_matrix, threshold, number_males, number_females, age, prior_list, location):
data = create_data_file(datafile)
kinship_matrix = format_matrix_from_studbook(kinship_matrix)
    the_chosen_ones, the_chosen_ones_table = chosen_animals_with_prior(threshold, number_males, number_females, data, kinship_matrix, age, prior_list, location)
    return(the_chosen_ones_table)
"""Representation of one chain (e.g. mainchain/sidechain)."""
from __future__ import annotations
import os
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Set, Union, cast
from xrpl.models import (
XRP,
AccountInfo,
AccountLines,
Currency,
GenericRequest,
IssuedCurrency,
Request,
Transaction,
)
from slk.chain.asset_aliases import AssetAliases
from slk.chain.key_manager import KeyManager
from slk.chain.node import Node
from slk.classes.account import Account
from slk.classes.config_file import ConfigFile
ROOT_ACCOUNT = Account(
nickname="root",
account_id="<KEY>",
seed="<KEY>",
)
class Chain(ABC):
"""Representation of one chain (e.g. mainchain/sidechain)."""
def __init__(self: Chain, node: Node, add_root: bool = True) -> None:
"""
Initializes a chain.
Note: Do not use this __init__, only use it with the child classes.
Args:
node: The node to use with this chain.
add_root: Specifies if the root account should be added to the key manager.
The default is True.
"""
self.node = node
self.key_manager = KeyManager()
self.asset_aliases = AssetAliases()
if add_root:
self.key_manager.add(ROOT_ACCOUNT)
@property
@abstractmethod
def standalone(self: Chain) -> bool:
"""Return whether the chain is in standalone mode."""
pass
@abstractmethod
def get_pids(self: Chain) -> List[int]:
"""Return a list of process IDs for the nodes in the chain."""
pass
@abstractmethod
def get_node(self: Chain, i: Optional[int] = None) -> Node:
"""
Get a specific node from the chain.
Args:
i: The index of the node to return.
Returns:
The node at index i.
"""
pass
@abstractmethod
def get_configs(self: Chain) -> List[ConfigFile]:
"""List all config files for the nodes in the chain."""
pass
@abstractmethod
def get_running_status(self: Chain) -> List[bool]:
"""Return whether the chain is up and running."""
pass
@abstractmethod
def shutdown(self: Chain) -> None:
"""Shut down the chain."""
pass
@abstractmethod
def servers_start(
self: Chain,
*,
server_indexes: Optional[Union[Set[int], List[int]]] = None,
server_out: str = os.devnull,
) -> None:
"""
Start the servers specified by `server_indexes` for the chain.
Args:
server_indexes: The server indexes to start. The default is `None`, which
starts all the servers in the chain.
server_out: Where to output the results.
"""
pass
@abstractmethod
def servers_stop(
self: Chain, server_indexes: Optional[Union[Set[int], List[int]]] = None
) -> None:
"""
Stop the servers specified by `server_indexes` for the chain.
Args:
server_indexes: The server indexes to start. The default is `None`, which
starts all the servers in the chain.
"""
pass
# rippled stuff
def send_signed(self: Chain, txn: Transaction) -> Dict[str, Any]:
"""
Sign and then send the given transaction.
Args:
txn: The transaction to sign and submit.
Returns:
The result of the submitted transaction.
Raises:
ValueError: If the transaction's account is not a known account.
"""
if not self.key_manager.is_account(txn.account):
raise ValueError(f"Account {txn.account} not a known account in chain.")
account_obj = self.key_manager.get_account(txn.account)
return self.node.sign_and_submit(txn, account_obj.wallet)
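    # Usage sketch (hypothetical values): assuming `chain` is a concrete Chain subclass,
    # a signed payment from a registered alias could be submitted roughly like
    #   from xrpl.models import Payment
    #   alice = chain.create_account("alice")
    #   result = chain.send_signed(Payment(account=alice.account_id, destination=ROOT_ACCOUNT.account_id, amount="1000000"))
    # send_signed raises ValueError if the sending account was never added to the key manager.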
def request(self: Chain, req: Request) -> Dict[str, Any]:
"""
Send the request to the rippled server.
Args:
req: The request to send.
Returns:
The result of the request.
"""
return self.node.request(req)
# specific rippled methods
def maybe_ledger_accept(self: Chain) -> None:
"""Advance the ledger if the chain is in standalone mode."""
if not self.standalone:
return
self.request(GenericRequest(command="ledger_accept")) # type: ignore
def get_account_info(
self: Chain, account: Optional[Account] = None
) -> List[Dict[str, Any]]:
"""
        Return a list of account info dictionaries. If account is None, use the address
        book to return information about all accounts.
Args:
account: The account to get information about. If None, will return
information about all accounts in the chain. The default is None.
Returns:
A list of the results for the accounts.
Raises:
ValueError: If the account_info command fails.
"""
if account is None:
known_accounts = self.key_manager.known_accounts()
return [d for acc in known_accounts for d in self.get_account_info(acc)]
try:
result = self.request(AccountInfo(account=account.account_id))
except:
# TODO: better error checking
# Most likely the account does not exist on the ledger. Give a balance of 0.
return [
{
"account": account.account_id,
"balance": "0",
"flags": 0,
"owner_count": 0,
"previous_txn_id": "NA",
"previous_txn_lgr_seq": -1,
"sequence": -1,
}
]
if "account_data" not in result:
raise ValueError("Bad result from account_info command")
info = result["account_data"]
for dk in ["LedgerEntryType", "index"]:
del info[dk]
rename_dict = {
"Account": "account",
"Balance": "balance",
"Flags": "flags",
"OwnerCount": "owner_count",
"PreviousTxnID": "previous_txn_id",
"PreviousTxnLgrSeq": "previous_txn_lgr_seq",
"Sequence": "sequence",
}
for key in rename_dict:
if key in info:
new_key = rename_dict[key]
info[new_key] = info[key]
del info[key]
return [cast(Dict[str, Any], info)]
def get_balances(
self: Chain,
account: Union[Account, List[Account], None] = None,
token: Union[Currency, List[Currency]] = XRP(),
) -> List[Dict[str, Any]]:
"""
Get the balances for accounts in tokens.
Args:
account: An account or list of accounts to get balances of. If account is
None, treat as a wildcard (use address book). The default is None.
            token: A token or list of tokens in which to get balances. The default is
                XRP.
Returns:
A list of dictionaries of account balances.
"""
if account is None:
account = self.key_manager.known_accounts()
if isinstance(account, list):
return [d for acc in account for d in self.get_balances(acc, token)]
if isinstance(token, list):
return [d for ass in token for d in self.get_balances(account, ass)]
if isinstance(token, XRP):
try:
account_info = self.get_account_info(account)[0]
needed_data = ["account", "balance"]
account_info = {
"account": account_info["account"],
"balance": account_info["balance"],
}
account_info.update({"currency": "XRP", "peer": "", "limit": ""})
return [account_info]
except:
# TODO: better error handling
# Most likely the account does not exist on the ledger. Give a balance
# of zero.
return [
{
"account": account,
"balance": 0,
"currency": "XRP",
"peer": "",
"limit": "",
}
]
else:
assert isinstance(token, IssuedCurrency) # for typing
try:
trustlines = self.get_trust_lines(account)
trustlines = [
tl
for tl in trustlines
if (tl["peer"] == token.issuer and tl["currency"] == token.currency)
]
needed_data = ["account", "balance", "currency", "peer", "limit"]
return [
{k: trustline[k] for k in trustline if k in needed_data}
for trustline in trustlines
]
except:
# TODO: better error handling
# Most likely the account does not exist on the ledger. Return an empty
# data frame
return []
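    # Example (illustrative account and issuer names): get_balances() with no arguments walks
    # the whole address book in XRP; passing lists, e.g.
    #   chain.get_balances([alice, bob], [XRP(), IssuedCurrency(currency="USD", issuer=gw.account_id)])
    # fans out over every (account, token) pair and concatenates the per-pair result rows.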
def get_balance(self: Chain, account: Account, token: Currency) -> str:
"""
Get a balance from a single account in a single token.
Args:
account: The account to get the balance from.
token: The currency to use as the balance.
Returns:
The balance of the token in the account.
"""
try:
result = self.get_balances(account, token)
return str(result[0]["balance"])
except:
return "0"
def get_trust_lines(
self: Chain, account: Account, peer: Optional[Account] = None
) -> List[Dict[str, Any]]:
"""
Get all trustlines for the specified account.
Args:
account: The account to query for the trustlines.
peer: The peer of the trustline. If None, treat as a wildcard. The default
is None.
Returns:
A list of dictionaries representing account trust lines.
Raises:
ValueError: If the account_lines command fails.
"""
if peer is None:
result = self.request(AccountLines(account=account.account_id))
else:
result = self.request(
AccountLines(account=account.account_id, peer=peer.account_id)
)
if "lines" not in result or "account" not in result:
raise ValueError("Bad result from account_lines command")
address = result["account"]
account_lines = result["lines"]
for account_line in account_lines:
account_line["peer"] = account_line["account"]
account_line["account"] = address
return cast(List[Dict[str, Any]], account_lines)
@abstractmethod
def get_brief_server_info(self: Chain) -> Dict[str, List[Dict[str, Any]]]:
"""
Get a dictionary of the server_state, validated_ledger_seq, and
complete_ledgers for all the nodes in the chain.
"""
pass
@abstractmethod
def federator_info(
self: Chain, server_indexes: Optional[Union[Set[int], List[int]]] = None
) -> Dict[int, Dict[str, Any]]:
"""
Get the federator info of the servers.
Args:
server_indexes: The servers to query for their federator info. If None,
treat as a wildcard. The default is None.
"""
pass
# Account/asset stuff
def create_account(self: Chain, name: str) -> Account:
"""
Create an account for the specified alias.
Args:
name: The alias to use for the account.
Returns:
The created account.
"""
assert not self.key_manager.is_alias(name)
account = Account.create(name)
self.key_manager.add(account)
return account
def substitute_nicknames(
self: Chain, items: Dict[str, Any], cols: List[str] = ["account", "peer"]
) -> None:
"""
Substitutes in-place account IDs for nicknames.
Args:
items: The dictionary to use for replacements.
cols: The columns in which to replace the account IDs. Defaults to "account"
and "peer".
"""
for c in cols:
if c not in items:
continue
items[c] = self.key_manager.alias_or_account_id(items[c])
def add_to_keymanager(self: Chain, account: Account) -> None:
"""
        Add an account to the bank of known accounts (the key manager).

        Args:
            account: The account to add.
        """
        self.key_manager.add(account)
# src/inference/analyzers.py
"""
Class for the analysis of and to provide statistics about a single message.
**Intra-Message Analysis**
:author: <NAME>
"""
import numpy
import pandas
from bitarray import bitarray
from typing import Dict, List, Tuple, Union, Type
from scipy.ndimage.filters import gaussian_filter1d
# The analyzer implementations heavily depend on the MessageAnalyzer base class
# that itself is deeply intertwined with the MessageSegment class:
from inference.segments import MessageAnalyzer, MessageSegment
class NothingToCompareError(ValueError):
"""
Error to raise if one of a pair of data is missing for comparison.
"""
pass
class ParametersNotSet(ValueError):
"""
Error to raise if the necessary analysis parameters are not set.
"""
pass
class NoneAnalysis(MessageAnalyzer):
"""
Class denoting a non-transforming analysis.
Values remain None and only message is set.
"""
def analyze(self):
pass
class BitCongruence(MessageAnalyzer):
"""
Bitwise congruence: Simple Matching [Sokal & Michener].
not unit-dependant, always byte-wise
"""
_startskip = 1
def analyze(self):
"""
Bitwise congruence: Simple Matching [Sokal & Michener].
other kinds of bit variances from http://btluke.com/binclus.html
:return: list of congruences from index i = 1 to n between bits of i-1 and i
"""
tokenlist = self._message.data
self._values = BitCongruence.bitCongruenceBetweenTokens(tokenlist)
super().analyze()
@staticmethod
def bitCongruenceBetweenTokens(tokenlist: Union[List, bytes]):
"""
Bitwise congruence: Simple Matching [Sokal & Michener]
not unit-dependant, token-dependent: always compares tokenwise
:param tokenlist: list of tokens between which the bit congruence is calculated
:return: list of congruences from index i = 1 to n between bits of i-1 and i
"""
congruencelist = [] # tokenlist could also be list of ngrams.
if len(tokenlist) < 2:
raise NothingToCompareError(
"Needs at least two tokens to determine a congruence. Token list is {}".format(tokenlist))
try: # We need a type that can be casted to byte. Do it as soon as possible to fail early and completely.
for tokenA, tokenB in zip(tokenlist[:-1], tokenlist[1:]):
# converting and failsafes. Ugly bytes and bitarray!
if not isinstance(tokenA, bytes):
tokenA = bytes( [ tokenA] )
bitsA = bitarray()
bitsA.frombytes(tokenA)
if not isinstance(tokenB, bytes):
tokenB = bytes( [tokenB] )
bitsB = bitarray()
bitsB.frombytes(tokenB)
bitlength = len(bitsA)
if bitlength != len(bitsB):
raise IndexError(
"All tokens need to be of equal bit length. Offending tokens: {} and {}".format(tokenA, tokenB))
# finally do the real work:
# total number of times (bits) subsequent tokens agree.
bAgree = ~ (bitsA ^ bitsB) # type: bitarray
congruencelist.append(bAgree.count() / bitlength)
except TypeError as e:
raise TypeError("Tokens must be convertible to bytes, which failed because: {} ".format(e))
return congruencelist
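# Worked example (illustrative): for the two bytes 0xaa (10101010) and 0xab (10101011)
# only the last bit differs, so ~(bitsA ^ bitsB) has 7 ones and the congruence is 7/8:
#   BitCongruence.bitCongruenceBetweenTokens(b'\xaa\xab')  ->  [0.875]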
class BitCongruenceGauss(BitCongruence):
"""
Noise reduced bitwise congruence: Simple Matching [Sokal & Michener].
"""
_bcvalues = None
def setAnalysisParams(self, sigma=1.5):
if isinstance(sigma, tuple):
self._analysisArgs = sigma[0]
elif isinstance(sigma, float):
self._analysisArgs = sigma
else:
raise TypeError('Parameter sigma is not valid')
def analyze(self):
if not self._analysisArgs:
raise ParametersNotSet('Analysis parameter missing: sigma.')
sigma = self._analysisArgs
super().analyze()
self._bcvalues = self._values
self._values = list(gaussian_filter1d(self._values, sigma))
@property
def bitcongruences(self):
if self._bcvalues is None:
return None
return [0.0] * self.startskip + self._bcvalues
def messageSegmentation(self) -> List[MessageSegment]:
"""
Segment message by determining local maxima of sigma-1.5-gauss-filtered bit-congruence.
>>> from netzob.Model.Vocabulary.Messages.L4NetworkMessage import L4NetworkMessage
>>> tstmsg = '19040aec0000027b000012850a6400c8d23d06a2535ed71ed23d09faa4673315d23d09faa1766325d23d09faa17b4b10'
>>> l4m = L4NetworkMessage(bytes.fromhex(tstmsg))
>>> hbg = BitCongruenceGauss(l4m)
>>> hbg.setAnalysisParams()
>>> hbg.analyze()
>>> spm = hbg.messageSegmentation()
>>> print(b''.join([seg.bytes for seg in spm]).hex() == spm[0].message.data.hex())
True
:return: Segmentation of this message based on this analyzer's type.
"""
if not self.values:
if not self._analysisArgs:
raise ValueError('No values or analysis parameters set.')
self.analyze()
bclmins = self.pinpointMinima()
cutCandidates = [0] + [int(b) for b in bclmins] + [len(self._message.data)] # add the message end
cutPositions = [0] + [right for left, right in zip(
cutCandidates[:-1], cutCandidates[1:]
) if right - left > 1]
if cutPositions[-1] != cutCandidates[-1]:
cutPositions[-1] = cutCandidates[-1]
segments = list()
for lmaxCurr, lmaxNext in zip(cutPositions[:-1], cutPositions[1:]):
segments.append(MessageSegment(self, lmaxCurr, lmaxNext-lmaxCurr))
return segments
def pinpointMinima(self):
"""
Pinpoint the exact positions of local minima within the scope of each smoothed local minimum.
The exact position is looked for in self.bitcongruences.
:return: One exact local minium m in the interval ( center(m_n-1, m_n), center(m_n, m_n+1) )
for each n in (0, smoothed local minimum, -1)
"""
localminima = MessageAnalyzer.localMinima(self.values) # List[idx], List[min]
# localmaxima = MessageAnalyzer.localMaxima(self.values) # List[idx], List[max]
# for lminix in range(len(localminima)):
# localminima[lminix]
lminAO = [0] + localminima[0] + [len(self._message.data)]
lminMed = (numpy.round(numpy.ediff1d(lminAO) / 2) + lminAO[:-1]).astype(int)
bclmins = [medl + numpy.argmin(self.bitcongruences[medl:medr]) for medl, medr in zip(lminMed[:-1], lminMed[1:])]
return bclmins
class BitCongruenceDelta(BitCongruence):
_bcvalues = None
def analyze(self):
"""
Delta of bitwise congruence. see :func:`MessageAnalyzer.bitCongruence`
not unit-dependant, always byte-wise
:return: list of amplitudes of bit congruence from index i = 1 to n between bits of i-1 and i
"""
super().analyze()
self._bcvalues = self._values
self._values = MessageAnalyzer.tokenDelta(self._values)
self._startskip += 1
assert self._startskip + len(self._values) == len(self._message.data), \
"{} + {} != {}".format(self._startskip, len(self._values), len(self._message.data))
@property
def bitcongruences(self):
"""
:return: basic bit congruences
"""
if self._bcvalues is None:
return None
return [numpy.nan] * super().startskip + self._bcvalues
class BitCongruenceDeltaGauss(BitCongruenceDelta):
_bcdvalues = None
# _sensitivity = 0.33
# """Sensitivity threshold for the smoothed extrema."""
def setAnalysisParams(self, sigma=1.5):
self._analysisArgs = (sigma, )
def analyze(self):
        from collections.abc import Sequence
        if not self._analysisArgs or not isinstance(self._analysisArgs, Sequence):
            raise ParametersNotSet('Analysis parameter missing: sigma.')
sigma, = self._analysisArgs
super().analyze()
self._bcdvalues = self._values
bcv = numpy.array(self._values)
assert not numpy.isnan(bcv).any()
# bcv could be filtered by: [~numpy.isnan(bcv)]
self._values = list(gaussian_filter1d(bcv, sigma)) # + [numpy.nan]
assert self._startskip + len(self._values) == len(self._message.data), \
"{} + {} != {}".format(self._startskip, len(self._values), len(self._message.data))
@property
def bcdeltas(self):
"""
:return: bit congruence deltas without smoothing
"""
if self._bcdvalues is None:
return None
return [numpy.nan] * self.startskip + self._bcdvalues
def messageSegmentation(self) -> List[MessageSegment]:
"""
Segment message by determining inflection points of sigma-s-gauss-filtered bit-congruence.
The cut position is the delta max of the unsmoothed bcd in the scope of a min/max (rising) pair.
additionally cut at high plateaus starts in the basic bc values.
:return: Segmentation of this message based on this analyzer's type.
"""
if not self.values:
if not self._analysisArgs:
raise ValueError('No values or analysis parameters set.')
self.analyze()
# cut one byte before the inflection
inflectionPoints = self.inflectionPoints()
inflectionCuts = [ int(i)-1 for i in inflectionPoints[0]]
# # cut one byte before the plateau
# # | has yielded mixed quality results (was better for dhcp, much worse for ntp and dns)
# # | TODO probably having some kind of precedence whether inflection or plateau is to be kept
# # | if both cut positions are near to each other might make this worthwhile.
# highPlats = self.bcHighPlateaus()
# highPlatCuts = [ int(i)-1 for i in highPlats[0]]
# # below: sorted( + highPlatCuts)
# get candidates to cut segments from message
cutCandidates = [0] + inflectionCuts \
+ [len(self._message.data)] # add the message end
# cut only where a segment is of a length larger than 1
cutPositions = [0] + [right for left, right in zip(
cutCandidates[:-1], cutCandidates[1:]
) if right - left > 1]
# cutPositions = list(sorted(cutPositions + nansep[0]))
# add the end of the message if its not already there
if cutPositions[-1] != cutCandidates[-1]:
cutPositions[-1] = cutCandidates[-1]
segments = list()
for cutCurr, cutNext in zip(cutPositions[:-1], cutPositions[1:]):
segments.append(MessageSegment(self, cutCurr, cutNext-cutCurr))
return segments
def extrema(self) -> List[Tuple[int, bool]]:
"""
:return: all extrema of the smoothed bcd, each described by a tuple of its index and bool (min is False)
"""
bcdNR = self.values
lmin = MessageAnalyzer.localMinima(bcdNR)
lmax = MessageAnalyzer.localMaxima(bcdNR)
nrExtrema = sorted(
[(i, False) for i in lmin[0]] + [(i, True) for i in lmax[0]], key=lambda k: k[0])
return nrExtrema
def risingDeltas(self) -> List[Tuple[int, numpy.ndarray]]:
"""
the deltas in the original bcd (so: 2nd delta) between minima and maxima in smoothed bcd
:return: offset of and the bcd-delta values starting at this position in rising parts of the smoothed bcd.
Thus, offset is a minimum + 1 and the array covers the indices up to the following maximum, itself included.
"""
extrema = self.extrema()
risingdeltas = [ ( i[0] + 1, numpy.ediff1d(self.bcdeltas[i[0]:j[0]+1]) ) # include index of max
for i, j in zip(extrema[:-1], extrema[1:])
if i[1] == False and j[1] == True and j[0]+1 - i[0] > 1]  # rising pairs spanning more than one position
"1"'
err_str = err_str.format(filename, bm_name, match2[1])
logging.error(err_str)
json_results[key]['benchmarks'][idx]['filter_location'] =\
filter_location
json_results[key]['benchmarks'][idx]['federate_count'] =\
federate_count
# Core type
json_results = _add_core(
bm_name, filename, json_results, key, idx)
info_str = ('Test type = filter\n'
' Added benchmark metadata to {} ')
info_str = info_str.format(filename)
logging.info(info_str)
elif 'messageLookup' in filename:
for idx, results_dict in enumerate(
json_results[key]['benchmarks']):
bm_name = results_dict['name']
if 'multiCore' in bm_name:
# Interface count and federate count
match = re.search(r'/\d+/\d+/', bm_name)
match2 = re.findall(r'\d+', match.group(0))
interface_count = int(match2[0])
federate_count = int(match2[1])
json_results[key]['benchmarks'][idx]['interface_count'] = \
interface_count
json_results[key]['benchmarks'][idx]['federate_count'] = \
federate_count
# Core type
json_results = _add_core(
bm_name, filename, json_results, key, idx)
info_str = ('Test type = messageLookup\n'
' Added benchmark metadata to {} ')
info_str = info_str.format(filename)
logging.info(info_str)
elif 'messageSend' in filename:
for idx, results_dict in enumerate(
json_results[key]['benchmarks']):
bm_name = results_dict['name']
# Message size and message count
match = re.search(r'/\d+/\d+/', bm_name)
match2 = re.findall(r'\d+', match.group(0))
message_size = int(match2[0])
message_count = int(match2[1])
json_results[key]['benchmarks'][idx]['message_size'] = \
message_size
json_results[key]['benchmarks'][idx]['message_count'] = \
message_count
# Core type
json_results = _add_core(
bm_name, filename, json_results, key, idx)
info_str = ('Test type = messageSend\n'
' Added benchmark metadata to {} ')
info_str = info_str.format(filename)
logging.info(info_str)
elif 'ring' in filename:
for idx, results_dict in enumerate(
json_results[key]['benchmarks']):
bm_name = results_dict['name']
if 'multiCore' in bm_name:
# Federate count
match = re.search(r'/\d+/', bm_name)
federate_count = int(match.group(0)[1:-1])
json_results[key]['benchmarks'][idx]['federate_count'] = \
federate_count
# Core type
json_results = _add_core(
bm_name, filename, json_results, key, idx)
if 'ringResults' in filename:
info_str = ('Test type = ringResults\n'
' Added benchmark metadata to {} ')
if 'ringMessage' in filename:
info_str = ('Test type = ringMessage\n'
' Added benchmark metadata to {} ')
info_str = info_str.format(filename)
logging.info(info_str)
elif 'phold' in filename:
for idx, results_dict in enumerate(
json_results[key]['benchmarks']):
bm_name = results_dict['name']
# Federate count
match = re.search(r'/\d+/', bm_name)
federate_count = int(match.group(0)[1:-1])
json_results[key]['benchmarks'][idx]['federate_count'] = \
federate_count
# Core type
json_results = _add_core(
bm_name, filename, json_results, key, idx)
info_str = ('Test type = phold\n'
' Added benchmark metadata to {} ')
info_str = info_str.format(filename)
logging.info(info_str)
elif 'timing' in filename:
for idx, results_dict in enumerate(
json_results[key]['benchmarks']):
bm_name = results_dict['name']
# Federate count
match = re.search(r'/\d+/', bm_name)
federate_count = int(match.group(0)[1:-1])
json_results[key]['benchmarks'][idx]['federate_count'] = \
federate_count
# Core type
json_results = _add_core(
bm_name, filename, json_results, key, idx)
info_str = ('Test type = timing\n'
' Added benchmark metadata to {} ')
info_str = info_str.format(filename)
logging.info(info_str)
return json_results
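# Illustrative note for the parsing above (hypothetical benchmark name): for a
# name such as 'BM_mhub/multiCore/zmqCore/4/2/real_time', re.search(r'/\d+/\d+/', name)
# matches '/4/2/' and re.findall(r'\d+', ...) returns ['4', '2'], which the
# branches above store as, e.g., (interface_count, federate_count) or
# (message_size, message_count) depending on the test type.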
def _add_core(bm_name, filename, json_results, key, idx):
"""This function parses the benchmark name to add the core type to
benchmark metadata.
Args:
bm_name (str) - Benchmark name string which contains the core
type information
filename (str) - Path and name of file in which the benchmark
information is contained. Only used for error reporting.
json_results (dict) - Dictionary of all benchmark results
(keyed by benchmark results filename) that the data and
metadata from json_file are being added to.
key (str) - Key for dictionary for this benchmark results
idx (int) - Index of the current benchmark being processed.
Needed so core type information can be written into the correct
location in the json_results dictionary.
Returns:
json_results (dict) - Dictionary of all benchmark results
(keyed by benchmark results filename) that metadata from the
benchmark name are being added to.
"""
if 'multiCore/' in bm_name:
core_match = re.search('multiCore/.*?Core', bm_name)
if core_match:
core_name = core_match.group(0)[10:-4]
json_results[key]['benchmarks'][idx]['core_type'] = core_name
else:
logging.error('No core_type added to {} in {}'.format(
bm_name,
filename))
# TDH (2019-12-29)
# This is trying to deal with the inconsistency in the naming
# convention that, as of this writing, exists in the results files.
# Hopefully we can soon arrive at a convention and retroactively
# change all the results files to conform to that convention.
elif 'singleFed/' in bm_name:
json_results[key]['benchmarks'][idx]['core_type'] = 'singleFed'
elif 'singleCore/' in bm_name:
json_results[key]['benchmarks'][idx]['core_type'] = 'singleCore'
else:
json_results[key]['benchmarks'][idx]['core_type'] = 'unspecified'
if ('conversion' not in bm_name
and 'interpret' not in bm_name
and 'AM' not in bm_name):
# TDH (2019-12-19)
# The conversion, interpret, and AM benchmarks are known to have no core
# specified, so they are excluded by the condition above; everything else
# that reaches this point gets a warning so it isn't silently unspecified.
warn_str = 'Unable to find core type in {} in {}; ' \
'setting to "unspecified"'
warn_str = warn_str.format(bm_name, filename)
logging.warning(warn_str)
return json_results
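# Illustrative example (hypothetical benchmark name): for
# 'BM_echo/multiCore/zmqCore/2/real_time', re.search('multiCore/.*?Core', name)
# matches 'multiCore/zmqCore' and the slice [10:-4] keeps 'zmq' as core_type.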
def _check_missing_core_type(json_results):
"""This function checks the json_results dictionary to see if any of
the benchmarks are missing a value for core_type. This is a
trouble-shooting function developed by Trevor to help hunt down some
graphing bugs and was retained because it seems useful. Results from
the test are written to the log file.
Args:
json_results (dict) - Dictionary of all benchmark results
(keyed by benchmark results filename) that the data and
metadata from json_file are being added to.
Returns:
null
"""
for uuid in json_results:
for idx, benchmark in enumerate(json_results[uuid]['benchmarks']):
if 'core_type' not in benchmark:
logging.error('No core_type found in {} in {}'.format(
benchmark,
json_results[uuid]['filename']))
else:
logging.info('core_type found in {} in {}'.format(
benchmark,
json_results[uuid]['filename']))
def _add_run_id(key, json_results):
"""This function parses the filename to extract the 5 character
run ID for a given file and adds it to the json_results dictionary
Args:
key (str) - Key (in json_results) of the benchmark results file whose
filename is parsed for the run ID.
json_results (dict) - Dictionary of all benchmark results
(keyed by benchmark results filename) that the data and
metadata from json_file are being added to.
Returns:
json_results (dict) - json_results with run_id added for
the indicated results file.
"""
match = re.search(r'\d_.*?\.txt', json_results[key]['filename'])
if match:
run_id = match.group(0)[2:-4]
json_results[key]['run_id'] = run_id
else:
json_results[key]['run_id'] = ''
return json_results
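# Illustrative example (hypothetical filename): for
# 'bm_echo_results_2020-01-01_abcde.txt', re.search(r'\d_.*?\.txt', ...)
# matches '1_abcde.txt' and the slice [2:-4] yields the run ID 'abcde'.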
def _parse_compiler_string(uuid, json_results):
"""This function parses the compiler string in the metadata header
line and adds it to the json_results metadata for the benchmark
Args:
uuid (str) - Key (in json_results) of the benchmark results entry whose
compiler info string is being parsed.
json_results (dict) - Dictionary of all benchmark results
(keyed by benchmark results filename) that the data and
metadata from json_file are being added to.
Returns:
json_results (dict) - json_results with compiler metadata
extracted and added to the results for a given
benchmark.
"""
# Since I'm going to be using it a lot...
compiler_str = json_results[uuid]['compiler_info_string']
# Generator
generators = ['Ninja',
'Visual Studio 15 2017',
'Visual Studio 16 2019',
'Unix Makefiles',
'MSYS Makefiles']
match = re.search('^.*?:', compiler_str)
matched_generator = False
for item in generators:
if item in match.group(0):
json_results[uuid]['generator'] = item
matched_generator = True
break
else:
json_results[uuid]['generator'] = ''
matched_generator = False
if matched_generator is False:
err_str = 'Unable to match element in string "{}" to ' \
'known generator in compiler options: {}'
err_str = err_str.format(match.group(0), pp.pformat(generators))
logging.error(err_str)
# System
match = re.search(r'\s.*?:', compiler_str)
if match:
match_linux = re.search(r'Linux-.*?:', match.group(0))
match_windows = re.search(r'Windows-[\d|\.]*', match.group(0))
match_darwin = re.search(r'Darwin-[\d|\.]*', match.group(0))
if match_linux:
# Linux system
json_results[uuid]['system'] = 'Linux'
linux_version = match_linux.group(0)[6:]
json_results[uuid]['system_version'] = linux_version
# Splitting up the Linux version string
match3 = re.search(r'\d+\.', linux_version)
json_results[uuid]['linux_kernel_version'] = match3.group(0)[:-1]
match3 = re.search(r'\.\d+\.', linux_version)
json_results[uuid]['linux_major_version'] = match3.group(0)[1:-1]
match3 = re.search(r'\.\d+-', linux_version)
json_results[uuid]['linux_minor_version'] = match3.group(0)[1:-1]
# TDH: There's some weirdness with the bug fix version
# and/or distro string. I'm doing my best to handle it.
match3 = re.search(r'-\d+-', linux_version)
if match3:
json_results[uuid]['linux_bug_fix_version'] =\
match3.group(0)[1:-1]
match4 = re.search(r'-(?!\d).*$', linux_version)
json_results[uuid]['linux_distro_string'] =\
match4.group(0)[1:-1]
else:
match3 = re.search(r'-.*:(?!-\d+\.\d+\.\d+-)', linux_version)
if match3:
json_results[uuid]['linux_bug_fix_version'] = \
match3.group(0)[1:-1]
json_results[uuid]['linux_distro_string'] = ''
else:
err_str = 'Unable to parse Linux ' \
'kernel bug fix version: {}'
err_str = err_str.format(linux_version)
logging.error(err_str)
elif match_windows:
# Windows
json_results[uuid]['system'] = 'Windows'
windows_version = match_windows.group(0)[8:]
json_results[uuid]['system_version'] = windows_version
elif match_darwin:
# Darwin (Mac) system
json_results[uuid]['system'] = 'Darwin'
darwin_version = match_darwin.group(0)[7:]
json_results[uuid]['system_version'] = darwin_version
# Splitting up the Darwin version string
match3 = re.search(r'\d+\.', darwin_version)
json_results[uuid]['darwin_kernel_version'] = match3.group(0)[:-1]
match3 = re.search(r'\.\d+\.', darwin_version)
json_results[uuid]['darwin_major_version'] = match3.group(0)[1:-1]
match3 = re.search(r'\.\d+$', darwin_version)
json_results[uuid]['darwin_minor_version'] = match3.group(0)[1:]
elif not match:
match_linux = re.search(r'Linux-.*?:', compiler_str)
match_windows = re.search(r'Windows-[\d|\.]*', compiler_str)
match_darwin = re.search(r'Darwin-[\d|\.]*', compiler_str)
if match_linux:
# Linux system
json_results[uuid]['system'] = 'Linux'
linux_version = match_linux.group(0)[6:]
json_results[uuid]['system_version'] = linux_version
# Splitting up the Linux version string
match3 = re.search(r'\d+\.', linux_version)
json_results[uuid]['linux_kernel_version'] = match3.group(0)[:-1]
match3 = re.search(r'\.\d+\.', linux_version)
json_results[uuid]['linux_major_version'] = match3.group(0)[1:-1]
match3 = re.search(r'\.\d+-', linux_version)
json_results[uuid]['linux_minor_version'] = match3.group(0)[1:-1]
# TDH: There's some weirdness with the bug fix version
# and/or distro string. I'm doing my best to handle it.
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
from typing import List, Dict
import requests
import base64
requests.packages.urllib3.disable_warnings()
"""
Created on August 1, 2019
@author: <NAME>
"""
''' GLOBAL VARS '''
AUTH_KEY = demisto.params().get('apikey')
BASE_API = demisto.params().get('apiurl', 'https://oti.slashnext.cloud/api')
if BASE_API.endswith('/'):
BASE_API = BASE_API.strip('/')
VERIFY = not demisto.params().get('unsecure', False)
HOST_REPUTE_API = '/oti/v1/host/reputation'
URL_SCAN_API = '/oti/v1/url/scan'
URL_SCANSYNC_API = '/oti/v1/url/scansync'
HOST_REPORT_API = '/oti/v1/host/report'
DL_SC_API = '/oti/v1/download/screenshot'
DL_HTML_API = '/oti/v1/download/html'
DL_TEXT_API = '/oti/v1/download/text'
''' HELPERS FUNCTIONS '''
@logger
def http_request(endpoint, data, method='POST'):
"""
Make the http request to SlashNext cloud API endpoint with the given API args
:param endpoint: Corresponds to SlashNext cloud API to be invoked
:param data: Parameter dictionary as part of data
:param method: HTTP method to be used for API i.e. GET or POST
:return: Response of the SlashNext web API in json format
"""
url = BASE_API + endpoint
data['authkey'] = AUTH_KEY
response = requests.request(method, url=url, data=data, timeout=300, verify=VERIFY)
if response.status_code == 200:
try:
return response.json()
except Exception as e:
return_error('Response JSON decoding failed due to {}'.format(str(e)))
else:
return_error('API Returned, {}:{}'.format(response.status_code, response.reason))
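# Illustrative usage (hypothetical host value): http_request(HOST_REPUTE_API, {'host': 'example.com'})
# posts to BASE_API + '/oti/v1/host/reputation' with 'authkey' added to the form
# data and returns the decoded JSON body on HTTP 200; any other status aborts via return_error.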
def get_dbot_score(verdict):
"""
Evaluate the dbot (Demisto) score as per verdict from SlashNext cloud API
:param verdict: SlashNext verdict on a certain IoC
:return: Dbot score
"""
if verdict == 'Malicious':
return 3
elif verdict == 'Suspicious':
return 2
elif verdict == 'Benign' or verdict == 'Redirector':
return 1
else:
return 0
def get_dbot_std_context(indicator, ioc_type, verdict, threat_type):
"""
Makes the dictionary for dbot score and standard Demisto contexts
:param indicator: IoC value
:param ioc_type: IoC type, ip, domain or url
:param verdict: Verdict by SlashNext OTI cloud
:param threat_type: Threat type reported by SlashNext OTI cloud
:return: Dbot score context dictionary, dbot standard context dictionary
"""
dbot_score = get_dbot_score(verdict)
dbot_score_cont = {
'Indicator': indicator,
'Type': ioc_type.lower(),
'Vendor': 'SlashNext Phishing Incident Response',
'Score': dbot_score
}
if ioc_type.lower() == 'ip':
standard_cont = {
'Address': indicator
}
elif ioc_type.lower() == 'domain':
standard_cont = {
'Name': indicator
}
else:
standard_cont = {
'Data': indicator
}
if dbot_score == 3:
standard_cont['Malicious'] = {
'Vendor': 'SlashNext Phishing Incident Response',
'Description': 'Detected "{}" Activity'.format(threat_type)
}
return dbot_score_cont, standard_cont
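# Illustrative example (hypothetical indicator): get_dbot_std_context('http://phish.example', 'url', 'Malicious', 'Phishing')
# returns a DBotScore dict with Score == 3 and a standard context dict
# {'Data': 'http://phish.example', 'Malicious': {...}}, since a score of 3
# marks the indicator as malicious.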
def get_snx_host_ioc_context(indicator, ioc_type, threat_data):
"""
Make the dictionary for SlashNext IoC contexts for hosts
:param indicator: IoC value
:param ioc_type: IoC type
:param threat_data: Threat data by SlashNext OTI cloud
:return: SlashNext IoC context dictionary
"""
snx_ioc_cont = {
'Value': indicator,
'Type': ioc_type,
'Verdict': threat_data.get('verdict'),
'ThreatStatus': threat_data.get('threatStatus'),
'ThreatType': threat_data.get('threatType'),
'ThreatName': threat_data.get('threatName'),
'FirstSeen': threat_data.get('firstSeen'),
'LastSeen': threat_data.get('lastSeen')
}
return snx_ioc_cont
def get_snx_url_ioc_context(url_data, is_scan=False):
"""
Make the dictionary for SlashNext URL IoC contexts for URLs
:param url_data: URL data received in json format
:param is_scan: Whether the scan ID should be included
:return: List of SlashNext IoC context dictionaries, Entry context dictionary
"""
snx_ioc_cont_list = []
dbot_score_cont_list = []
url_cont_list = []
url_threat_data = url_data.get('threatData')
snx_ioc_cont = {
'Value': url_data.get('url'),
'Type': 'Scanned URL',
'Verdict': url_threat_data.get('verdict'),
'ThreatStatus': url_threat_data.get('threatStatus'),
'ThreatType': url_threat_data.get('threatType'),
'ThreatName': url_threat_data.get('threatName'),
'FirstSeen': url_threat_data.get('firstSeen'),
'LastSeen': url_threat_data.get('lastSeen')
}
if is_scan is True:
snx_ioc_cont['ScanID'] = url_data.get('scanId')
dbot_score_cont, url_cont = get_dbot_std_context(
url_data.get('url'), 'url',
url_threat_data.get('verdict'),
url_threat_data.get('threatType'))
dbot_score_cont_list.append(dbot_score_cont)
if url_cont is not None:
url_cont_list.append(url_cont)
if url_data.get('landingUrl') is None:
if url_data.get('finalUrl') is not None and url_data.get('finalUrl') != 'N/A':
dbot_final_score_cont, final_url_cont = get_dbot_std_context(
url_data.get('finalUrl'), 'url',
url_threat_data.get('verdict'),
url_threat_data.get('threatType'))
dbot_score_cont_list.append(dbot_final_score_cont)
if final_url_cont is not None:
url_cont_list.append(final_url_cont)
snx_final_ioc_cont = {
'Value': url_data.get('finalUrl'),
'Type': 'Final URL',
'Verdict': url_threat_data.get('verdict')
}
snx_ioc_cont['Final'] = snx_final_ioc_cont.copy()
snx_ioc_cont_list.append(snx_ioc_cont)
snx_final_ioc_cont['Value'] = '--------> {}'.format(url_data.get('finalUrl'))
snx_ioc_cont_list.append(snx_final_ioc_cont)
else:
snx_ioc_cont_list.append(snx_ioc_cont)
else:
landing = url_data.get('landingUrl')
landing_threat_data = landing.get('threatData')
dbot_landing_score_cont, landing_url_cont = get_dbot_std_context(
landing.get('url'), 'url',
landing_threat_data.get('verdict'),
landing_threat_data.get('threatType'))
dbot_score_cont_list.append(dbot_landing_score_cont)
if landing_url_cont is not None:
url_cont_list.append(landing_url_cont)
snx_landing_ioc_cont = {
'Value': landing.get('url'),
'Type': 'Redirected URL',
'Verdict': landing_threat_data.get('verdict'),
'ThreatStatus': landing_threat_data.get('threatStatus'),
'ThreatType': landing_threat_data.get('threatType'),
'ThreatName': landing_threat_data.get('threatName'),
'FirstSeen': landing_threat_data.get('firstSeen'),
'LastSeen': landing_threat_data.get('lastSeen')
}
if is_scan is True:
snx_landing_ioc_cont['ScanID'] = landing.get('scanId')
snx_ioc_cont['Landing'] = snx_landing_ioc_cont.copy()
snx_ioc_cont_list.append(snx_ioc_cont)
snx_landing_ioc_cont['Value'] = '--------> {}'.format(landing.get('url'))
snx_ioc_cont_list.append(snx_landing_ioc_cont)
return snx_ioc_cont_list, dbot_score_cont_list, url_cont_list
def download_forensics_data(scanid, tag, screenshot=False, html=False, txt=False):
"""
Download the selected forensics data from SlashNext cloud
:param scanid: Scan ID for which forensics data is to be downloaded
:param tag: String to tag the corresponding forensics data file
:param screenshot: Holds true if screenshot is to be downloaded
:param html: Holds true if the HTML is to be downloaded
:param txt: Holds true if the text is to be downloaded
:return: None
"""
error_no = 0
error_msg = 'Success'
show_error_msg = True
if screenshot is True:
# Host Screenshot Section
api_data = {
'scanid': scanid,
'resolution': 'medium'
}
response = http_request(endpoint=DL_SC_API, data=api_data)
if response.get('errorNo') != 0:
error_no = response.get('errorNo')
error_msg = response.get('errorMsg')
else:
show_error_msg = False
sc_base64 = response.get('scData').get('scBase64')
sc_data = base64.b64decode(sc_base64)
sc_file = fileResult('slashnext_{}.jpg'.format(scanid), sc_data, entryTypes['image'])
demisto.results({
'Type': entryTypes['image'],
'ContentsFormat': formats['text'],
'Contents': 'Forensics: Webpage Screenshot for the ' + tag,
'File': sc_file.get('File'),
'FileID': sc_file.get('FileID')
})
if html is True:
# Host HTML Section
api_data = {
'scanid': scanid
}
response = http_request(endpoint=DL_HTML_API, data=api_data)
if response.get('errorNo') == 0:
show_error_msg = False
html_base64 = response.get('htmlData').get('htmlBase64')
html_data = base64.b64decode(html_base64)
html_file = fileResult('slashnext_{}.html'.format(scanid), html_data, entryTypes['file'])
demisto.results({
'Type': entryTypes['file'],
'ContentsFormat': formats['text'],
'Contents': 'Forensics: Webpage HTML for the ' + tag,
'File': html_file.get('File'),
'FileID': html_file.get('FileID')
})
if txt is True:
# Host Text Section
api_data = {
'scanid': scanid
}
response = http_request(endpoint=DL_TEXT_API, data=api_data)
if response.get('errorNo') == 0:
show_error_msg = False
text_base64 = response.get('textData').get('textBase64')
text_data = base64.b64decode(text_base64)
text_file = fileResult('slashnext_{}.txt'.format(scanid), text_data, entryTypes['file'])
demisto.results({
'Type': entryTypes['file'],
'ContentsFormat': formats['text'],
'Contents': 'Forensics: Webpage Rendered Text for the ' + tag,
'File': text_file.get('File'),
'FileID': text_file.get('FileID')
})
# Show Error Message
if show_error_msg is True and (screenshot is True or html is True or txt is True):
demisto.results('API Returned, {}:{}'.format(error_no, error_msg))
''' COMMAND FUNCTIONS '''
def validate_snx_api_key():
"""
Validate the provided SlashNext cloud API key and test the connection; in case of any error, exit the program
@:return: None
"""
api_data = {
'host': 'www.google.com'
}
response = http_request(endpoint=HOST_REPUTE_API, data=api_data)
if response.get('errorNo') != 0:
return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))
return 'ok'
def ip_lookup(ip):
"""
Execute SlashNext's host/reputation API against the requested IP address with the given parameters
:param ip: IP address whose reputation needs to be fetched
:return: Response of the SlashNext host/reputation API
"""
# Create the required data dictionary for Host/Reputation
api_data = {
'host': ip
}
response = http_request(endpoint=HOST_REPUTE_API, data=api_data)
if response.get('errorNo') != 0:
return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))
return response
def ip_command():
"""
Execute SlashNext's host/reputation API against the requested IP reputation command with the given parameters
@:return: None
"""
# 1. Get input host from Demisto
ip = demisto.args().get('ip')
if not is_ip_valid(ip):
return_error('Invalid IP address, Please retry with a valid IP address')
# 2. Get the host reputation from SlashNext API
response = ip_lookup(ip=ip)
if response.get('errorNo') != 0:
return
# 3. Parse and format the response
dbot_score_cont, ip_cont = get_dbot_std_context(
ip, 'IP', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))
snx_ioc_cont = get_snx_host_ioc_context(ip, 'IP', response.get('threatData'))
ec = {
'SlashNext.IP(val.Value === obj.Value)': snx_ioc_cont,
'DBotScore': dbot_score_cont,
'IP': ip_cont
}
title = 'SlashNext Phishing Incident Response - IP Lookup\n' \
'##### ip = {}'.format(ip)
md = tableToMarkdown(
title,
snx_ioc_cont,
['Value',
'Type',
'Verdict',
'ThreatStatus',
'ThreatName',
'ThreatType',
'FirstSeen',
'LastSeen']
)
return_outputs(md, ec, snx_ioc_cont)
def domain_lookup(domain):
"""
Execute SlashNext's host/reputation API against the requested domain with the given parameters
:param domain: Domain whose reputation needs to be fetched
:return: Response of the SlashNext host/reputation API
"""
# Create the required data dictionary for Host/Reputation
api_data = {
'host': domain
}
response = http_request(endpoint=HOST_REPUTE_API, data=api_data)
if response.get('errorNo') != 0:
return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))
return response
def domain_command():
"""
Execute SlashNext's host/reputation API against the requested domain reputation command with the given parameters
@:return: None
"""
# 1. Get input host from Demisto
domain = demisto.args().get('domain')
# 2. Get the host reputation from SlashNext API
response = domain_lookup(domain=domain)
if response.get('errorNo') != 0:
return
# 3. Parse and format the response
dbot_score_cont, domain_cont = get_dbot_std_context(
domain, 'Domain', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))
snx_ioc_cont = get_snx_host_ioc_context(domain, 'Domain', response.get('threatData'))
ec = {
'SlashNext.Domain(val.Value === obj.Value)': snx_ioc_cont,
'DBotScore': dbot_score_cont,
'Domain': domain_cont
}
domain = domain.encode('idna')
title = 'SlashNext Phishing Incident Response - Domain Lookup\n' \
'##### domain = {}'.format(domain.decode())
md = tableToMarkdown(
title,
snx_ioc_cont,
['Value',
'Type',
'Verdict',
'ThreatStatus',
'ThreatName',
'ThreatType',
'FirstSeen',
'LastSeen']
)
return_outputs(md, ec, snx_ioc_cont)
def host_reputation(host):
"""
Execute SlashNext's host/reputation API against the requested host with the given parameters
:param host: Host (IP address or domain) whose reputation needs to be fetched
:return: Response of the SlashNext host/reputation API
"""
api_data = {'host': host}
response = http_request(endpoint=HOST_REPUTE_API, data=api_data)
if response.get('errorNo') != 0:
return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))
return response
'Amite': {'pop': 13131, 'tracts': 3},
'Attala': {'pop': 19564, 'tracts': 6},
'Benton': {'pop': 8729, 'tracts': 2},
'Bolivar': {'pop': 34145, 'tracts': 8},
'Calhoun': {'pop': 14962, 'tracts': 5},
'Carroll': {'pop': 10597, 'tracts': 2},
'Chickasaw': {'pop': 17392, 'tracts': 4},
'Choctaw': {'pop': 8547, 'tracts': 3},
'Claiborne': {'pop': 9604, 'tracts': 3},
'Clarke': {'pop': 16732, 'tracts': 4},
'Clay': {'pop': 20634, 'tracts': 5},
'Coahoma': {'pop': 26151, 'tracts': 7},
'Copiah': {'pop': 29449, 'tracts': 6},
'Covington': {'pop': 19568, 'tracts': 4},
'DeSoto': {'pop': 161252, 'tracts': 33},
'Forrest': {'pop': 74934, 'tracts': 17},
'Franklin': {'pop': 8118, 'tracts': 2},
'George': {'pop': 22578, 'tracts': 5},
'Greene': {'pop': 14400, 'tracts': 2},
'Grenada': {'pop': 21906, 'tracts': 5},
'Hancock': {'pop': 43929, 'tracts': 7},
'Harrison': {'pop': 187105, 'tracts': 46},
'Hinds': {'pop': 245285, 'tracts': 64},
'Holmes': {'pop': 19198, 'tracts': 5},
'Humphreys': {'pop': 9375, 'tracts': 3},
'Issaquena': {'pop': 1406, 'tracts': 1},
'Itawamba': {'pop': 23401, 'tracts': 5},
'Jackson': {'pop': 139668, 'tracts': 28},
'Jasper': {'pop': 17062, 'tracts': 4},
'Jefferson': {'pop': 7726, 'tracts': 2},
'Jefferson Davis': {'pop': 12487, 'tracts': 3},
'Jones': {'pop': 67761, 'tracts': 14},
'Kemper': {'pop': 10456, 'tracts': 2},
'Lafayette': {'pop': 47351, 'tracts': 10},
'Lamar': {'pop': 55658, 'tracts': 8},
'Lauderdale': {'pop': 80261, 'tracts': 19},
'Lawrence': {'pop': 12929, 'tracts': 3},
'Leake': {'pop': 23805, 'tracts': 5},
'Lee': {'pop': 82910, 'tracts': 19},
'Leflore': {'pop': 32317, 'tracts': 8},
'Lincoln': {'pop': 34869, 'tracts': 6},
'Lowndes': {'pop': 59779, 'tracts': 14},
'Madison': {'pop': 95203, 'tracts': 21},
'Marion': {'pop': 27088, 'tracts': 6},
'Marshall': {'pop': 37144, 'tracts': 6},
'Monroe': {'pop': 36989, 'tracts': 9},
'Montgomery': {'pop': 10925, 'tracts': 3},
'Neshoba': {'pop': 29676, 'tracts': 7},
'Newton': {'pop': 21720, 'tracts': 5},
'Noxubee': {'pop': 11545, 'tracts': 3},
'Oktibbeha': {'pop': 47671, 'tracts': 8},
'Panola': {'pop': 34707, 'tracts': 6},
'Pearl River': {'pop': 55834, 'tracts': 9},
'Perry': {'pop': 12250, 'tracts': 3},
'Pike': {'pop': 40404, 'tracts': 8},
'Pontotoc': {'pop': 29957, 'tracts': 6},
'Prentiss': {'pop': 25276, 'tracts': 5},
'Quitman': {'pop': 8223, 'tracts': 3},
'Rankin': {'pop': 141617, 'tracts': 27},
'Scott': {'pop': 28264, 'tracts': 6},
'Sharkey': {'pop': 4916, 'tracts': 2},
'Simpson': {'pop': 27503, 'tracts': 5},
'Smith': {'pop': 16491, 'tracts': 3},
'Stone': {'pop': 17786, 'tracts': 3},
'Sunflower': {'pop': 29450, 'tracts': 7},
'Tallahatchie': {'pop': 15378, 'tracts': 4},
'Tate': {'pop': 28886, 'tracts': 5},
'Tippah': {'pop': 22232, 'tracts': 4},
'Tishomingo': {'pop': 19593, 'tracts': 4},
'Tunica': {'pop': 10778, 'tracts': 3},
'Union': {'pop': 27134, 'tracts': 6},
'Walthall': {'pop': 15443, 'tracts': 3},
'Warren': {'pop': 48773, 'tracts': 12},
'Washington': {'pop': 51137, 'tracts': 19},
'Wayne': {'pop': 20747, 'tracts': 4},
'Webster': {'pop': 10253, 'tracts': 3},
'Wilkinson': {'pop': 9878, 'tracts': 2},
'Winston': {'pop': 19198, 'tracts': 5},
'Yalobusha': {'pop': 12678, 'tracts': 3},
'Yazoo': {'pop': 28065, 'tracts': 6}},
'MT': {'Beaverhead': {'pop': 9246, 'tracts': 3},
'Big Horn': {'pop': 12865, 'tracts': 5},
'Blaine': {'pop': 6491, 'tracts': 4},
'Broadwater': {'pop': 5612, 'tracts': 2},
'Carbon': {'pop': 10078, 'tracts': 5},
'Carter': {'pop': 1160, 'tracts': 1},
'Cascade': {'pop': 81327, 'tracts': 22},
'Chouteau': {'pop': 5813, 'tracts': 2},
'Custer': {'pop': 11699, 'tracts': 6},
'Daniels': {'pop': 1751, 'tracts': 1},
'Dawson': {'pop': 8966, 'tracts': 3},
'Deer Lodge': {'pop': 9298, 'tracts': 3},
'Fallon': {'pop': 2890, 'tracts': 1},
'Fergus': {'pop': 11586, 'tracts': 2},
'Flathead': {'pop': 90928, 'tracts': 19},
'Gallatin': {'pop': 89513, 'tracts': 22},
'Garfield': {'pop': 1206, 'tracts': 1},
'Glacier': {'pop': 13399, 'tracts': 4},
'Golden Valley': {'pop': 884, 'tracts': 1},
'Granite': {'pop': 3079, 'tracts': 1},
'Hill': {'pop': 16096, 'tracts': 6},
'Jefferson': {'pop': 11406, 'tracts': 3},
'Judith Basin': {'pop': 2072, 'tracts': 1},
'Lake': {'pop': 28746, 'tracts': 8},
'Lewis and Clark': {'pop': 63395, 'tracts': 14},
'Liberty': {'pop': 2339, 'tracts': 1},
'Lincoln': {'pop': 19687, 'tracts': 5},
'Madison': {'pop': 7691, 'tracts': 3},
'McCone': {'pop': 1734, 'tracts': 1},
'Meagher': {'pop': 1891, 'tracts': 1},
'Mineral': {'pop': 4223, 'tracts': 2},
'Missoula': {'pop': 109299, 'tracts': 20},
'Musselshell': {'pop': 4538, 'tracts': 2},
'Park': {'pop': 15636, 'tracts': 6},
'Petroleum': {'pop': 494, 'tracts': 1},
'Phillips': {'pop': 4253, 'tracts': 1},
'Pondera': {'pop': 6153, 'tracts': 2},
'Powder River': {'pop': 1743, 'tracts': 1},
'Powell': {'pop': 7027, 'tracts': 2},
'Prairie': {'pop': 1179, 'tracts': 1},
'Ravalli': {'pop': 40212, 'tracts': 10},
'Richland': {'pop': 9746, 'tracts': 4},
'Roosevelt': {'pop': 10425, 'tracts': 3},
'Rosebud': {'pop': 9233, 'tracts': 4},
'Sanders': {'pop': 11413, 'tracts': 3},
'Sheridan': {'pop': 3384, 'tracts': 2},
'Silver Bow': {'pop': 34200, 'tracts': 8},
'Stillwater': {'pop': 9117, 'tracts': 3},
'Sweet Grass': {'pop': 3651, 'tracts': 1},
'Teton': {'pop': 6073, 'tracts': 3},
'Toole': {'pop': 5324, 'tracts': 3},
'Treasure': {'pop': 718, 'tracts': 1},
'Valley': {'pop': 7369, 'tracts': 3},
'Wheatland': {'pop': 2168, 'tracts': 1},
'Wibaux': {'pop': 1017, 'tracts': 1},
'Yellowstone': {'pop': 147972, 'tracts': 32}},
'NC': {'Alamance': {'pop': 151131, 'tracts': 36},
'Alexander': {'pop': 37198, 'tracts': 7},
'Alleghany': {'pop': 11155, 'tracts': 3},
'Anson': {'pop': 26948, 'tracts': 6},
'Ashe': {'pop': 27281, 'tracts': 6},
'Avery': {'pop': 17797, 'tracts': 5},
'Beaufort': {'pop': 47759, 'tracts': 11},
'Bertie': {'pop': 21282, 'tracts': 4},
'Bladen': {'pop': 35190, 'tracts': 6},
'Brunswick': {'pop': 107431, 'tracts': 33},
'Buncombe': {'pop': 238318, 'tracts': 56},
'Burke': {'pop': 90912, 'tracts': 18},
'Cabarrus': {'pop': 178011, 'tracts': 37},
'Caldwell': {'pop': 83029, 'tracts': 17},
'Camden': {'pop': 9980, 'tracts': 2},
'Carteret': {'pop': 66469, 'tracts': 38},
'Caswell': {'pop': 23719, 'tracts': 6},
'Catawba': {'pop': 154358, 'tracts': 31},
'Chatham': {'pop': 63505, 'tracts': 13},
'Cherokee': {'pop': 27444, 'tracts': 7},
'Chowan': {'pop': 14793, 'tracts': 3},
'Clay': {'pop': 10587, 'tracts': 2},
'Cleveland': {'pop': 98078, 'tracts': 22},
'Columbus': {'pop': 58098, 'tracts': 13},
'Craven': {'pop': 103505, 'tracts': 21},
'Cumberland': {'pop': 319431, 'tracts': 68},
'Currituck': {'pop': 23547, 'tracts': 8},
'Dare': {'pop': 33920, 'tracts': 11},
'Davidson': {'pop': 162878, 'tracts': 34},
'Davie': {'pop': 41240, 'tracts': 7},
'Duplin': {'pop': 58505, 'tracts': 11},
'Durham': {'pop': 267587, 'tracts': 60},
'Edgecombe': {'pop': 56552, 'tracts': 14},
'Forsyth': {'pop': 350670, 'tracts': 93},
'Franklin': {'pop': 60619, 'tracts': 12},
'Gaston': {'pop': 206086, 'tracts': 65},
'Gates': {'pop': 12197, 'tracts': 3},
'Graham': {'pop': 8861, 'tracts': 3},
'Granville': {'pop': 59916, 'tracts': 13},
'Greene': {'pop': 21362, 'tracts': 4},
'Guilford': {'pop': 488406, 'tracts': 119},
'Halifax': {'pop': 54691, 'tracts': 12},
'Harnett': {'pop': 114678, 'tracts': 27},
'Haywood': {'pop': 59036, 'tracts': 16},
'Henderson': {'pop': 106740, 'tracts': 27},
'Hertford': {'pop': 24669, 'tracts': 5},
'Hoke': {'pop': 46952, 'tracts': 9},
'Hyde': {'pop': 5810, 'tracts': 2},
'Iredell': {'pop': 159437, 'tracts': 44},
'Jackson': {'pop': 40271, 'tracts': 9},
'Johnston': {'pop': 168878, 'tracts': 25},
'Jones': {'pop': 10153, 'tracts': 3},
'Lee': {'pop': 57866, 'tracts': 13},
'Lenoir': {'pop': 59495, 'tracts': 15},
'Lincoln': {'pop': 78265, 'tracts': 18},
'Macon': {'pop': 33922, 'tracts': 9},
'Madison': {'pop': 20764, 'tracts': 6},
'Martin': {'pop': 24505, 'tracts': 6},
'McDowell': {'pop': 44996, 'tracts': 10},
'Mecklenburg': {'pop': 919628, 'tracts': 233},
'Mitchell': {'pop': 15579, 'tracts': 4},
'Montgomery': {'pop': 27798, 'tracts': 6},
'Moore': {'pop': 88247, 'tracts': 18},
'Nash': {'pop': 95840, 'tracts': 18},
'New Hanover': {'pop': 202667, 'tracts': 45},
'Northampton': {'pop': 22099, 'tracts': 5},
'Onslow': {'pop': 177772, 'tracts': 32},
'Orange': {'pop': 133801, 'tracts': 28},
'Pamlico': {'pop': 13144, 'tracts': 4},
'Pasquotank': {'pop': 40661, 'tracts': 10},
'Pender': {'pop': 52217, 'tracts': 16},
'Perquimans': {'pop': 13453, 'tracts': 3},
'Person': {'pop': 39464, 'tracts': 7},
'Pitt': {'pop': 168148, 'tracts': 32},
'Polk': {'pop': 20510, 'tracts': 7},
'Randolph': {'pop': 141752, 'tracts': 28},
'Richmond': {'pop': 46639, 'tracts': 11},
'Robeson': {'pop': 134168, 'tracts': 31},
'Rockingham': {'pop': 93643, 'tracts': 21},
'Rowan': {'pop': 138428, 'tracts': 30},
'Rutherford': {'pop': 67810, 'tracts': 13},
'Sampson': {'pop': 63431, 'tracts': 11},
'Scotland': {'pop': 36157, 'tracts': 7},
'Stanly': {'pop': 60585, 'tracts': 13},
'Stokes': {'pop': 47401, 'tracts': 9},
'Surry': {'pop': 73673, 'tracts': 22},
'Swain': {'pop': 13981, 'tracts': 5},
'Transylvania': {'pop': 33090, 'tracts': 7},
'Tyrrell': {'pop': 4407, 'tracts': 1},
'Union': {'pop': 201292, 'tracts': 41},
'Vance': {'pop': 45422, 'tracts': 10},
'Wake': {'pop': 900993, 'tracts': 187},
'Warren': {'pop': 20972, 'tracts': 6},
'Washington': {'pop': 13228, 'tracts': 3},
'Watauga': {'pop': 51079, 'tracts': 13},
'Wayne': {'pop': 122623, 'tracts': 26},
'Wilkes': {'pop': 69340, 'tracts': 14},
'Wilson': {'pop': 81234, 'tracts': 19},
'Yadkin': {'pop': 38406, 'tracts': 7},
'Yancey': {'pop': 17818, 'tracts': 5}},
'ND': {'Adams': {'pop': 2343, 'tracts': 1},
'Barnes': {'pop': 11066, 'tracts': 4},
'Benson': {'pop': 6660, 'tracts': 4},
'Billings': {'pop': 783, 'tracts': 1},
'Bottineau': {'pop': 6429, 'tracts': 3},
'Bowman': {'pop': 3151, 'tracts': 2},
'Burke': {'pop': 1968, 'tracts': 1},
'Burleigh': {'pop': 81308, 'tracts': 19},
'Cass': {'pop': 149778, 'tracts': 33},
'Cavalier': {'pop': 3993, 'tracts': 2},
'Dickey': {'pop': 5289, 'tracts': 3},
'Divide': {'pop': 2071, 'tracts': 1},
'Dunn': {'pop': 3536, 'tracts': 1},
'Eddy': {'pop': 2385, 'tracts': 1},
'Emmons': {'pop': 3550, 'tracts': 1},
'Foster': {'pop': 3343, 'tracts': 1},
'Golden Valley': {'pop': 1680, 'tracts': 1},
'Grand Forks': {'pop': 66861, 'tracts': 18},
'Grant': {'pop': 2394, 'tracts': 1},
| |
join_operations=None, min_area=1):
"""
Merge adjacent polygons with buffer and unary_union
:return: GeoDataFrame
"""
uu = self.gdf.copy().buffer(tolerance, join_style=2, cap_style=2).unary_union.buffer(-tolerance, join_style=2, cap_style=2)
if 'Multi' in uu.geom_type:
gdf = gpd.GeoDataFrame({'geometry': [Polygon(geom.exterior) for geom in uu]}, crs=self.gdf.crs)
else:
try:
gdf = gpd.GeoDataFrame({'geometry': [Polygon(uu.exterior)]}, crs=self.gdf.crs)
except Exception:
gdf = None
if join_operations is not None:
gdf = Analyst(gdf, self.gdf.copy()).spatial_join(operations=join_operations)
raw_gdf = self.gdf.copy()
raw_gdf['id'] = raw_gdf.index
gdf['centroid'] = gdf.centroid.buffer(1)
gdf = gdf[gdf.area > min_area]
gdf = gdf.set_geometry('centroid')
gdf['parent_id'] = [int(i) for i in Analyst(gdf, self.gdf.copy().loc[:, ['id', 'geometry']]).spatial_join(operations=['max'])['id_max']]
gdf = gdf.set_geometry('geometry')
return gdf.drop('centroid', axis=1)
def extract_open_boundaries(self, inner_rings=True):
"""
Dissolve adjacent polygons and extract segments from original shape located on the outer boundary of dissolved geometry
:return: GeoDataFrame with MultiLineStrings
"""
# Explode shape to get segments
segments = self.explode()
segments['pid'] = list(self.gdf.loc[segments['parent_id'], 'pid'])
segments['length'] = segments.length
# Intersect with reduced polygons
reduced = self.reduce()
overlay = gpd.overlay(segments, reduced)
segments.crs = self.gdf.crs
segments = segments[~segments['sid'].isin(overlay['sid'])]
if inner_rings:
return segments
else:
blocks = self.dissolve()
blocks['geometry'] = blocks.buffer(-2)
overlay = gpd.overlay(segments, blocks)
return segments[~segments['sid'].isin(overlay['sid'])]
def extract_inner_rings(self, multi=False):
"""
Extract inner rings from polygons, if any exist
:return:
"""
# Explode shape to get segments
segments = self.explode()
segments['length'] = segments.length
# Intersect with reduced polygons
dissolved = self.dissolve()
reduced = Shape(dissolved).reduce()
overlay = gpd.overlay(segments, reduced)
return segments[segments['sid'].isin(overlay['sid'])]
def extract_vertices(self):
gdf = self.explode()
vertices = gpd.GeoDataFrame()
for i in gdf.index:
coord = gdf.loc[i, 'geometry'].coords
for pt in [Point(coord[0]), Point(coord[len(coord) - 1])]:
j = len(vertices)
vertices.loc[j, gdf.columns] = gdf.loc[i, :]
vertices.loc[j, 'geometry'] = pt
vertices = vertices.rename({'id': 'line_id'}, axis=1)
vertices['vid'] = vertices.index
vertices = vertices.drop_duplicates('geometry')
return vertices
def get_geom_types(self):
return [geom.geom_type for geom in self.gdf['geometry']]
def get_indicators(self, explode=False):
if explode: self.gdf = self.explode()
gdf = self.gdf.copy()
gdf['n_vertices'] = [len(geom.exterior.coords) for geom in gdf['geometry']]
gdf['area'] = [geom.area for geom in gdf['geometry']]
gdf['perimeter'] = [geom.length for geom in gdf['geometry']]
gdf['ch_perimeter'] = [geom.convex_hull.length for geom in gdf['geometry']]
# Calculate elongation based on minimum rotated rectangles
min_rot_rec = self.min_rot_rec()
gdf['vector'] = [geom[0] if geom is not None else None for geom in min_rot_rec['segments']]
gdf['azimuth'] = [azimuth(geom) if geom is not None else None for geom in gdf['vector']]
elongation = []
width = []
length = []
for lengths in min_rot_rec['lengths']:
if lengths is not None:
if max(lengths) != 0:
elongation.append(1 - (min(lengths) / max(lengths)))
else:
elongation.append(None)
width.append(min(lengths))
length.append(max(lengths))
else:
elongation.append(None)
width.append(None)
length.append(None)
gdf['elongation'] = elongation
gdf['width'] = width
gdf['length'] = length
return gdf
def get_closest(self, reference_geom, reset_index=True):
"""
For each base geometry (left) in the object's gdf, return the closest element
from a reference set (right) that is directly connected to the base geometry.
:param reference_geom: GeoDataFrame with the reference geometries to search against
:param reset_index: If True, re-index the output by the id of the corresponding base geometry
:return:
"""
gdf = self.gdf.copy()
reference_geom = reference_geom.copy()
reference_geom['ref_id'] = reference_geom.index
# Draw a line from centroid of each base element to closest point on reference layer
st = time.time()
gdf['nearest'] = [
scale(LineString([ctr, nearest_points(ctr, reference_geom.unary_union)[1]]), 1.1, 1.1, 1.1, 'centroid')
for ctr in gdf.centroid]
nearest = gdf.copy()
nearest = nearest.drop('geometry', axis=1).set_geometry('nearest')
nearest['base_id'] = nearest['id']
nearest['nearest'] = nearest.buffer(1)
nearest.crs = gdf.crs
if self.verbose: print(f"Nearest Points: {round((time.time() - st) / 60, 3)} minutes")
# Identify base line directly connected to reference layer
inters = gpd.overlay(reference_geom.loc[:, ['ref_id', 'geometry']], nearest.loc[:, ['base_id', 'nearest']])
ref_outs = [list(inters[inters['base_id'] == b_id]['ref_id'])[0] for b_id in gdf['id'].unique()]
out_gdf = reference_geom.loc[ref_outs, :].copy()
out_gdf['base_id'] = list(gdf['id'])
if reset_index:
out_gdf.index = list(out_gdf['base_id'])
out_gdf['id'] = out_gdf.index
return out_gdf
def get_largest_by_column(self, column):
"""
For elements within the same group in a defined column, get largest Shape.
:return: GeoDataFrame with one geometry per unique element of the input column
"""
gdf = self.gdf.copy()
assert column in gdf, KeyError(f"{column} not found in GeoDataFrame")
gdf['length'] = gdf.length
gdf = gdf.sort_values(by=[column, 'length'], ascending=False)
return gpd.GeoDataFrame({
column: [i for i in gdf[column].unique()],
'geometry': [list(gdf[gdf[column] == i]['geometry'])[0] for i in gdf[column].unique()]
}, crs=gdf.crs)
def move(self, reference_geom):
"""
Move elements of the gdf altogether to the centroid of the reference_geom
:return:
"""
gdf = self.gdf.copy()
gdf_ctr = self.gdf.unary_union.centroid
ref_ctr = reference_geom.centroid
gdf['geometry'] = [translate(geom, xoff=ref_ctr.x - gdf_ctr.x, yoff=ref_ctr.y - gdf_ctr.y) for geom in gdf['geometry']]
return gdf
def flip(self, x, y, reference_geom=None):
"""
Flip Shapes according to a reference geometry
:param x:
:param y:
:param reference_geom:
:return:
"""
gdf = self.gdf.copy()
gdf['geometry'] = [scale(geom, xfact=x, yfact=y, origin=reference_geom.unary_union.centroid) for geom in gdf['geometry']]
assert len(gdf['geometry']) == len(self.gdf['geometry'])
return gdf
def min_rot_rec(self, quick_run=False):
gdf = gpd.GeoDataFrame()
gdf['min_rot_rec'] = [geom.minimum_rotated_rectangle.boundary if geom.length > 1 else None for geom in
self.gdf['geometry']]
gdf['id'] = self.gdf.index
gdf['segments'] = [list(map(LineString, zip(ln.coords[:-1], ln.coords[1:]))) if ln is not None else None for ln
in gdf['min_rot_rec']]
gdf['lengths'] = [[int(ln.length) for ln in geom] if geom is not None else None for geom in gdf['segments']]
# Check if shape is a square (all sides of bounding box are equal)
result = all(element == list(gdf['lengths'])[0][0] for element in list(gdf['lengths'])[0])
if result:
largest = [[0, 2] for lengths in gdf['lengths']]
smallest = [[1, 3] for lengths in gdf['lengths']]
else:
largest = [[i for i, length in enumerate(lengths) if length >= max(lengths)]
if lengths is not None else None for lengths in gdf['lengths']]
smallest = [[i for i, length in enumerate(lengths) if length <= min(lengths)]
if lengths is not None else None for lengths in gdf['lengths']]
gdf['largest_segment'] = [geom[i[0]] if geom is not None else None for i, geom in zip(largest, gdf['segments'])]
gdf['largest_parallel'] = [geom[i[1]] if geom is not None else None for i, geom in zip(largest, gdf['segments'])]
gdf['smallest_segment'] = [geom[i[0]] if geom is not None else None for i, geom in zip(smallest, gdf['segments'])]
gdf['smallest_parallel'] = [geom[i[1]] if geom is not None else None for i, geom in zip(smallest, gdf['segments'])]
if not quick_run:
# Get line connecting the centroids of the smaller sides of bounding rectangle
gdf['meridian'] = [LineString([smallest.centroid, parallel.centroid]) if smallest is not None else None
for smallest, parallel in zip(gdf['smallest_segment'], gdf['smallest_parallel'])]
gdf['equator'] = [LineString([largest.centroid, parallel.centroid]) if largest is not None else None
for largest, parallel in zip(gdf['largest_segment'], gdf['largest_parallel'])]
# Get diagonal line
gdf['diagonal1'] = [LineString([Point(ln.coords[0]), Point(ln.coords[2])]) if ln is not None else None for ln in gdf['min_rot_rec']]
gdf['diagonal2'] = [LineString([Point(ln.coords[1]), Point(ln.coords[3])]) if ln is not None else None for ln in gdf['min_rot_rec']]
gdf['geometry'] = gdf['min_rot_rec']
return gdf
def min_bbox(self):
gdf = self.gdf.copy()
gdf['geometry'] = [Polygon(
[Point(geom.bounds[0], geom.bounds[1]), Point(geom.bounds[2], geom.bounds[1]),
Point(geom.bounds[2], geom.bounds[3]), Point(geom.bounds[0], geom.bounds[3])]
) for geom in gdf['geometry']]
return gdf
def offset_in(self, distance, polygon):
"""
Offset a line string and keep only the shapes located inside a polygon.
:param distance: float
:param polygon: Polygon
:return: GeoDataFrame
"""
shapes = gpd.GeoDataFrame(columns=self.gdf.columns)
for i in list(self.gdf.index):
shape = self.gdf.loc[i, 'geometry'].simplify(1, preserve_topology=True)
left_offset = shape.parallel_offset(distance=distance, side='left')
right_offset = shape.parallel_offset(distance=distance, side='right')
if left_offset.centroid.intersects(polygon):
l = len(shapes)
shapes.loc[l, :] = self.gdf.loc[i, :]
shapes.loc[l, 'geometry'] = left_offset
if right_offset.centroid.intersects(polygon):
l = len(shapes)
shapes.loc[l, :] = self.gdf.loc[i, :]
shapes.loc[l, 'geometry'] = right_offset
shapes = shapes.apply(pd.to_numeric, errors='ignore')
shapes = shapes.set_geometry('geometry')
shapes.crs = self.gdf.crs
return shapes
def connect_parallels(self, spines, width=20, tolerance=0.2):
"""
:param spines:
:param width:
:param tolerance:
:return:
"""
spines = relative_max_min(spines)
spines.crs = self.gdf.crs
# Load shape boundaries
boundaries = self.gdf.copy()
boundaries['geometry'] = boundaries.boundary
bnd_segments = Shape(boundaries).divorce()
bnd_segments['bound_id'] = bnd_segments.reset_index(drop=True).index
# Get relative XY coordinates for boundary line segments
bnd_segments = relative_max_min(bnd_segments)
bnd_segments['length'] = bnd_segments.length
# Extract boundary segments with XY coordinates similar to boundary XY coordinates (parallel)
for i in bnd_segments['bound_id'].unique():
sdf = spines[spines['id'] == i]
max_Yy = max(sdf['Y_y'], default=0) * (1 + tolerance)
min_Yy = min(sdf['Y_y'], default=0) * (1 - tolerance)
max_Xx = max(sdf['X_x'], default=0) * (1 + tolerance)
min_Xx = min(sdf['X_x'], default=0) * (1 - tolerance)
# Get parallel based on directions
parallel = bnd_segments[
(bnd_segments['bound_id'] == i) &
(((bnd_segments['X_x'] < max_Xx) & (bnd_segments['X_x'] > min_Xx)) |
((bnd_segments['Y_y'] < max_Yy) & (bnd_segments['Y_y'] > min_Yy)))
]
parallel = linemerge(list(parallel['geometry']))
if parallel.__class__.__name__ == 'LineString': parallel = [parallel]
# Divide spine and connect divisors with closest point on parallel segments
divisions = divide_line_by_length(sdf, length=width)
for points in divisions:
for pt in points:
for segment in parallel:
if segment.length > (width * 2):
j = len(spines)
spines.at[j, 'pol_id'] = i
spines.at[j, 'geometry'] = LineString([nearest_points(pt, segment)[1], pt])
return spines
def create_court(self, depth):
| |
COPval
#return the end effector location in world coords and the constraint location in world coords
def getCnstEffLocs(self):
trackBodyPos = self.cnstrntBody.to_world(x=self.cnstrntOnBallLoc) #self.cnstrntBody.com()
curEffPos = self.reachBody.to_world(x=self.reachBodyOffset)
return trackBodyPos, curEffPos
#return body torques to provide self.desExtFrcVal at toWorld(self.constraintLoc)
#provides JtransFpull component of equation
def getPullTau(self, useLinJacob, debug=False):
if (useLinJacob) :
self.useForce = self.desExtFrcVal
#using only linear : 3 rows x ndofs cols
Jpull = self.reachBody.linear_jacobian(offset=self.reachBodyOffset)
else :
#wrench
#TODO verify target orientation should be 0,0,0
self.useForce = np.zeros(6)
self.useForce[3:]=self.desExtFrcVal
#using linear and rotational == world
Jpull = self.reachBody.world_jacobian(offset=self.reachBodyOffset)
if(debug):
print('getPullTau : pull force being used : {} '.format(self.useForce))
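# Map the desired external force at the end effector into joint torques via the
# Jacobian transpose: tau = J^T * f, where f is the 3-vector force or 6-vector
# wrench chosen above.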
JTrans = np.transpose(Jpull)
res = JTrans.dot(self.useForce)
JTransInv = np.linalg.pinv(JTrans)
#last 3 rows as lin component
return res, JTransInv, Jpull, Jpull[-3:,:]
######################################################
# abstract methods
######################################################
#build the configuration of the initial pose of the figure
@abstractmethod
def _makeInitPoseIndiv(self):
pass
#need to apply tau every step of sim since dart clears all forces afterward;
# this is for conducting any other per-sim-step functions specific to the skelHolder (like reapplying assist force)
@abstractmethod
def applyTau_priv(self):
pass
#special considerations for init pose setting - this is called every time skeleton is rest to initial pose
@abstractmethod
def _setToInitPose_Priv(self):
pass
#setup initial constructions for reward value calculations
@abstractmethod
def setInitRWDValues(self):
pass
#init to be called after skeleton pose is set
@abstractmethod
def postPoseInit(self):
pass
#functionality necessary before simulation step is executed
@abstractmethod
def preStep(self, a):
pass
#individual instance class reset functionality called at end of reset
@abstractmethod
def _resetIndiv(self, dispDebug):
pass
#get the state observation from this skeleton - concatenate to whatever extra info we are sending as observation
@abstractmethod
def getObs(self):
pass
#calculate reward for this agent, see if it is done, and return informational dictionary (holding components of reward for example)
@abstractmethod
def calcRewardAndCheckDone(self, debug):
pass
######################################################
# debug and helper functions
######################################################
#called externally to debug end effector location
def dbg_getEffLocWorld(self):
return self.reachBody.to_world(x=self.reachBodyOffset)
#align sequence of values
def dotAligned(self, seq):
strNums = ['{:.5f}'.format(n) for n in seq]
dots = [s.find('.') for s in strNums]
m = max(dots)
return [' '*(m - d) + s for s, d in zip(strNums, dots)]
#will display current torque vector and RL-policy action vector values formatted with names of joints being applied to
def dbgShowTauAndA(self, name=' '):
print('\n{}Torques and causing Actions : '.format(name))
#should have a dof corresponding to each component of tau
dofs = self.skel.dofs
alignTauStr = self.dotAligned(self.tau)
alignAStr = self.dotAligned(self.a)
numDofs = len(dofs)
if(numDofs != len(self.tau)):
print('!!!!!! attempting to print torques that do not align with skeleton dofs')
return
for i in range(0,self.stTauIdx):
print('\tDof : {:20s} | Value : {} | Action : {:.5f}'.format(dofs[i].name, alignTauStr[i],0))
for i in range(self.stTauIdx,len(dofs)):
print('\tDof : {:20s} | Value : {} | Action : {}'.format(dofs[i].name, alignTauStr[i],alignAStr[(i-self.stTauIdx)]))
#will display passed torque values formatted with names of joints being applied to
def dbgShowTorques(self, tau, name=' '):
print('\n{}Torques : '.format(name))
#should have a dof corresponding to each component of tau
dofs = self.skel.dofs
alignTauStr = self.dotAligned(tau)
if(len(dofs) != len(tau)):
print('!!!!!! attempting to print torques that do not align with skeleton dofs')
return
for i in range(len(dofs)):
print('\tDof : {:20s} | Value : {}'.format(dofs[i].name, alignTauStr[i]))
#display min/max torques seen so far
def dbgDispMinMaxTorques(self):
if(self.monitorTorques):
#min/max torques seen
self.dbgShowTorques(self.monTrqDict['min'],name='Minimum Seen ')
print('\n')
self.dbgShowTorques(self.monTrqDict['max'],name='Maximum Seen ')
print('\n')
else:
print('Torques Not Monitored')
#called only to clear derived torques
def dbgResetTau(self):
#set tau to 0
self.tau = np.zeros(self.ndofs)
#debug functionality - show skel body names, joint names and dof names
def dbgShowSkelVals(self):
self.dbgShowSkelNames(self.skel.bodynodes, 'Body')
self.dbgShowSkelNames(self.skel.joints, 'Joint')
self.dbgShowSkelNames(self.skel.dofs, 'Dof')
#display skel-related object names
def dbgShowSkelNames(self, objs, typeStr=''):
numObjs = len(objs)
print('\n{} {} names : '.format(numObjs,typeStr))
for bidx in range(numObjs):
d = objs[bidx]
print('\t{}'.format(d.name))
def dbgShowDofLims(self):
lims = self.getObsLimits()
numDofs = self.skel.num_dofs()
print('\n{} Dof names and limit values : '.format(numDofs))
for bidx in range(numDofs):
d = self.skel.dof(bidx)
print('{} :\tMin : {:.3f}\tMax : {:.3f}\tMin Vel : {:.3f}\tMax Vel : {:.3f}'.format(d.name, lims['lowQLims'][bidx], lims['highQLims'][bidx], lims['lowDQLims'][bidx], lims['highDQLims'][bidx]))
#get dictionary of environmental/simulation variables used for optimization at reach body constraint location (where applicable)
def getOptVars(self):
res = {}
res['M']=self.skel.M
res['CfrcG']=self.skel.coriolis_and_gravity_forces()
res['jacobian']=self.reachBody.jacobian(offset=self.reachBodyOffset)
res['world_jacobian']=self.reachBody.world_jacobian(offset=self.reachBodyOffset)
res['linear_jacobian']=self.reachBody.linear_jacobian(offset=self.reachBodyOffset)
res['angular_jacobian']=self.reachBody.angular_jacobian()
return res
#class for skeleton holder specifically for the getup human
class ANASkelHolder(skelHolder, ABC):
#Static list of names of implemented reward components
rwdNames = ['eefDist','action','height','footMovDist','lFootMovDist','rFootMovDist','comcop','comcopvel','contacts','UP_COMVEL','X_COMVEL','Z_COMVEL']
def __init__(self, env, skel, widx, stIdx, fTipOffset):
skelHolder.__init__(self,env, skel,widx,stIdx, fTipOffset)
self.name = 'ANA : Agent Needing Assistance'
#set this so that bot doesn't get stuck in limbo REMOVE
#TODO : remove, replace with vector ?
self.minUpVel = .001
#set to true to initialize assist force in apply_tau for training, set to false when using robot assistant
self.setAssistFrcEveryTauApply = False
#this is the full time step - since the human is the only skel simulated with the same action over multiple frames, the human is the only consumer of this
self.dtForAllFrames = self.env.dt
#this is # of sim steps in rollout before displaying debug info about reward
self.numStepsDBGDisp = 101
#distance thresholds
self.rwdTols = defaultdict(float)
# for end effector matching to ball location - distances above this are penalized harshly, distances below this are penalized very gently
self.rwdTols['eefDist'] = .05
# for com proj on ground dist to cop on ground - 10 cms
self.rwdTols['comcop'] = .1
#reward matrix base for action minimization - set joint dof idxs that we don't care as much about for minimization to values between 0 and 1
self.actPenDofWts = np.identity(self.numActDofs)
#which com key to use for height calculations - currently 'com' and 'head'
self.comHtKey = 'com'
# or
#self.comHtKey = 'head'
#whether or not to use best state from previous rollout as initial state on this rollout
self.checkBestState = False
##########################################
# reward function weights and var/scales for each component
# default value is 1.0
#specify weight and var dict names allowed
#rwdNames = ['eefDist','action','height','footMovDist','lFootMovDist','rFootMovDist','comcop','comcopvel','contacts','UP_COMVEL','X_COMVEL','Z_COMVEL']
#specify non-default values in default dict - not using default dict because it might hide bugs where wrong or unknown reward is called for wt/var
wtVals = defaultdict(lambda:1.0,{'eefDist':6.0,'action':.1,'height':10.0,'footMovDist':6.0,'lFootMovDist':6.0,'rFootMovDist':6.0,'UP_COMVEL':10.0, 'comcop':.01})
varVals = defaultdict(lambda:.1,{'action' : (1.0*self.sqrtNumActDofs), 'height':.5})
#this is list of components used used for reward function
rwdFuncsToUse = self.env.rwdCompsUsed
self.setRwdWtsVars(names=ANASkelHolder.rwdNames, wts=wtVals, varVals=varVals, rwdsToUse=rwdFuncsToUse)
#set weighting vectors for reward wts and variance/scales -
# uses dictionary and not default dict, so that any unrecognized weight values throw an exception - we don't want any unrecognized wt or var vals
#names are the names of the rwd components getting set
#wts and varVals are dicts of weights and variances/scales keyed by reward component name
def setRwdWtsVars(self, names, wts, varVals, rwdsToUse):
self.rwdWts = {}
self.rwdVars = {}
#specify reward components to check
self.rwdsToCheck = {}
for i in range(len(names)) :
k = names[i]
self.rwdWts[k] = wts[k]
self.rwdVars[k] = varVals[k]
self.rwdsToCheck[k] = (k in rwdsToUse)
#setup initial constructions for reward value calculations - called before pose is set
def setInitRWDValues(self):
avgInitFootLocAra = self.calcAvgFootBodyLoc()
#both feet avg location
avgInitFootLoc = avgInitFootLocAra[0]
#initial height - subtract this so we only reward increases in height - 0.87790457356624751
#assumes unmodified base skeleton pose in skel file is standing upright
#dict of bodies to be used for height calcs
self.comHtBodyDict = {}
self.comHtBodyDict['com'] = self.skel
self.comHtBodyDict['head'] = self.skel.body(self.headBodyName)
self.standHtOvFtDict = {}
#dict of heights standing of various bodies to be used for height calcs
for k,v in self.comHtBodyDict.items():
self.standHtOvFtDict[k] = v.com()[1] - avgInitFootLoc[1]#0.87790457356624751 for body COM
print('Init StandCOMHeight before pose (skel expected to be standing upright from src file config) - height above avg foot location : {}'.format(self.standHtOvFtDict['com']))
#dicts to hold min and max com velocity vectors for the various tracked skel bodies
self.minCOMVelVals = {}
self.maxCOMVelVals = {}
#min and max com vel vals to receive positive rewards - roots of parabola
#these should not be | |
#
# Dream MCMC
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pints
import numpy as np
class DreamMCMC(pints.MultiChainMCMC):
"""
Uses differential evolution adaptive Metropolis (DREAM) MCMC as described
    in [1] to perform posterior sampling.
In each step of the algorithm N chains are evolved using the following
steps:
1. Select proposal::
x_proposed = x[i,r] + (1 + e) * gamma(delta, d, p_g) *
sum_j=1^delta (X[i,r1[j]] - x[i,r2[j]])
+ epsilon
where [r1[j], r2[j]] are random chain indices chosen (without replacement)
from the ``N`` available chains, which must not equal each other or ``i``,
where ``i`` indicates the current time step;
``delta ~ uniform_discrete(1,D)`` determines the number of terms to include
in the summation::
e ~ U(-b*, b*) in d dimensions;
gamma(delta, d, p_g) =
if p_g < u1 ~ U(0,1):
2.38 / sqrt(2 * delta * d)
else:
1
``epsilon ~ N(0,b)`` in ``d`` dimensions (where ``d`` is the dimensionality
of the parameter vector).
2. Modify random subsets of the proposal according to a crossover
probability CR::
for j in 1:N:
if 1 - CR > u2 ~ U(0,1):
x_proposed[j] = x[j]
else:
x_proposed[j] = x_proposed[j] from 1
If ``x_proposed / x[i,r] > u ~ U(0,1)``, then
    ``x[i+1,r] = x_proposed``; otherwise, ``x[i+1,r] = x[i,r]``.
Here b > 0, b* > 0, 1 >= p_g >= 0, 1 >= CR >= 0.
*Extends:* :class:`MultiChainMCMC`
[1] "Accelerating Markov Chain Monte Carlo Simulation by Differential
Evolution with Self-Adaptive Randomized Subspace Sampling",
2009, Vrugt et al.,
International Journal of Nonlinear Sciences and Numerical Simulation.
"""
def __init__(self, chains, x0, sigma0=None):
super(DreamMCMC, self).__init__(chains, x0, sigma0)
# Need at least 3 chains
if self._chains < 3:
raise ValueError('Need at least 3 chains.')
# Set initial state
self._running = False
# Current points and proposed points
self._current = None
self._current_log_pdfs = None
self._proposed = None
#
# Default settings
#
# Gaussian proposal std.
self._b = 0.01
# b* distribution for e ~ U(-b*, b*)
self._b_star = 0.01
# Probability of higher gamma versus regular
self._p_g = 0.2
# Determines maximum delta to choose in sums
self._delta_max = None
self.set_delta_max(min(3, self._chains - 2))
# Initial phase
self._initial_phase = True
# Variable or constant crossover mode
self._constant_crossover = False
# Constant CR probability
self._CR = 0.5
        # Size of multinomial crossover dist for variable CR prob
self._nCR = 3
def ask(self):
""" See :meth:`pints.MultiChainMCMC.ask()`. """
# Initialise on first call
if not self._running:
self._initialise()
# Propose new points
# Note: Initialise sets the proposal for the very first step
if self._proposed is None:
self._proposed = np.array(self._current, copy=True)
for j in range(self._chains):
# Select initial proposal for chain j
delta = int(np.random.choice(self._delta_max, 1)[0] + 1)
if self._p_g < np.random.rand():
gamma = 2.38 / np.sqrt(2 * delta * self._n_parameters)
else:
gamma = 1.0
e = self._b_star * self._mu
e = np.random.uniform(-e, e)
dX = 0
for k in range(0, delta):
r1, r2 = self._draw(j)
dX += (1 + e) * gamma * (
self._current[r1] - self._current[r2])
self._proposed[j] += dX + np.random.normal(
loc=0, scale=np.abs(self._b * self._mu),
size=self._n_parameters)
# Set crossover probability
if self._constant_crossover:
CR = self._CR
else:
# Select CR from multinomial distribution
self._m[j] = np.nonzero(
np.random.multinomial(self._nCR, self._p))[0][0]
CR = (self._m[j] + 1) / self._nCR
self._L[self._m[j]] += 1
                # Randomly set elements of the proposal back to the original
for d in range(self._n_parameters):
if 1 - CR > np.random.rand():
self._proposed[j][d] = self._current[j][d]
# Set as read only
self._proposed.setflags(write=False)
# Return proposed points
return self._proposed
def current_log_pdfs(self):
""" See :meth:`MultiChainMCMC._log_init()`. """
return self._current_log_pdfs
def _initialise(self):
"""
Initialises the routine before the first iteration.
"""
if self._running:
raise RuntimeError('Already initialised.')
# Propose x0 as first points
self._current = None
self._current_log_pdfs = None
self._proposed = np.array(self._x0, copy=True)
# Set proposal as read-only
self._proposed.setflags(write=False)
# Set mu
self._mu = np.mean(self._x0, axis=0)
# Set initial p, L and Delta
self._p = np.repeat(1.0 / self._nCR, self._nCR)
self._L = np.zeros(self._nCR)
self._delta = np.zeros(self._nCR)
# Create empty array of m indices
self._m = [0] * self._chains
# Iteration tracking for running variance
# See: https://www.johndcook.com/blog/standard_deviation/
# Algorithm based on Knuth TAOCP vol 2, 3rd edition, page 232
self._iterations = 0
self._varm = None
self._vars = None
self._variance = None
# Update sampler state
self._running = True
def in_initial_phase(self):
""" See :meth:`pints.MCMCSampler.in_initial_phase()`. """
return self._initial_phase
def _log_init(self, logger):
""" See :meth:`Loggable._log_init()`. """
# logger.add_float('Accept.')
# TODO
def _log_write(self, logger):
""" See :meth:`Loggable._log_write()`. """
# logger.log(self._acceptance)
# TODO
def name(self):
""" See :meth:`pints.MCMCSampler.name()`. """
return 'DiffeRential Evolution Adaptive Metropolis (DREAM) MCMC'
def needs_initial_phase(self):
""" See :meth:`pints.MCMCSampler.needs_initial_phase()`. """
return True
def set_initial_phase(self, initial_phase):
""" See :meth:`pints.MCMCSampler.needs_initial_phase()`. """
self._initial_phase = bool(initial_phase)
def tell(self, proposed_log_pdfs):
""" See :meth:`pints.MultiChainMCMC.tell()`. """
# Check if we had a proposal
if self._proposed is None:
raise RuntimeError('Tell called before proposal was set.')
# Ensure proposed_log_pdfs are numpy array
proposed_log_pdfs = np.array(proposed_log_pdfs)
# First points?
if self._current is None:
if not np.all(np.isfinite(proposed_log_pdfs)):
raise ValueError(
'Initial points for MCMC must have finite logpdf.')
# Accept
self._current = self._proposed
self._current_log_pdfs = np.copy(proposed_log_pdfs)
self._current_log_pdfs.setflags(write=False)
# Clear proposal
self._proposed = None
# Return first samples for chains
return self._current
# Perform iteration
next = np.copy(self._current)
next_log_pdfs = np.copy(self._current_log_pdfs)
# Sample uniform numbers
u = np.log(np.random.uniform(size=self._chains))
# Get chains to be updated
i = u < (proposed_log_pdfs - self._current_log_pdfs)
# Update (part 1)
next[i] = self._proposed[i]
next_log_pdfs[i] = proposed_log_pdfs[i]
# Warm-up? Then update CR distribution based on current & previous
if self._initial_phase and not self._constant_crossover:
# Update running mean and variance
if self._iterations == 0:
self._varm = self._current
self._variance = self._vars = self._current * 0
else:
new_varm = self._varm + (self._current - self._varm) / (
self._iterations + 1)
self._vars += (self._current - self._varm) * (
self._current - new_varm)
self._varm = new_varm
self._variance = self._vars / (self._iterations + 1)
# Update CR distribution
delta = (next - self._current)**2
for j in range(self._chains):
for d in range(0, self._n_parameters):
self._delta[self._m[j]] += (
delta[j][d] / max(self._variance[j][d], 1e-11))
self._p = self._iterations * self._chains * self._delta
d1 = self._L * np.sum(self._delta)
d1[d1 == 0] += 1e-11
self._p /= d1
d2 = max(np.sum(self._p), 1e-11)
self._p /= d2
# Update iteration count for running mean/variance
self._iterations += 1
# Update (part 2)
self._current = next
self._current_log_pdfs = next_log_pdfs
self._current_log_pdfs.setflags(write=False)
# Clear proposal
self._proposed = None
# Return samples to add to chains
self._current.setflags(write=False)
return self._current
def b(self):
"""
Returns the Gaussian scale coefficient used in updating the position of
each chain.
"""
return self._b
def b_star(self):
"""
Returns b*, which determines the weight given to other chains'
positions in determining new positions (see :meth:`set_b_star()`).
"""
return self._b_star
def constant_crossover(self):
"""
Returns ``True`` if constant crossover mode is enabled.
"""
return self._constant_crossover
def CR(self):
"""
Returns the probability of crossover occurring if constant crossover
mode is enabled (see :meth:`set_CR()`).
"""
return self._CR
def delta_max(self):
"""
Returns the maximum number of other chains' positions to use to
determine the next sampler position (see :meth:`set_delta_max()`).
"""
return self._delta_max
def _draw(self, i):
"""
Select 2 random chains, not including chain i.
"""
r1, r2 = np.random.choice(self._chains, 2, replace=False)
while(r1 == i or r2 == i or r1 == r2):
r1, r2 = np.random.choice(self._chains, 2, replace=False)
return r1, r2
def n_hyper_parameters(self):
""" See :meth:`TunableMethod.n_hyper_parameters()`. """
return 8
def nCR(self):
"""
Returns the size of the discrete crossover probability distribution
(only used if constant crossover mode is disabled), see
:meth:`set_nCR()`.
"""
return self._nCR
def p_g(self):
"""
Returns ``p_g``. See :meth:`set_p_g()`.
"""
return self._p_g
def set_b(self, b):
"""
Sets the Gaussian scale coefficient used in updating the position of
each chain (must be non-negative).
"""
if b < 0:
raise ValueError(
'Gaussian scale coefficient must be non-negative.')
self._b = b
def set_constant_crossover(self, enabled):
"""
Enables/disables constant-crossover mode (must be bool).
"""
        self._constant_crossover = True if enabled else False
token type embedding
if encoder_history_states:
assert img_feats is None, "Cannot take image features while using encoder history states"
if img_feats is not None:
if self.img_feature_type == 'dis_code':
code_emb = self.code_embeddings(img_feats)
img_embedding_output = self.img_embedding(code_emb)
elif self.img_feature_type == 'dis_code_t': # transpose
code_emb = self.code_embeddings(img_feats)
code_emb = code_emb.permute(0, 2, 1)
img_embedding_output = self.img_embedding(code_emb)
elif self.img_feature_type == 'dis_code_scale': # left scaled
code_emb = self.code_embeddings(img_feats)
img_embedding_output = self.img_embedding(code_emb)
else:
img_embedding_output = self.img_embedding(img_feats)
if self.use_img_layernorm:
img_embedding_output = self.LayerNorm(img_embedding_output)
# add dropout on image embedding
img_embedding_output = self.dropout(img_embedding_output)
# concatenate two embeddings
embedding_output = torch.cat((embedding_output, img_embedding_output), 1)
encoder_outputs = self.encoder(x=embedding_output,
attn_mask=extended_attention_mask, head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
encoder_history_states=encoder_history_states,
return_dict=return_dict)
return encoder_outputs
'''if not return_dict:
return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
)'''
def instance_bce_with_logits(logits, labels, reduction='mean'):
assert logits.dim() == 2
loss = F.binary_cross_entropy_with_logits(logits, labels, reduction=reduction)
if reduction == 'mean':
loss *= labels.size(1)
return loss
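# Example (hedged sketch): for VQA-style soft targets both tensors are 2D with
# shape (batch_size, num_answers); the sizes below are illustrative only.
#
#   logits = torch.randn(8, 3129)               # (bs, num_answers)
#   labels = torch.rand(8, 3129)                # soft target scores in [0, 1]
#   loss = instance_bce_with_logits(logits, labels)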
class DistilImageBertForSequenceClassification(DistilBertModel):
"""
Modified from BertForSequenceClassification to support oscar training.
"""
def __init__(self, config):
super(DistilImageBertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.loss_type = config.loss_type
self.config = config
if config.img_feature_dim > 0:
self.bert = DistilBertImgModel(config)
else:
self.bert = DistilBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if hasattr(config, 'classifier'):
if not hasattr(config, 'cls_hidden_scale'):
config.cls_hidden_scale = 2
if config.classifier == 'linear':
self.classifier = nn.Linear(config.hidden_size,
self.config.num_labels)
elif config.classifier == 'mlp':
self.classifier = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale),
nn.ReLU(),
nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels)
)
else:
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # original
self.apply(self.init_weights)
def init_code_embedding(self, em):
self.bert.code_embeddings.weight.data = em.clone()
def forward(self, input_ids, attention_mask=None, labels=None, head_mask=None, img_feats=None,
output_attentions=None, output_hidden_states=None, return_dict=None
):
outputs = self.bert(input_ids, attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats,
output_attentions=output_attentions, output_hidden_states=output_hidden_states,
return_dict=return_dict)
hidden_state = outputs[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
logits = self.classifier(pooled_output) # (bs, dim)
        loss = None
        if labels is not None:
if self.num_labels == 1: # doing regression
loss_fct = MSELoss()
labels = labels.to(torch.float)
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
if self.loss_type == 'kl':
# KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py
loss_fct = torch.nn.KLDivLoss(reduction="batchmean")
log_softmax = torch.nn.LogSoftmax(dim=-1)
reshaped_logits = logits.contiguous().view(-1, 3129)
reshaped_logits = log_softmax(reshaped_logits)
loss = loss_fct(reshaped_logits, labels.contiguous())
elif self.loss_type == 'bce': # [VQA]
loss = instance_bce_with_logits(logits, labels)
else: # cross_entropy [GQA, Retrieval, Captioning]
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
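# Example (hedged sketch): a forward pass with text tokens plus precomputed
# region features, following the signature of forward() above. The config
# fields and tensor sizes used here are assumptions for illustration only.
#
#   model = DistilImageBertForSequenceClassification(config)
#   input_ids = torch.randint(0, config.vocab_size, (2, 40))   # (bs, seq_len)
#   img_feats = torch.randn(2, 50, config.img_feature_dim)     # (bs, regions, feat_dim)
#   attention_mask = torch.ones(2, 40 + 50, dtype=torch.long)  # text + image positions
#   logits = model(input_ids, attention_mask=attention_mask, img_feats=img_feats)[0]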
class DistilImageBertForMultipleChoice(DistilBertModel):
"""
Modified from BertForMultipleChoice to support oscar training.
"""
def __init__(self, config):
super(DistilImageBertForMultipleChoice, self).__init__(config)
self.loss_type = config.loss_type
if config.img_feature_dim > 0:
self.bert = DistilBertImgModel(config)
else:
self.bert = DistilBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if hasattr(config, 'classifier'):
if not hasattr(config, 'cls_hidden_scale'): config.cls_hidden_scale = 2
if config.classifier == 'linear':
self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels)
elif config.classifier == 'mlp':
self.classifier = nn.Sequential(
nn.Linear(config.num_choice*config.hidden_size, config.hidden_size*config.cls_hidden_scale),
nn.ReLU(),
nn.Linear(config.hidden_size*config.cls_hidden_scale, self.config.num_labels)
)
else:
self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels) # original
self.apply(self.init_weights)
def forward(self, input_ids, attention_mask=None, labels=None,
head_mask=None, img_feats=None, output_attentions=None,
output_hidden_states=None, return_dict=None):
num_choices = input_ids.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_img_feats = img_feats.view(-1, img_feats.size(-2), img_feats.size(-1)) if img_feats is not None else None
if isinstance(self.bert, DistilBertImgModel):
outputs = self.bert(flat_input_ids, attention_mask=flat_attention_mask, head_mask=head_mask,
img_feats=flat_img_feats,output_attentions=output_attentions,
output_hidden_states=output_hidden_states, return_dict=return_dict)
else:
outputs = self.bert(flat_input_ids, attention_mask=flat_attention_mask, head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
pooled_output = self.dropout(pooled_output)
# reshaped_pool_output
reshaped_pool_output = pooled_output.view(-1, self.config.num_choice*(pooled_output.shape[1]))
logits = self.classifier(reshaped_pool_output)
        loss = None
        if labels is not None:
if self.loss_type == 'bce':
loss = instance_bce_with_logits(logits, labels.view(-1, self.config.num_labels))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class DistilBertForImageCaptioning(CaptionPreTrainedModel):
"""
Bert for Image Captioning.
"""
def __init__(self, config):
super(DistilBertForImageCaptioning, self).__init__(config)
self.config = config
self.bert = DistilBertImgModel(config)
self.transform = BertPredictionHeadTransform(config)
bert_embedding_weight = self.bert.embeddings.word_embeddings.weight
self.decoder = nn.Linear(bert_embedding_weight.size(1),
bert_embedding_weight.size(0), bias=False)
self.loss = nn.CrossEntropyLoss(reduction='mean')
self.drop_worst_ratio = 0.2
def forward(self, *args, **kwargs):
is_decode = kwargs.get('is_decode', False)
if is_decode:
return self.generate(*args, **kwargs)
else:
return self.encode_forward(*args, **kwargs)
def encode_forward(self, input_ids, img_feats, attention_mask, masked_pos, masked_ids=None,
token_type_ids=None, position_ids=None, head_mask=None,
is_training=True, encoder_history_states=None):
outputs = self.bert(input_ids, img_feats=img_feats, attention_mask=attention_mask,
head_mask=head_mask, return_dict=False,
encoder_history_states=encoder_history_states)
'''if not return_dict:
return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
)'''
sequence_output = outputs[0][:, :masked_pos.shape[-1], :]
if is_training:
# num_masks_in_batch * hidden_size
sequence_output_masked = sequence_output[masked_pos==1, :]
transformed_output_masked = self.transform(sequence_output_masked)
class_logits = self.decoder(transformed_output_masked)
outputs = (class_logits,) + outputs[1:]
else:
class_logits = self.decoder(self.transform(sequence_output))
outputs = (class_logits,) + outputs[1:]
return outputs
def prepare_inputs_for_generation(self, curr_ids, past=None):
# NOTE: if attention is on, it should be the token used to mask words in training
mask_token_id = self.mask_token_id
batch_size = curr_ids.shape[0]
mask_ids = torch.full(
(batch_size, 1), mask_token_id, dtype=torch.long, device=curr_ids.device
)
def _slice(t, start, end):
if t is None:
return t
assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len)
return t[:, start: end]
def _remove_elements(t, start, end):
if t is None:
return t
assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len)
return torch.cat([t[:, :start], t[:, end:]], dim=1)
if past is None:
input_ids = torch.cat([curr_ids, mask_ids], dim=1)
curr_len = input_ids.shape[1]
full_len = self.max_seq_len + self.od_labels_len + self.img_seq_len
assert self.full_attention_mask.shape == (batch_size,
full_len, full_len)
def _remove_rows_cols(t, row_start, row_end, col_start, col_end):
t00 = t[:, :row_start, :col_start]
t01 = t[:, :row_start, col_end:]
t10 = t[:, row_end:, :col_start]
t11 = t[:, row_end:, col_end:]
res = torch.cat([torch.cat([t00, t01], dim=2), torch.cat([t10, t11],
dim=2)], dim=1)
assert res.shape == (t.shape[0], t.shape[1]-row_end+row_start,
t.shape[2]-col_end+col_start)
return res
seq_start = curr_len
seq_end = self.max_seq_len
attention_mask = _remove_rows_cols(self.full_attention_mask, seq_start,
seq_end, seq_start, seq_end)
masked_pos = _remove_elements(self.full_masked_pos, seq_start, seq_end)
token_type_ids = _remove_elements(self.full_token_type_ids, seq_start, seq_end)
position_ids = _remove_elements(self.full_position_ids, seq_start, seq_end)
img_feats = self.img_feats
if self.add_od_labels:
assert self.od_label_ids.shape[1] == self.od_labels_len
input_ids = torch.cat([input_ids, self.od_label_ids], dim=1)
else:
last_token = curr_ids[:, -1:]
# The representation of last token should be re-computed, because
# it depends on both self-attention context and input tensor
input_ids = torch.cat([last_token, mask_ids], dim=1)
start_pos = curr_ids.shape[1] - 1
end_pos = start_pos + input_ids.shape[1]
masked_pos = _slice(self.full_masked_pos, start_pos, end_pos)
# token_type_ids = _slice(self.full_token_type_ids, start_pos, end_pos)
# position_ids = _slice(self.full_position_ids, start_pos, end_pos)
img_feats = None
assert past[0].shape[0] == batch_size
if self.prev_encoded_layers is None:
assert start_pos == 1 # the first token after BOS
assert past[0].shape[1] == 2 + self.od_labels_len + self.img_seq_len
# reorder to [od_labels, img_feats, sentence]
self.prev_encoded_layers = [
torch.cat([x[:, 2:, :], x[:, :start_pos,:]], dim=1)
for x in past]
s2s = self.full_attention_mask[:, :self.max_seq_len,
:self.max_seq_len]
s2i = self.full_attention_mask[:, :self.max_seq_len,
self.max_seq_len:]
i2s = self.full_attention_mask[:, self.max_seq_len:,
:self.max_seq_len]
i2i = self.full_attention_mask[:, self.max_seq_len:,
self.max_seq_len:]
self.full_attention_mask = torch.cat(
[torch.cat([i2i, i2s], dim=2),
torch.cat([s2i, s2s], dim=2)],
dim=1)
else:
assert start_pos > 1
assert past[0].shape[1] == 2
self.prev_encoded_layers = [torch.cat([x, p[:, :-1, :]], dim=1)
for x, p in zip(self.prev_encoded_layers, past)]
attention_mask = self.full_attention_mask[:,
self.od_labels_len+self.img_seq_len+start_pos: self.od_labels_len+self.img_seq_len+end_pos,
:self.od_labels_len+self.img_seq_len+end_pos]
return {'input_ids': input_ids, 'img_feats': img_feats,
'masked_pos': masked_pos, 'attention_mask': attention_mask,
'is_training': False,
'encoder_history_states': self.prev_encoded_layers}
def get_output_embeddings(self):
return self.decoder
def generate(self, img_feats, attention_mask, masked_pos, token_type_ids=None,
position_ids=None, head_mask=None, input_ids=None, max_length=None,
do_sample=None, num_beams=None, temperature=None, top_k=None, top_p=None,
repetition_penalty=None, bos_token_id=None, pad_token_id=None,
eos_token_ids=None, mask_token_id=None, length_penalty=None, num_return_sequences=None,
num_keep_best=1, is_decode=None,
add_od_labels=False, od_labels_start_posid=None,
use_cbs=False, fsm=None, num_constraints=None,
min_constraints_to_satisfy=None, use_hypo=False,
):
""" Generates captions given image features
"""
assert is_decode
batch_size = img_feats.shape[0]
self.img_seq_len = img_feats.shape[1]
self.max_seq_len = max_length
self.mask_token_id = mask_token_id
self.prev_encoded_layers = None
        # NOTE: num_keep_best is not equivalent to num_return_sequences
# num_keep_best is the number of hypotheses to keep in beam search
# num_return_sequences is the repeating times of input, coupled with
# do_sample=True can generate more than one samples per image
self.num_keep_best = num_keep_best
vocab_size = self.config.vocab_size
if not use_cbs:
num_fsm_states = 1
else:
b, num_fsm_states, f1, v = fsm.shape
assert b==batch_size and v==vocab_size and f1==num_fsm_states
self.add_od_labels = add_od_labels
# avoid position_ids collision of caption and od labels
self.od_labels_start_posid = max(od_labels_start_posid, self.max_seq_len)
if self.add_od_labels:
# get od labels part from input_ids
assert input_ids.shape[0] == batch_size
od_label_ids = input_ids[:, self.max_seq_len:]
self.od_labels_len = input_ids.shape[1] - self.max_seq_len
self.od_label_ids = self._expand_for_beams(od_label_ids, num_beams,
num_fsm_states)
input_ids = None
else:
self.od_labels_len = 0
self.od_label_ids = None
assert input_ids.shape == (batch_size, self.max_seq_len)
input_ids = None
if input_ids is None:
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Tintri, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import datetime
import argparse
import json
import smtplib
import tintri_1_1 as tintri
from email.mime.text import MIMEText
"""
    This Python script obtains VM scale-out recommendations from a TGC and optionally e-mails and/or accepts them.
Command usage:
"""
# For exhaustive messages on the console, set it to True; otherwise keep it False
debug_mode = False
beans = "com.tintri.api.rest.v310.dto.domain.beans."
# Global text
output_text = []
# Class for VMstore pool.
class VmstorePool:
def __init__(self, name, uuid):
self.name = name
self.uuid = uuid
self.reco_uuid = None
def get_name(self):
return self.name
def get_uuid(self):
return self.uuid
def get_reco_uuid(self):
return self.reco_uuid
def set_reco_uuid(self, reco_uuid):
self.reco_uuid = reco_uuid
# Output functions
def print_with_prefix(prefix, out):
print(prefix + out)
return
def print_debug(out):
if debug_mode:
print_with_prefix("[DEBUG] : ", out)
return
def print_info(out):
print_with_prefix("[INFO] : ", out)
return
def print_error(out):
print_with_prefix("[ERROR] : ", out)
return
# Buffers the output for later.
def buffer(buf):
#print_info(buf)
output_text.append(buf)
# Format JSON into something readable.
def format_json(out):
return json.dumps(out, sort_keys=True, indent=4, separators=(',', ': '))
# Convert VM UUIDs to VM names
# This is a simple but time consuming way.
def get_vm_names(server, tgc_sess_id, vm_uuids):
vm_names = []
for vm_uuid in vm_uuids:
vm_url = "/v310/vm/" + vm_uuid
r = tintri.api_get(server, vm_url, tgc_sess_id)
print_debug("The JSON response of the vm get invoke to the server " + \
server + " is: " + r.text)
vm_info = r.json()
vm_names.append(vm_info["vmware"]["name"])
return vm_names
# Return the VMstore pools from a TGC server.
def get_pools(server, tgc_sess_id):
vmstore_pools = []
url = "/v310/vmstorePool"
r = tintri.api_get(server, url, tgc_sess_id)
print_debug("The JSON response of the get invoke to the server " +
server + " is: " + r.text)
vm_paginated_result = r.json()
num_pools = int(vm_paginated_result["filteredTotal"])
if (num_pools) == 0:
raise tintri.TintriRequestsException("No VMstore Pools present")
# Load up the pools
items = vm_paginated_result["items"]
for pool in items:
print_info(pool["name"] + ": " + pool["uuid"]["uuid"])
vmstore_pool = VmstorePool(pool["name"], pool["uuid"]["uuid"])
vmstore_pools.append(vmstore_pool)
return vmstore_pools
# Print the recommendation issues.
def get_issues(reco):
if "issues" in reco:
buffer("Issues")
issues = reco["issues"]
for issue in issues:
name = issue["vmStoreDisplayName"]
if "flashInfo" in issue:
buffer(" " + name + ": Flash - " + issue["flashInfo"]["summary"])
if "iopsInfo" in issue:
buffer(" " + name + ": IOPS - " + issue["iopsInfo"]["summary"])
if "spaceInfo" in issue:
buffer(" " + name + ": Space - " + issue["spaceInfo"]["summary"])
else:
buffer("No issues")
# Get the action issue types.
# There is an empty issue types bug.
def get_action_issue_types(actions):
issue_types_str = ""
for action in actions:
issueTypes = action["issueTypes"]
if (len(issueTypes) == 0):
return "UNKNOWN"
# Collect issue types
for issueType in issueTypes:
issue_types_str += issueType + ","
issue_types_str = issue_types_str[:-1] # remove trailing comma
return issue_types_str
# Print the action groups
def get_action_groups(reco):
if not ("actionGroups" in reco):
buffer("No ation groups")
return
buffer("Action Groups")
action_groups = reco["actionGroups"]
print_debug("Groups: " + format_json(action_groups))
for action_group in action_groups:
actions = action_group["actions"]
issueTypes = get_action_issue_types(actions)
buffer(" Actions for " + issueTypes)
for action in actions:
if ("targetVmDisplayName" in action):
vm_display_name = action["targetVmDisplayName"]
else:
vm_display_name = action["targetVmTintriUuid"]
buffer(" " + vm_display_name + " on " + action["sourceDatastoreDisplayName"] + \
" migrates to " + action["destinationDatastoreDisplayName"])
# Get the outcome summary.
def get_my_summary(server_name, sessiond_id, outcome):
my_summary = ""
if "flashInfo" in outcome:
my_summary += outcome["flashInfo"]["issueType"] + ": "
my_summary += str(outcome["flashInfo"]["flashHitRatePercent"]) + " percent predicted flash hit rate"
elif "iopsInfo" in outcome:
my_summary += outcome["iopsInfo"]["issueType"] + ": "
my_summary += str(outcome["iopsInfo"]["predictedOutcomeLoadWeekPercent"]) + " percent predicted load"
elif "protectionInfo" in outcome:
my_summary += outcome["protectionInfo"]["issueType"] + ": "
if ("summary" in outcome["protectionInfo"]):
my_summary += outcome["protectionInfo"]["summary"]
else:
my_summary += "VMs not able to replicatte: "
if ("vmTintriUuids" in outcome["protectionInfo"]):
vm_names = get_vm_names(server_name, sessiond_id, \
outcome["protectionInfo"]["vmTintriUuids"])
my_summary += " ".join(vm_names)
else:
my_summary += " No UUIDs"
elif "spaceInfo" in outcome:
my_summary += outcome["spaceInfo"]["issueType"] + ": "
my_summary += str(outcome["spaceInfo"]["spaceChangedPhysicalGiB"]) + " change in GiB"
return my_summary
# Print outcomes
def get_outcomes(server_name, sessiond_id, reco):
if (not ("expectedOutcomes" in reco)):
buffer("No outcomes")
return
buffer("Outcomes")
outcomes = reco["expectedOutcomes"]
for outcome in outcomes:
my_summary = get_my_summary(server_name, sessiond_id, outcome)
buffer(" " + outcome["vmStoreDisplayName"] + ": " + my_summary)
print_debug(format_json(outcome))
# Get the current recommendation
def get_current_reco(server, tgc_sess_id, pool):
print_debug("Looking for recommendation on pool " + pool.get_name())
reco_url = "/v310/vmstorePool/" + pool.get_uuid() + "/recommendation/current"
r = tintri.api_get(server, reco_url, tgc_sess_id)
print_debug("The JSON response of the reco get invoke to the server " +
server + " is: " + r.text)
reco = r.json()
return reco
# Execute and accept the recommendation
def execute_reco(server, tgc_sess_id, pool):
reco_url = "/v310/vmstorePool/" + pool.get_uuid() + "/recommendation/" + \
pool.get_reco_uuid() + "/accept"
r = tintri.api_post(server, reco_url, None, tgc_sess_id)
print_debug("The JSON response of the accept reco invoke to the server " +
server + " is: " + r.text)
if (r.status_code != 204):
msg = "The HTTP response for the accept reco post invoke to the server is " + \
server + "not 200, but is: " + str(r.status_code) + "."
raise tintri.TintriApiException(msg, r.status_code, reco_url, "No payload", r.text)
# Send e-mail.
def send_email(server_name, from_addr, to_addrs, smtp_server, output_text):
out_buf = ""
for text in output_text:
out_buf += text + "\n"
msg = MIMEText(out_buf)
msg['Subject'] = "VM Scale-out Recommendations from TGC " + server_name
msg['From'] = from_addr
msg['To'] = ','.join(to_addrs)
print_info("SMTP server: " + smtp_server)
print_info("From: " + msg['From'])
print_info("To: " + msg['To'])
print_info("MIME text:\n" + str(msg) + "\n")
try:
s = smtplib.SMTP(smtp_server)
s.sendmail(from_addr, to_addrs, msg.as_string())
s.quit()
except smtplib.SMTPException as smtp_err:
print_error("SMTP error: " + smtp_err.__str__())
# main
accept_reco = False
from_email = ""
to_email = ""
smtp_server = ""
pools = {}
# Forge the command line argument parser.
gen_descrip = "Get available recommendations and print. " + \
"Optionally send mail and/or accept recommendation."
epilog = "--you and --me are required to send e-mail in the form [email protected]. " + \
"If --smtp is not sepcified then, smtp defaults to smtp.x.y."
parser = argparse.ArgumentParser(description=gen_descrip, epilog=epilog)
parser.add_argument("server_name", help="TGC server name")
parser.add_argument("user_name", help="TGC user name")
parser.add_argument("password", help="<PASSWORD>")
parser.add_argument("--accept", action = "store_true", help="accept the recommendation")
parser.add_argument("--you", help="e-mail address to send ([email protected])")
parser.add_argument("--me", help="e-mail address to send from ([email protected])")
parser.add_argument("--smtp", help="SMTP server. Default: 'smtp.x.y>'")
args = parser.parse_args()
# Check for an e-mail address.
if args.me != None:
from_email = args.me
print_info("from e-mail: " + args.me)
if args.you != None:
to_email = args.you
print_info("to e-mail: " + args.you)
if (args.me != None and args.you != None):
if args.smtp != None:
smtp_server = args.smtp
else:
from_email_parts = from_email.split("@")
smtp_server = "smtp." + from_email_parts[1]
print_info("Default SMTP server: " + smtp_server)
# Check for recommendation acceptance.
if args.accept:
accept_reco = True
print_info("Accept recommendation")
# Collect the required parameters.
server_name = args.server_name
user_name = args.user_name
password = args.password
# Get the product name
try:
r = tintri.api_version(server_name)
json_info = r.json()
preferred_version = json_info['preferredVersion']
product_name = json_info['productName']
if json_info['productName'] != "Tintri Global Center":
raise tintri.TintriRequestsException("server needs to be a TGC.")
versions = preferred_version.split(".")
major_version = versions[0]
minor_version = int(versions[1])
if major_version != "v310":
raise tintri.TintriRequestsException("Incorrect major version: " + major_version + ". Should be v310.")
if minor_version < 51:
raise tintri.TintriRequestsException("Incorrect minor Version: " + minor_version + ". Should be 51 or greater")
# Login to Tintri server
session_id = tintri.api_login(server_name, user_name, password)
except tintri.TintriRequestsException as tre:
print_error(tre.__str__())
exit(-2)
except tintri.TintriApiException as tae:
print_error(tae.__str__())
exit(-3)
# Let's get to work.
reco_available = False
try:
pools = get_pools(server_name, session_id)
# For each pool, get the current recommendation
for | |
# pydmd/mosesdmd_grouped.py
"""
Derived module from dmdbase.py for higher order dmd.
Reference:
- <NAME>, <NAME>, Higher Order Dynamic Mode Decomposition.
Journal on Applied Dynamical Systems, 16(2), 882-925, 2017.
"""
import numpy as np
import scipy as sp
from scipy.linalg import pinv2
from mosessvd import MOSESSVD
from numba import jit
from past.utils import old_div
from .mosesdmdbase import MOSESDMDBase
def pinv(x): return pinv2(x, rcond=10 * np.finfo(float).eps)
class MOSESDMD_grouped(MOSESDMDBase):
"""
MOSESDMD for processing multiple groups of sequential snapshots.
Input is a list of all the groups of snapshots
"""
def __init__(self, svd_rank=0, tlsq_rank=0, exact=False, opt=False, d=1,
chunk_size=None, dtype=np.complex64, projection=True,
sqrt_K=True, compute_amplitudes_method=0):
super(MOSESDMD_grouped, self).__init__(svd_rank, tlsq_rank, exact, opt)
self.d = d
self.chunk_size = chunk_size
self.U = None
self.s = None
self.V = None
self.K_list = None
self.M = None
self.dtype = dtype
self.projection = projection
self.sqrt_K = sqrt_K
self.compute_amplitudes_method = compute_amplitudes_method
def linsolve(self, A, B):
return np.matmul(B, np.linalg.inv(A))
# @profile
def fit(self, X):
"""
Compute the Dynamic Modes Decomposition to the input data.
:param X: the input snapshots.
:type X: numpy.ndarray or iterable
"""
for i in range(len(X)):
if X[i].dtype != self.dtype:
X[i] = X[i].astype(self.dtype)
# convert the input list to a tuple
# necessary for numba
X = tuple(X)
self._snapshots = X
# X, Y = self._compute_tlsq(X, Y, self.tlsq_rank) not implemented
msvd = MOSESSVD(rank=self.svd_rank)
# calculate the width of M
M_width = 0
for group in self._snapshots:
M_width += group.shape[1] - self.d + 1
# number of full chunks that fit in the stacked snapshots.
whole_chunks = int(np.floor(M_width / self.chunk_size)) - 1
# MOSES SVD iteration loop
i = -1
for i in range(whole_chunks):
chunk = self.get_chunk(self.snapshots, self.chunk_size, i, self.d, False)
msvd.update(chunk)
# final chunk that contains the remaining snapshots
chunk = self.get_chunk(self.snapshots, self.chunk_size, i+1, self.d, True)
msvd.update(chunk)
# get the SVD matrices
U, s, V = msvd.S.astype(self.dtype), msvd.Gamma.astype(self.dtype), msvd.Q.astype(self.dtype)
self.U, self.s, self.V = U, s, V
M = np.zeros((self.svd_rank, M_width)).astype(self.dtype)
U_conj = np.ascontiguousarray(U.conj().T) # for M_projection_value()
# calculate M
if self.projection:
# loop that projects the stacked snapshots onto U
for i in range(self.svd_rank):
M[i, :] = self.M_projection_value(self._snapshots, U_conj, i, self.d, self.dtype)
else:
M = s.dot(V.conj().T)
self.M = M
        # calculates which columns to delete to get MX and MY
        # for MX the last column of each time series is deleted
        # for MY the first column of each time series is deleted
len_snaps_each = np.array([group.shape[1] - self.d + 1 for group in self._snapshots])
ind_snaps_groups = np.array([0])
ind_snaps_groups = np.append(ind_snaps_groups, np.cumsum(len_snaps_each))
ind_del_0 = ind_snaps_groups[:-1]
ind_del_1 = ind_snaps_groups[1:] - 1
MX = np.delete(M, ind_del_1, axis=1)
MY = np.delete(M, ind_del_0, axis=1)
Kf = MY.dot(pinv(MX))
Kb = MX.dot(pinv(MY))
Kbinv = pinv(Kb)
# How to calculate K from Kb and Kf
if self.sqrt_K == "mean":
K = (Kf + Kbinv) / 2
if self.sqrt_K == "back":
K = Kbinv
elif self.sqrt_K:
K = sp.linalg.sqrtm(Kf.dot(Kbinv))
else:
K = Kf
self.Atilde = K
K_eigval, K_eigvec = np.linalg.eig(K)
self._eigs = K_eigval
modes_full = U.dot(K_eigvec.astype(self.dtype))
self._modes = modes_full[:self._snapshots[0].shape[0]]
# Default timesteps
self.original_time = {'t0': 0, 'tend': self._snapshots[0].shape[1] - 1, 'dt': 1}
self.dmd_time = {'t0': 0, 'tend': self._snapshots[0].shape[1] - 1, 'dt': 1}
"""
Determines the amplitude computation method
0: Globally fitted amplitudes using U S V, but truncated to the length of a single snapshot
1: Globally fitted amplitudes using full U S V, can cause memory crash
2: Amplitudes of the first time series
"""
if self.compute_amplitudes_method == 0:
self._b = self._compute_amplitudes_average(modes_full, self._snapshots,
self._eigs, self.opt, method=0)
if self.compute_amplitudes_method == 1:
self._b = self._compute_amplitudes_average(modes_full, self._snapshots,
self._eigs, self.opt, method=1)
if self.compute_amplitudes_method == 2:
self._b = self._compute_amplitudes(self._modes, self._snapshots[0],
self._eigs, self.opt)
return self
# get the i-th reconstructed time series
def reconstructed_data_i(self, i):
return self.modes.dot(self.dynamics_i(i))
# get the dynamics of the i-th reconstructed time series
def dynamics_i(self, i):
self.original_time = {'t0': 0, 'tend': self._snapshots[i].shape[1] - 1, 'dt': 1}
self.dmd_time = {'t0': 0, 'tend': self._snapshots[i].shape[1] - 1, 'dt': 1}
amplitudes = self._compute_amplitudes(self._modes, self._snapshots[i],
self._eigs, self.opt)
omega = old_div(np.log(self.eigs), self.original_time['dt'])
vander = np.exp(
np.outer(omega, self.dmd_timesteps - self.original_time['t0']))
return vander * amplitudes[:, None]
# compute the globally fitted initial amplitudes
def _compute_amplitudes_average(self, modes, snapshots, eigs, opt, method):
"""
Compute the amplitude coefficients for each trajectory. If `opt` is False the amplitudes
are computed by minimizing the error between the modes and the first
snapshot; if `opt` is True the amplitudes are computed by minimizing
the error between the modes and all the snapshots, at the expense of
bigger computational cost.
:param numpy.ndarray modes: 2D matrix that contains the modes, stored
by column.
:param numpy.ndarray snapshots: 2D matrix that contains the original
snapshots, stored by column.
linear operator.
:param bool opt: flag for computing the optimal amplitudes of the DMD
modes, minimizing the error between the time evolution and all
the original snapshots. If false the amplitudes are computed
using only the initial condition, that is snapshots[0].
:return: the amplitudes array
:rtype: numpy.ndarray
References for optimal amplitudes:
Jovanovic et al. 2014, Sparsity-promoting dynamic mode decomposition,
https://hal-polytechnique.archives-ouvertes.fr/hal-00995141/document
"""
if opt:
# compute the vandermonde matrix
timesteps = []
dt = self.original_time['dt']
for sn in snapshots:
t0 = 0
t1 = dt * (sn.shape[1] - 1)
timesteps.append(np.arange(t0, t1+1e-10, dt))
timesteps = np.hstack(timesteps)
# use the first n rows for the computation
# using the full matrices is very expensive
n = self._snapshots[0].shape[0]
if method == 1:
U, s, V = self.U[:n], np.diag(self.s)[:n], self.V.conj().T
modes = modes[:n]
else:
U, s, V = self.U, np.diag(self.s), self.V.conj().T
timesteps = timesteps[:V.shape[1]]
omega = old_div(np.log(eigs), dt)
vander = np.exp(
np.multiply(*np.meshgrid(omega, timesteps))).T
P = np.multiply(
np.dot(modes.conj().T, modes),
np.conj(np.dot(vander, vander.conj().T)))
tmp = (np.dot(np.dot(U, np.diag(s)), V)).conj().T
q = np.conj(np.diag(np.dot(np.dot(vander, tmp), modes)))
a = np.linalg.solve(P, q)
else:
a = np.linalg.lstsq(modes, snapshots.T[0], rcond=None)[0]
return a
@staticmethod
@jit(nopython=True)
def M_projection_value(snapshots, U_conj, index_i, d, dtype):
"""
Generates the i-th row from the matrix product of U and the stacked snapshots.
This projects the stacked snapshots to the subspace of U
Parameters
----------
snapshots : numpy.ndarray
Snapshot matrix
U_conj : numpy.ndarray
Complex conjugate of U matrix. It is more efficient to do the
conjugate transpose outside this method
index_i : int
Index i for the M matrix
d : int
stacking depth of the snapshots
dtype : numpy.dtype
Target datatype.
Returns
-------
value_row : The i-th row of M
"""
U_row = U_conj[index_i]
snapshot_length = snapshots[0].shape[0]
length_j = 0
for i in snapshots:
length_j += i.shape[1] - d + 1
value_row = np.zeros(length_j).astype(dtype)
index_j = 0
for group_ind in range(len(snapshots)):
group = snapshots[group_ind]
for group_j in range(group.shape[1] - d + 1):
value = dtype(0)
for m_slice_nr in range(d):
m_slice = group[:, group_j+d-1 - m_slice_nr]
u_slice = U_row[m_slice_nr * snapshot_length : (m_slice_nr+1) * snapshot_length]
value += u_slice.dot(m_slice)
value_row[index_j] = value
index_j += 1
return value_row
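    # Equivalent dense computation (hedged sketch, ignoring the memory savings of
    # the jit loop above): for a single group, row index_i of M is U^H applied to
    # the d-stacked snapshot matrix
    #
    #   stacked = np.vstack([group[:, d - 1 - k: group.shape[1] - k]
    #                        for k in range(d)])
    #   row_i = U_conj[index_i].dot(stacked)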
# This is a very ugly method. Be warned
@staticmethod
def get_chunk(snapshots, chunk_size, i, d, get_remaining):
"""
This method generates stacked snapshot data chunks for the MOSES SVD loop.
It handles the stacking of the snapshots. It ensures proper handling of
        both ends of a time series. It ensures that data from one time series
doesn't bleed over to an adjacent one at both ends due to the time-delay stacking.
The last chunk will usually be between chunk_size and 2*chunk_size in size.
This is done to avoid generating a small final chunk as it can break MOSES SVD.
Parameters
----------
snapshots : numpy.ndarray
Snapshot matrix
chunk_size : int
Desired size of a chunk.
i : int
Index of the chunk.
d : int
stacking depth of the snapshots
get_remaining : boolean
            If set to True, generates a chunk containing all the remaining data.
Intended for the final chunk.
Returns
-------
chunk : numpy.ndarray
A chunk of stacked snapshot data for MOSES SVD.
"""
"""
The way this works is by generating arrays that label each snapshot by
the index of the source time series (group_numbers) and
a modified index that excludes the first d-1 snapshots from each series (effective_indexes).
The start and end j indexes in the stacked snapshot matrix is calculated.
numpy.nonzero is then | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
import numpy as np
import unittest as ut
from numpy.lib import recfunctions as rfn
from bapsflib._hdf.maps.controls.waveform import HDFMapControlWaveform
from bapsflib.utils import _bytes_to_str
from ..file import File
from ..helpers import (
build_shotnum_dset_relation,
condition_controls,
condition_shotnum,
do_shotnum_intersection,
)
from . import TestBase, with_bf
class TestBuildShotnumDsetRelation(TestBase):
"""Test Case for build_shotnum_dset_relation"""
def setUp(self):
# setup HDF5 file
super().setUp()
self.f.add_module("Waveform", mod_args={"n_configs": 1, "sn_size": 100})
self.mod = self.f.modules["Waveform"]
def tearDown(self):
super().tearDown()
@property
def cgroup(self):
return self.f["Raw data + config/Waveform"]
@property
def map(self):
return HDFMapControlWaveform(self.cgroup)
def test_simple_dataset(self):
"""
Tests for a dataset containing recorded data for a single
configuration.
"""
# -- dset with 1 shotnum ----
self.mod.knobs.sn_size = 1
self.assertInRangeSN()
self.assertOutRangeSN()
# -- typical dset with sequential shot numbers ----
self.mod.knobs.sn_size = 100
self.assertInRangeSN()
self.assertOutRangeSN()
# -- dset with non-sequential shot numbers ----
self.mod.knobs.sn_size = 100
data = self.cgroup["Run time list"][...]
data["Shot number"] = np.append(
np.arange(5, 25, dtype=np.uint32),
np.append(
np.arange(51, 111, dtype=np.uint32), np.arange(150, 170, dtype=np.uint32)
),
)
del self.cgroup["Run time list"]
self.cgroup.create_dataset("Run time list", data=data)
self.assertInRangeSN()
self.assertOutRangeSN()
def test_complex_dataset(self):
"""
Tests for a dataset containing recorded data for a multiple
configurations.
"""
# define multiple configurations for one dataset
self.mod.knobs.n_configs = 3
# -- dset with 1 shotnum ----
self.mod.knobs.sn_size = 1
self.assertInRangeSN()
self.assertOutRangeSN()
# -- typical dset with sequential shot numbers ----
self.mod.knobs.sn_size = 100
self.assertInRangeSN()
self.assertOutRangeSN()
# -- dset with non-sequential shot numbers ----
self.mod.knobs.sn_size = 100
data = self.cgroup["Run time list"][...]
sn_arr = np.append(
np.arange(5, 25, dtype=np.uint32),
np.append(
np.arange(51, 111, dtype=np.uint32), np.arange(150, 170, dtype=np.uint32)
),
)
data["Shot number"][0::3] = sn_arr
data["Shot number"][1::3] = sn_arr
data["Shot number"][2::3] = sn_arr
del self.cgroup["Run time list"]
self.cgroup.create_dataset("Run time list", data=data)
self.assertInRangeSN()
self.assertOutRangeSN()
# -- dset without a configuration fields ----
self.mod.knobs.sn_size = 50
data = self.cgroup["Run time list"][...]
data = rfn.rename_fields(data, {"Configuration name": "oops"})
del self.cgroup["Run time list"]
self.cgroup.create_dataset("Run time list", data=data)
cdset = self.cgroup["Run time list"]
with self.assertRaises(ValueError):
build_shotnum_dset_relation(
np.empty(5, dtype=np.uint32), cdset, "Shot number", self.map, "config01"
)
def assertInRangeSN(self):
"""
        Assert shot number cases that are in range of the dataset shot numbers.
"""
cdset = self.cgroup["Run time list"]
shotnumkey = "Shot number"
configkey = "Configuration name"
last_sn = cdset[-1, "Shot number"]
shotnum_list = [
[10],
[50, 51],
[50, 60],
[1, self.mod.knobs.sn_size],
[50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
[1, 11, 21, 31, 41, 51, 61, 71, 81, 91],
]
if last_sn > 2:
shotnum_list.append([last_sn - 1])
for og_shotnum in shotnum_list:
if og_shotnum == [1, 1]:
continue
sn_arr = np.array(og_shotnum, dtype=np.uint32)
for cconfn in self.map.configs:
index, sni = build_shotnum_dset_relation(
sn_arr, cdset, shotnumkey, self.map, cconfn
)
self.assertSNSuite(
sn_arr, index, sni, cdset, shotnumkey, configkey, cconfn
)
def assertOutRangeSN(self):
"""
Assert shot number cases where some shot numbers are out of
        range of the dataset shot numbers.
"""
# Note: condition_shotnum() will ensure shotnum >= 0 so
# build_shotnum_dset_relation() does not handle this
#
# - one above largest shot number
# - out of range above (sn_size+1, sn_size+10, sn_size+100)
# and valid
#
shotnum_list = [
[self.mod.knobs.sn_size + 1],
[self.mod.knobs.sn_size + 1, self.mod.knobs.sn_size + 10],
[10, 15, self.mod.knobs.sn_size + 1],
[
10,
15,
self.mod.knobs.sn_size + 1,
self.mod.knobs.sn_size + 10,
self.mod.knobs.sn_size + 100,
],
]
cdset = self.cgroup["Run time list"]
shotnumkey = "Shot number"
configkey = "Configuration name"
for og_shotnum in shotnum_list:
sn_arr = np.array(og_shotnum, dtype=np.uint32)
for cconfn in self.map.configs:
index, sni = build_shotnum_dset_relation(
sn_arr, cdset, shotnumkey, self.map, cconfn
)
self.assertSNSuite(
sn_arr, index, sni, cdset, shotnumkey, configkey, cconfn
)
def assertSNSuite(self, shotnum, index, sni, cdset, shotnumkey, configkey, cconfn):
"""Suite of assertions for shot number conditioning"""
# shotnum - original requested shot number
# index - index of dataset
# sni - boolean mask for shotnum
# shotnum[sni] = cdset[index, shotnumkey]
# cdset - control devices dataset
# shotnumkey - field in cdset that corresponds to shot numbers
# configkey - field in cdset that corresponds to configuration
# names
# cconfn - configuration name for control device
#
# all return variables should be np.ndarray
self.assertTrue(isinstance(index, np.ndarray))
self.assertTrue(isinstance(sni, np.ndarray))
# all should be 1D arrays
self.assertEqual(index.ndim, 1)
self.assertEqual(sni.ndim, 1)
# equate array sizes
self.assertEqual(shotnum.size, sni.size)
self.assertEqual(np.count_nonzero(sni), index.size)
# shotnum[sni] = cdset[index, shotnumkey]
if index.size != 0:
self.assertTrue(
np.array_equal(shotnum[sni], cdset[index.tolist(), shotnumkey])
)
else:
self.assertEqual(shotnum[sni].size, 0)
# ensure correct config is grabbed
if index.size != 0:
cname_arr = cdset[index.tolist(), configkey]
for name in cname_arr:
self.assertEqual(_bytes_to_str(name), cconfn)
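    # Example (hedged sketch): downstream code typically uses the returned pair to
    # fill per-shot-number arrays; any other field name would work the same way.
    #
    #   index, sni = build_shotnum_dset_relation(
    #       shotnum, cdset, "Shot number", cmap, "config01")
    #   values = np.full(shotnum.size, np.nan)
    #   values[sni] = cdset[index.tolist(), "Shot number"]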
class TestConditionControls(TestBase):
"""Test Case for condition_controls"""
# What to test:
# 1. passing of non lapd.File object
# - raises AttributeError
# 2. passing of controls as None (or not a list)
# - raises TypeError
# 3. HDF5 file with no controls
# - raises AttributeError
# 4. HDF5 file with one control
# - pass controls with
# a. just control name, no config
# b. control name and valid config
# c. control name and invalid config
# d. two control names
# 5. HDF5 file with multiple controls
#
def setUp(self):
# setup HDF5 file
super().setUp()
def tearDown(self):
super().tearDown()
@with_bf
def test_input_failures(self, _bf: File):
"""Test input failures of `controls`"""
# `controls` is Null
self.assertRaises(ValueError, condition_controls, _bf, [])
# `controls` is not a string or Iterable
self.assertRaises(TypeError, condition_controls, _bf, True)
# 'controls` element is not a str or tuple
self.assertRaises(TypeError, condition_controls, _bf, ["Waveform", 8])
# `controls` tuple element has length > 2
self.assertRaises(ValueError, condition_controls, _bf, [("Waveform", "c1", "c2")])
@with_bf
def test_file_w_one_control(self, _bf: File):
"""
Test `controls` conditioning for file with one control device.
"""
# set one control device
self.f.add_module("Waveform", mod_args={"n_configs": 1, "sn_size": 100})
_bf._map_file() # re-map file
# ---- Waveform w/ one Configuration ----
# conditions that work
con_list = [
"Waveform",
("Waveform",),
["Waveform"],
[("Waveform", "config01")],
]
for og_con in con_list:
self.assertEqual(condition_controls(_bf, og_con), [("Waveform", "config01")])
# conditions that raise ValueError
con_list = [
["Waveform", "config01"],
["Waveform", ("Waveform", "config01")],
["Waveform", "6K Compumotor"],
]
for og_con in con_list:
self.assertRaises(ValueError, condition_controls, _bf, og_con)
# ---- Waveform w/ three Configurations ----
self.f.modules["Waveform"].knobs.n_configs = 3
_bf._map_file() # re-map file
# conditions that work
con_list = [[("Waveform", "config01")], [("Waveform", "config02")]]
for og_con in con_list:
self.assertEqual(condition_controls(_bf, og_con), og_con)
# conditions that raise ValueError
con_list = [
["Waveform"],
["6K Compumotor", ("Waveform", "config01")],
]
for og_con in con_list:
self.assertRaises(ValueError, condition_controls, _bf, og_con)
@with_bf
def test_file_w_multiple_controls(self, _bf: File):
"""
Test `controls` conditioning for file with multiple (2) control
devices.
"""
# set modules
self.f.add_module("Waveform", {"n_configs": 1, "sn_size": 100})
self.f.add_module("6K Compumotor", {"n_configs": 1, "sn_size": 100})
_bf._map_file() # re-map file
# ---- 1 Waveform Config & 1 6K Config ----
sixk_cspec = self.f.modules["6K Compumotor"].config_names[0]
# conditions that work
con_list = [
("Waveform", [("Waveform", "config01")]),
(["Waveform"], [("Waveform", "config01")]),
([("Waveform", "config01")], [("Waveform", "config01")]),
(["6K Compumotor"], [("6K Compumotor", sixk_cspec)]),
([("6K Compumotor", sixk_cspec)], [("6K Compumotor", sixk_cspec)]),
(
["Waveform", "6K Compumotor"],
[("Waveform", "config01"), ("6K Compumotor", sixk_cspec)],
),
(
["Waveform", ("6K Compumotor", sixk_cspec)],
[("Waveform", "config01"), ("6K Compumotor", sixk_cspec)],
),
(
[("Waveform", "config01"), "6K Compumotor"],
[("Waveform", "config01"), ("6K Compumotor", sixk_cspec)],
),
(
[("Waveform", "config01"), ("6K Compumotor", sixk_cspec)],
[("Waveform", "config01"), ("6K Compumotor", sixk_cspec)],
),
]
for og_con, correct_con in con_list:
self.assertEqual(condition_controls(_bf, og_con), correct_con)
# conditions that raise TypeError
con_list = [
["6K Compumotor", sixk_cspec],
]
for og_con in con_list:
self.assertRaises(TypeError, condition_controls, _bf, og_con)
# conditions that raise ValueError
con_list = [
["Waveform", "config01"],
["Waveform", ("Waveform", "config01")],
["6K Compumotor", ("6K Compumotor", sixk_cspec)],
[("Waveform", "config02")],
]
for og_con in con_list:
self.assertRaises(ValueError, condition_controls, _bf, og_con)
# ---- 3 Waveform Config & 1 6K Config ----
self.f.modules["Waveform"].knobs.n_configs = 3
_bf._map_file() # re-map file
sixk_cspec = self.f.modules["6K Compumotor"].config_names[0]
# conditions that work
con_list = [
([("Waveform", "config01")], [("Waveform", "config01")]),
([("Waveform", "config03")], [("Waveform", "config03")]),
("6K Compumotor", [("6K Compumotor", sixk_cspec)]),
(["6K Compumotor"], [("6K Compumotor", sixk_cspec)]),
([("6K Compumotor", sixk_cspec)], [("6K Compumotor", sixk_cspec)]),
(
[("Waveform", "config01"), "6K Compumotor"],
[("Waveform", "config01"), ("6K Compumotor", sixk_cspec)],
),
(
[("Waveform", "config02"), ("6K Compumotor", sixk_cspec)],
[("Waveform", "config02"), ("6K Compumotor", sixk_cspec)],
),
]
for og_con, correct_con in con_list:
self.assertEqual(condition_controls(_bf, og_con), correct_con)
# update the plot with the new index.
self.ChangeFOV()
def ChangeFOV(self):
"""
Changes the FOV or channel according to the user's choice, updates the
plot shown, and initializes the new FOV/channel at t=0 by default.
"""
self.Tindex = 0
# load the image and mask for the current plot
self.m.currpicture = self.reader.LoadOneImage(self.Tindex,self.FOVindex)
self.m.plotmask = self.reader.LoadMask(self.Tindex,self.FOVindex)
# sets the image and the mask to 0 for the previous plot
self.m.prevpicture = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
self.m.prevplotmask = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
# load the image and the mask for the next plot, check if it exists
if self.Tindex+1 < self.reader.sizet:
self.m.nextpicture = self.reader.LoadOneImage(self.Tindex+1, self.FOVindex)
self.m.nextplotmask = self.reader.LoadMask(self.Tindex+1, self.FOVindex)
# enables the next frame button in case it was disabled when the
# fov/channel was changed
self.button_nextframe.setEnabled(True)
else:
self.m.nextpicture = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
self.m.nextplotmask = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
# disables the next frame button if the mask or the picture
# does not exist.
self.button_nextframe.setEnabled(False)
# once the images and masks are loaded into the variables, they are
# displayed in the GUI.
self.m.UpdatePlots()
# disables the previous frame button in case it was active before
# changing fov/channel.
self.button_previousframe.setEnabled(False)
# updates the title of the plots to display the right time indices
# above the plots.
self.UpdateTitleSubplots()
# if the button to hide the mask was checked before changing fov/channel,
# it hides the mask again.
if self.button_hidemask.isChecked():
self.m.HideMask()
# the button to set the time index is also set to 0/default again.
self.button_timeindex.setText('')
# enables the neural network buttons if there is already an
# existing prediction for the current image.
self.EnableCNNButtons()
def ReloadThreeMasks(self):
"""
A function which replots all the masks at the current time and fov
indices. Needed after the batch prediction is completed to display
the result of the NN.
"""
if self.Tindex >= 0 and self.Tindex <= self.reader.sizet-1:
if self.Tindex == 0:
self.button_nextframe.setEnabled(True)
if self.Tindex < self.reader.sizet-1:
self.m.nextplotmask = self.reader.LoadMask(self.Tindex+1, self.FOVindex)
else:
self.m.nextplotmask = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
self.m.plotmask = self.reader.LoadMask(self.Tindex, self.FOVindex)
self.m.prevplotmask = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
self.m.UpdatePlots()
self.button_previousframe.setEnabled(False)
elif self.Tindex == self.reader.sizet-1:
self.button_previousframe.setEnabled(True)
self.m.prevplotmask = self.reader.LoadMask(self.Tindex-1, self.FOVindex)
self.m.plotmask = self.reader.LoadMask(self.Tindex, self.FOVindex)
self.m.nextplotmask = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
self.m.UpdatePlots()
self.button_nextframe.setEnabled(False)
else:
self.button_nextframe.setEnabled(True)
self.button_previousframe.setEnabled(True)
self.m.prevplotmask = self.reader.LoadMask(self.Tindex-1, self.FOVindex)
self.m.plotmask = self.reader.LoadMask(self.Tindex, self.FOVindex)
self.m.nextplotmask = self.reader.LoadMask(self.Tindex+1, self.FOVindex)
self.m.UpdatePlots()
self.UpdateTitleSubplots()
if self.button_hidemask.isChecked():
self.m.HideMask()
self.EnableCNNButtons()
else:
return
def ChangeTimeFrame(self):
"""This funcion is called whenever the user gives a new time index,
to jump to the new given index, once "enter" button is pressed.
"""
# it reads out the text in the button and converts it to an int.
newtimeindex = int(self.button_timeindex.text())
if newtimeindex >= 0 and newtimeindex <= self.reader.sizet-1:
self.reader.SaveMask(self.Tindex, self.FOVindex, self.m.plotmask)
self.Tindex = newtimeindex
if self.Tindex == 0:
self.button_nextframe.setEnabled(True)
self.m.nextpicture = self.reader.LoadOneImage(self.Tindex+1,self.FOVindex)
self.m.nextplotmask = self.reader.LoadMask(self.Tindex+1, self.FOVindex)
self.m.currpicture = self.reader.LoadOneImage(self.Tindex, self.FOVindex)
self.m.plotmask = self.reader.LoadMask(self.Tindex, self.FOVindex)
self.m.prevpicture = np.zeros([self.reader.sizey, self.reader.sizex],
dtype = np.uint16)
self.m.prevplotmask = np.zeros([self.reader.sizey, self.reader.sizex],
dtype = np.uint16)
self.m.UpdatePlots()
self.button_previousframe.setEnabled(False)
elif self.Tindex == self.reader.sizet-1:
self.button_previousframe.setEnabled(True)
self.m.prevpicture = self.reader.LoadOneImage(self.Tindex-1, self.FOVindex)
self.m.prevplotmask = self.reader.LoadMask(self.Tindex-1, self.FOVindex)
self.m.currpicture = self.reader.LoadOneImage(self.Tindex, self.FOVindex)
self.m.plotmask = self.reader.LoadMask(self.Tindex, self.FOVindex)
self.m.nextpicture = np.zeros([self.reader.sizey, self.reader.sizex],
dtype = np.uint16)
self.m.nextplotmask = np.zeros([self.reader.sizey, self.reader.sizex],
dtype = np.uint16)
self.m.UpdatePlots()
self.button_nextframe.setEnabled(False)
else:
self.button_nextframe.setEnabled(True)
self.button_previousframe.setEnabled(True)
self.m.prevpicture = self.reader.LoadOneImage(self.Tindex-1, self.FOVindex)
self.m.prevplotmask = self.reader.LoadMask(self.Tindex-1, self.FOVindex)
self.m.currpicture = self.reader.LoadOneImage(self.Tindex, self.FOVindex)
self.m.plotmask = self.reader.LoadMask(self.Tindex, self.FOVindex)
self.m.nextpicture = self.reader.LoadOneImage(self.Tindex+1,self.FOVindex)
self.m.nextplotmask = self.reader.LoadMask(self.Tindex+1, self.FOVindex)
self.m.UpdatePlots()
self.UpdateTitleSubplots()
self.button_timeindex.clearFocus()
self.button_timeindex.setText(str(self.Tindex)+'/'+str(self.reader.sizet-1))
if self.button_hidemask.isChecked():
self.m.HideMask()
self.EnableCNNButtons()
else:
self.button_timeindex.clearFocus()
return
def CellCorrespActivation(self):
self.Disable(self.button_cellcorrespondence)
self.WriteStatusBar('Doing the cell correspondence')
if self.Tindex > 0:
self.m.plotmask = self.reader.CellCorrespondence(self.Tindex, self.FOVindex)
self.m.updatedata()
else:
pass
#self.m.plotmask = self.reader.LoadSeg(self.Tindex, self.FOVindex)
#self.m.updatedata()
self.Enable(self.button_cellcorrespondence)
self.button_cellcorrespondence.setChecked(False)
self.ClearStatusBar()
self.reader.SaveMask(self.Tindex, self.FOVindex, self.m.plotmask)
def ButtonSaveSegMask(self):
"""saves the segmented mask
"""
self.reader.SaveSegMask(self.Tindex, self.FOVindex, self.m.plotmask)
def ChangePreviousFrame(self):
"""This function is called when the previous frame buttons is pressed
and it tests if the buttons is enabled and if so it calls the
BackwardTime() function. It should avoid the let the user do multiple
clicks and that the function is then called afterwards several times,
once the frames and masks of the current time index have been loaded.
"""
if self.button_previousframe.isEnabled():
self.button_previousframe.setEnabled(False)
self.BackwardTime()
if self.Tindex >0:
self.button_previousframe.setEnabled(True)
else:
return
def ChangeNextFrame(self):
"""This function is called when the next frame buttons is pressed
and it tests if the buttons is enabled and if so it calls the
ForwardTime() function. It should avoid the let the user do multiple
clicks and that the function is then called afterwards several times,
once the frames and masks of the current time index have been loaded.
"""
if self.button_nextframe.isEnabled():
self.button_nextframe.setEnabled(False)
self.ForwardTime()
if self.Tindex + 1 < self.reader.sizet:
self.button_nextframe.setEnabled(True)
else:
return
def ForwardTime(self):
"""This function switches the frame in forward time index. And it tests
several conditions if t == lastTimeIndex-1, because then the next frame
button has to be disabled. It also tests if the show value of cells
button and hidemask are active in order to hide/show the mask or to
show the cell values.
"""
# the t frame is defined as the currently shown frame on the display.
# If the button "Next time frame" is pressed, this function is called
self.WriteStatusBar('Loading the next frame...')
self.Disable(self.button_nextframe)
if self.Tindex + 1 < self.reader.sizet - 1 :
self.reader.SaveMask(self.Tindex, self.FOVindex, self.m.plotmask)
self.m.prevpicture = self.m.currpicture.copy()
self.m.prevplotmask = self.m.plotmask.copy()
self.m.currpicture = self.m.nextpicture.copy()
self.m.plotmask = self.m.nextplotmask.copy()
self.m.nextpicture = self.reader.LoadOneImage(self.Tindex+2, self.FOVindex)
self.m.nextplotmask = self.reader.LoadMask(self.Tindex+2, self.FOVindex)
self.m.UpdatePlots()
if self.Tindex + 1 == 1:
self.button_previousframe.setEnabled(True)
else:
self.reader.SaveMask(self.Tindex, self.FOVindex, self.m.plotmask)
self.m.prevpicture = self.m.currpicture.copy()
self.m.prevplotmask = self.m.plotmask.copy()
self.m.currpicture = self.m.nextpicture.copy()
self.m.plotmask = self.m.nextplotmask.copy()
self.m.nextpicture = np.zeros([self.reader.sizey, self.reader.sizex],
dtype = np.uint16)
self.m.nextplotmask = np.zeros([self.reader.sizey,self.reader.sizex],
dtype = np.uint16)
self.m.UpdatePlots()
self.button_nextframe.setEnabled(False)
self.Tindex = self.Tindex+1
self.UpdateTitleSubplots()
if self.button_hidemask.isChecked():
self.m.HideMask()
self.Enable(self.button_nextframe)
self.ClearStatusBar()
self.button_timeindex.setText(str(self.Tindex)+'/'+str(self.reader.sizet-1))
def BackwardTime(self):
"""This function switches the frame in backward time index. And it
several conditions if t == 1, because then the button previous frame has to
be disabled. It also tests if the show value of cells button and
hidemask are active in order to hide/show the mask or to show the cell
values.
"""
# the t frame is defined as the currently shown frame on the display.
# If the button "Previous time frame" is pressed, this function is called
self.WriteStatusBar('Loading the previous frame...')
self.Disable(self.button_previousframe)
self.reader.SaveMask(self.Tindex, self.FOVindex, self.m.plotmask)
self.m.nextpicture = self.m.currpicture.copy()
self.m.nextplotmask = self.m.plotmask.copy()
self.m.currpicture = self.m.prevpicture.copy()
self.m.plotmask = self.m.prevplotmask.copy()
if self.Tindex == 1:
self.m.prevpicture = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
self.m.prevplotmask = np.zeros([self.reader.sizey, self.reader.sizex], dtype = np.uint16)
self.button_previousframe.setEnabled(False)
else:
self.m.prevpicture = self.reader.LoadOneImage(self.Tindex-2, self.FOVindex)
self.m.prevplotmask = self.reader.LoadMask(self.Tindex-2, self.FOVindex)
self.m.UpdatePlots()
if self.Tindex-1 == self.reader.sizet-2:
self.button_nextframe.setEnabled(True)
if self.button_hidemask.isChecked():
self.m.HideMask()
self.Tindex -= 1
self.UpdateTitleSubplots()
self.Enable(self.button_previousframe)
if self.Tindex > 0:
self.button_previousframe.setEnabled(True)
self.ClearStatusBar()
self.button_timeindex.setText(str(self.Tindex)+'/' + str(self.reader.sizet-1))
# -----------------------------------------------------------------------------
# MANUAL MASK CORRECTIONS
def ChangeOneValue(self):
"""This function is called when the button Change cell value is
clicked. It displays the instructions on the status bar.
And if the user clicks in the graph where the current mask is displayed
it connects the event of the click (meaning that user has clicked on
one cell) to the function self.DialogBoxChangeOneValue.
This function will then replaces the cell selected by the user with
the click with a new value entered by the user.
"""
# displaying the instructions on the statusbar
self.WriteStatusBar((
'Left-click to select cell, right-click to abort.'))
# disables all the buttons
self.Disable(self.button_changecellvalue)
# connects the event "press mouse button" in the matplotlib plot
# (picture) to the function self.DialogBoxChangeOneValue.
# Track the transaction's mutation counter so key()/value()/item() know
# when the cached `key` and `val` must be refreshed.
self._last_mutation = txn._mutations
def _invalidate(self):
if self._cur:
_lib.mdb_cursor_close(self._cur)
self.db._deps.discard(self)
self.txn._deps.discard(self)
self._cur = _invalid
self._dbi = _invalid
self._txn = _invalid
def __del__(self):
self._invalidate()
def close(self):
"""Close the cursor, freeing its associated resources."""
self._invalidate()
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self._invalidate()
def key(self):
"""Return the current key."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
return self._to_py(self._key)
def value(self):
"""Return the current value."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._val)
def item(self):
"""Return the current `(key, value)` pair."""
# Must refresh `key` and `val` following mutation.
if self._last_mutation != self.txn._mutations:
self._cursor_get(_lib.MDB_GET_CURRENT)
preload(self._val)
return self._to_py(self._key), self._to_py(self._val)
def _iter(self, op, keys, values):
if not values:
get = self.key
elif not keys:
get = self.value
else:
get = self.item
cur = self._cur
key = self._key
val = self._val
rc = 0
while self._valid:
yield get()
rc = _lib.mdb_cursor_get(cur, key, val, op)
self._valid = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
raise _error("mdb_cursor_get", rc)
def iternext(self, keys=True, values=True):
"""Return a forward iterator that yields the current element before
calling :py:meth:`next`, repeating until the end of the database is
reached. As a convenience, :py:class:`Cursor` implements the iterator
protocol by automatically returning a forward iterator when invoked:
::
>>> # Equivalent:
>>> it = iter(cursor)
>>> it = cursor.iternext(keys=True, values=True)
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT, keys, values)
__iter__ = iternext
def iternext_dup(self, keys=False, values=True):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_dup`,
repeating until the last value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
.. code-block:: python
if not cursor.set_key("foo"):
print("No values found for 'foo'")
else:
for idx, data in enumerate(cursor.iternext_dup()):
print("%d'th value for 'foo': %s" % (idx, data))
"""
return self._iter(_lib.MDB_NEXT_DUP, keys, values)
def iternext_nodup(self, keys=True, values=False):
"""Return a forward iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`next_nodup`,
repeating until the end of the database is reached.
Only meaningful for databases opened with `dupsort=True`.
If the cursor is not yet positioned, it is moved to the first key in
the database, otherwise iteration proceeds from the current position.
.. code-block:: python
for key in cursor.iternext_nodup():
print("Key '%s' has %d values" % (key, cursor.count()))
"""
if not self._valid:
self.first()
return self._iter(_lib.MDB_NEXT_NODUP, keys, values)
def iterprev(self, keys=True, values=True):
"""Return a reverse iterator that yields the current element before
calling :py:meth:`prev`, until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
::
>>> with env.begin() as txn:
... for i, (key, value) in enumerate(txn.cursor().iterprev()):
... print('%dth last item is (%r, %r)' % (1+i, key, value))
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV, keys, values)
def iterprev_dup(self, keys=False, values=True):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_dup`,
repeating until the first value of the current key is reached.
Only meaningful for databases opened with `dupsort=True`.
"""
return self._iter(_lib.MDB_PREV_DUP, keys, values)
def iterprev_nodup(self, keys=True, values=False):
"""Return a reverse iterator that yields the current value
("duplicate") of the current key before calling :py:meth:`prev_nodup`,
repeating until the start of the database is reached.
If the cursor is not yet positioned, it is moved to the last key in
the database, otherwise iteration proceeds from the current position.
Only meaningful for databases opened with `dupsort=True`.
"""
if not self._valid:
self.last()
return self._iter(_lib.MDB_PREV_NODUP, keys, values)
def _cursor_get(self, op):
rc = _lib.mdb_cursor_get(self._cur, self._key, self._val, op)
self._valid = v = not rc
self._last_mutation = self.txn._mutations
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def _cursor_get_kv(self, op, k, v):
rc = _lib.pymdb_cursor_get(self._cur, k, len(k), v, len(v),
self._key, self._val, op)
self._valid = v = not rc
if rc:
self._key.mv_size = 0
self._val.mv_size = 0
if rc != _lib.MDB_NOTFOUND:
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
raise _error("mdb_cursor_get", rc)
return v
def first(self):
"""Move to the first key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the first value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST)
def first_dup(self):
"""Move to the first value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_FIRST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_FIRST_DUP)
def last(self):
"""Move to the last key in the database, returning ``True`` on success
or ``False`` if the database is empty.
If the database was opened with `dupsort=True` and the key contains
duplicates, the cursor is positioned on the last value ("duplicate").
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST)
def last_dup(self):
"""Move to the last value ("duplicate") for the current key, returning
``True`` on success or ``False`` if the database is empty.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_LAST_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_LAST_DUP)
def prev(self):
"""Move to the previous element, returning ``True`` on success or
``False`` if there is no previous item.
For databases opened with `dupsort=True`, moves to the previous data
item ("duplicate") for the current key if one exists, otherwise moves
to the previous key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV)
def prev_dup(self):
"""Move to the previous value ("duplicate") of the current key,
returning ``True`` on success or ``False`` if there is no previous
value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_DUP)
def prev_nodup(self):
"""Move to the last value ("duplicate") of the previous key, returning
``True`` on success or ``False`` if there is no previous key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_PREV_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_PREV_NODUP)
def next(self):
"""Move to the next element, returning ``True`` on success or ``False``
if there is no next element.
For databases opened with `dupsort=True`, moves to the next value
("duplicate") for the current key if one exists, otherwise moves to the
first value of the next key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT)
def next_dup(self):
"""Move to the next value ("duplicate") of the current key, returning
``True`` on success or ``False`` if there is no next value.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_DUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_DUP)
def next_nodup(self):
"""Move to the first value ("duplicate") of the next key, returning
``True`` on success or ``False`` if there is no next key.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_NEXT_NODUP
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get(_lib.MDB_NEXT_NODUP)
def set_key(self, key):
"""Seek exactly to `key`, returning ``True`` on success or ``False`` if
the exact key was not found. It is an error to :py:meth:`set_key` the
empty bytestring.
For databases opened with `dupsort=True`, moves to the first value
("duplicate") for the key.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_SET_KEY
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES)
def set_key_dup(self, key, value):
"""Seek exactly to `(key, value)`, returning ``True`` on success or
``False`` if the exact key and value was not found. It is an error
to :py:meth:`set_key` the empty bytestring.
Only meaningful for databases opened with `dupsort=True`.
Equivalent to `mdb_cursor_get()
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
with `MDB_GET_BOTH
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
"""
return self._cursor_get_kv(_lib.MDB_GET_BOTH, key, value)
def get(self, key, default=None):
"""Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is
returned when `key` is found, otherwise `default`.
"""
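# Illustrative usage of the cursor API defined above (a sketch, not part of the
# library source; the database path and keys are made up for the example):
#
#   import lmdb
#   with lmdb.open('/tmp/example-db') as env:
#       with env.begin(write=True) as txn:
#           txn.put(b'foo', b'bar')
#           with txn.cursor() as curs:
#               assert curs.get(b'foo') == b'bar'               # key found -> value
#               assert curs.get(b'nope', default=b'?') == b'?'  # missing -> default
#               for key, value in curs.iternext():
#                   print(key, value)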
self.ax = None
self.cursor = None
self.navigation_toolbar = None
self.isocenter = np.array([0, 0], dtype=float)
self.cursor_position = np.array([0, 0])
self.image_title = ""
self.image_type_names = {-1: "Optical Density", 0: "Dose (cGy)", 1: "Pixel Value", 2: 'Gamma Matrix'}
self.image_type = ''
self.colormap = 'jet'
self.cal = None
self.showed = False
self.scale = 'mm'
# colormaps
self.cmaps = ['jet', 'viridis', 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd', 'afmhot',
'autumn', 'bone', 'cool', 'copper', 'gist_heat', 'gray', 'hot', 'pink', 'spring', 'summer',
'winter', 'BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn',
'Spectral', 'seismic', 'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3',
'gist_earth', 'terrain', 'ocean', 'gist_stern', 'brg', 'CMRmap', 'cubehelix', 'gnuplot',
'gnuplot2', 'gist_ncar', 'nipy_spectral', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']
self._colormap_combo()
self.window_validator()
self.set_connections()
def window_validator(self):
# integers 0 to 9999
rx = QtCore.QRegExp("[0-9]\\d{0,3}")
# the validator treats the regexp as "^[0-9]\\d{0,3}$"
v = QtGui.QRegExpValidator()
v.setRegExp(rx)
self.minLineEdit.setValidator(v)
self.maxLineEdit.setValidator(v)
@property
def iso_reg(self):
return self.cursor_position
def set_scale(self, scale='mm'):
self.scale = scale
def _colormap_combo(self):
for item in self.cmaps:
self.colorComboBox.addItem(item)
def set_connections(self):
self.channel_box.activated.connect(self.on_activated)
self.colorComboBox.activated[str].connect(self.on_color)
self.minLineEdit.returnPressed.connect(self.on_min)
self.maxLineEdit.returnPressed.connect(self.on_max)
self.rotate_90cw.clicked.connect(self.on_rotateCW)
self.rotate_90ccw.clicked.connect(self.on_rotateCCW)
self.button_rotatePoints.clicked.connect(self.on_rotate)
self.save_as.clicked.connect(self.save_images)
self.isocenter_button.clicked.connect(self.set_isocenter)
self.button_fliplr.clicked.connect(self.on_fliplr)
self.button_flipud.clicked.connect(self.on_flipud)
def set_image(self, im, delta, channel=1, im_type='', calib_data=None):
self.im = im
self.delta = delta
self.channel = channel
self.image_type = im_type
self.calib_data = calib_data
self.im_bkp = self.im.copy()
def set_colormap(self, colormap='jet'):
"""
Set the colormap of the EditImageWidget.
colormap = [('Sequential', ['Blues', 'BuGn', 'BuPu',
'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool', 'copper',
'gist_heat', 'gray', 'hot', 'pink',
'spring', 'summer', 'winter']),
('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'seismic']),
('Qualitative', ['Accent', 'Dark2', 'Paired', 'Pastel1',
'Pastel2', 'Set1', 'Set2', 'Set3']),
('Miscellaneous', ['gist_earth', 'terrain', 'ocean', 'gist_stern',
'brg', 'CMRmap', 'cubehelix',
'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'viridis', 'jet', 'rainbow',
'gist_rainbow', 'hsv', 'flag', 'prism'])
:param colormap: str of colormap
"""
self.colormap = colormap
def set_image_path(self, path):
self.image_location = path
def read_image(self, path_to_image=None):
if path_to_image is None:
self.image_location, pn = QtGui.QFileDialog.getOpenFileName(self,
"Import 48 bits tiff File or Film2Dose image files.",
QtCore.QDir.currentPath(),
"Tiff Files (*.tif);;"
"Film2Dose images (*.fti);;"
"Film2Dose Dose images (*.ftd);;"
"DICOM Images (*.dcm)")
else:
self.image_location = path_to_image
QtCore.QDir.setCurrent(self.image_location)
_, filepart = os.path.splitext(self.image_location)
if self.image_location:
if filepart == '.tif':
data, self.delta = read_tiff(self.image_location)
self.image_type = 'tif'
self.image_title = self.image_type_names[-1]
self.im = np.zeros(data.shape)
self.im[:, :, 0] = data[:, :, 0]
self.im[:, :, 1] = data[:, :, 1]
self.im[:, :, 2] = data[:, :, 2]
elif filepart == '.fti':
data = load(self.image_location)
self.im, self.delta, self.image_type, self.calib_data = data.get_image()
if self.image_type == "Pixel":
self.image_title = self.image_type_names[1]
else:
self.image_title = self.image_type_names[-1]
elif filepart == '.ftd':
data = load(self.image_location)
self.im, self.delta, self.image_type, self.calib_data = data.get_image()
self.image_title = self.image_type_names[0]
elif filepart == '.dcm':
im, self.delta = read_dicom(self.image_location)
self.im = np.zeros((im.shape[0], im.shape[1], 3))
self.im[:, :, 0] = im
self.im[:, :, 1] = im
self.im[:, :, 2] = im
self.image_type = 'DICOM'
self.im_bkp = self.im.copy()
def set_windows_limits(self, im):
try:
self.min_sat = np.percentile(im, 1)
self.max_sat = np.percentile(im, 99)
except:
self.min_sat = im.min()
self.max_sat = im.max()
mi = str(round(self.min_sat))
mx = str(round(self.max_sat))
self.minLineEdit.setText(mi)
self.maxLineEdit.setText(mx)
def show_image(self, fig=None, ax=None):
if fig is None and ax is None:
im = self.im[:, :, self.channel]
if self.min_sat is None or self.max_sat is None:
self.set_windows_limits(im)
if self.image_type == 'Gamma':
self.min_sat = im.min()
self.max_sat = 1.0
self.minLineEdit.setText(str(0))
self.maxLineEdit.setText(str(1))
self.fig, self.ax = display_fig(im=im, delta=self.delta, col_map=self.colormap,
limits=(self.min_sat, self.max_sat),
offset=self.isocenter, scale=self.scale)
elif fig == 1:
im = self.im[:, :, self.channel]
self.set_windows_limits(im)
lim = (self.min_sat, self.max_sat)
del self.fig
del self.ax
self.fig, self.ax = display_fig(im, self.delta, self.colormap, lim, self.isocenter, self.scale)
else:
self.fig = fig
self.ax = ax
if self.image_type == 'tif':
title = self.image_type_names[-1] + " - " + self.channel_names[self.channel]
elif self.image_type == 'Gamma':
title = 'Gamma Matrix'
else:
title = self.image_type_names[self.channel] + " - " + self.channel_names[self.channel]
self.ax.set_title(title)
self.canvas = RotationCanvas(self.fig)
self.verticalLayout_2.addWidget(self.canvas)
self.navigation_toolbar = Film2DoseToolbar(self.canvas, self)
self.navigation_toolbar.setIconSize(QtCore.QSize(46, 46))
self.verticalLayout_2.addWidget(self.navigation_toolbar)
def image_threshold(self):
im = auto_threshold(self.im[:, :, self.channel])
self.im[:, :, self.channel] = im
self.update_image(fig=1)
def image_trim(self, x_border, y_border):
self.im = image_trim_xy(self.im, self.delta, x_border, y_border)
self.update_image(fig=1)
def restore_image(self):
self.im = self.im_bkp.copy()
self.update_image(fig=1)
def on_color(self, txt):
self.colormap = txt
self.update_image(fig=1)
def on_min(self):
print('on_min')
self.min_sat = float(self.minLineEdit.text())
self.update_image(fig=1)
def on_max(self):
print('on_max')
self.max_sat = float(self.maxLineEdit.text())
self.update_image(fig=1)
def get_canvas_points(self, n):
self.update_image()
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
self.canvas.set_points(n)
tmp = self.canvas.get_points()
self.cursor.disconnect_events()
pos = np.asarray(tmp[0])
return pos
def on_flipud(self):
# TODO add a header to keep all data manipulations.
self.im = np.flipud(self.im)
self.update_image(fig=1)
def on_fliplr(self):
self.im = np.fliplr(self.im)
self.update_image(fig=1)
def get_position(self):
self.update_image()
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
self.canvas.set_points(1)
position = self.canvas.get_points()
self.update_image(fig=1)
return np.asarray(position[0])
def get_points(self, npoints):
self.update_image()
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
self.canvas.set_points(npoints)
position = self.canvas.get_points()
self.update_image(fig=1)
return np.asarray(position)
def set_isocenter(self):
self.update_image()
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
self.canvas.set_points(1)
position = self.canvas.get_points()
pos = np.array(position[0], dtype=float)
self.cursor_position = np.asarray(position[0]).astype(int)
self.isocenter += pos
print(self.cursor_position)
print('Position: ', self.cursor_position)
print('actual isocenter: ', self.isocenter)
self.update_image(fig=1)
def save_images(self):
h0, h1 = self.ax.get_xlim()
v0, v1 = self.ax.get_ylim()
imc = get_crop(self.im, self.delta, [h0, h1, v0, v1])
print('limits: xlim: %s, %s ylim: %s, %s' % (h0, h1, v0, v1))
# print(self.delta)
im = Fim2DoseImage(imc, self.delta, self.image_type, self.isocenter, self.calib_data)
file_name, _ = QtGui.QFileDialog.getSaveFileName(None, "Save Film2Dose image",
QtCore.QDir.currentPath(),
"Film2Dose images (*.fti)")
if file_name[-3:] == 'fti':
save_ftd(im, file_name)
def get_image(self):
return self.im[:, :, self.channel], self.delta
def get_all_channels(self):
return self.im, self.delta
def on_rotateCW(self):
self.im = np.rot90(self.im, 3)
self.update_image(fig=1)
def on_rotateCCW(self):
self.im = np.rot90(self.im)
self.update_image(fig=1)
def on_rotate(self):
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
position = self.canvas.get_points()
x = (position[0][0], position[1][0])
y = (position[0][1], position[1][1])
self.im = rotate_image(self.im, x, y)
self.update_image(fig=1)
def update_image(self, fig=None, ax=None):
try:
self.verticalLayout_2.removeWidget(self.canvas)
self.canvas.setParent(None)
self.verticalLayout_2.removeWidget(self.navigation_toolbar)
self.navigation_toolbar.setParent(None)
del self.canvas
del self.navigation_toolbar
self.show_image(fig, ax)
except:
pass
def on_activated(self):
if self.channel_box.currentIndex() == 0:
self.update_combo()
elif self.channel_box.currentIndex() == 1:
self.update_combo()
elif self.channel_box.currentIndex() == 2:
self.update_combo()
def update_combo(self):
self.channel = self.channel_box.currentIndex()
try:
self.verticalLayout_2.removeWidget(self.canvas)
self.canvas.setParent(None)
self.verticalLayout_2.removeWidget(self.navigation_toolbar)
self.navigation_toolbar.setParent(None)
self.show_image()
except:
pass
class TPSWidget(QtGui.QWidget, TPSWidgetQT.Ui_imageForm):
# TODO refactor dose window using line edit
def __init__(self, parent=None):
super(TPSWidget, self).__init__(parent)
self.setupUi(self)
self.image_location = None
self.im = np.array([])
self.image_type = ''
self.min_sat = None
self.max_sat = None
self.delta = 0.0
self.canvas = None
self.fig = None
self.ax = None
self.cursor = None
self.navigation_toolbar = None
self.isocenter = np.array([0.0, 0.0])
self.cursor_position = np.array([0.0, 0.0])
self.image_title = 'TPS calculated doses - cGy'
self.colormap = 'jet'
self.radio_mm.setChecked(True)
self.scale = 'mm'
self.interpolation = None
# colormaps
self.cmaps = ['jet', 'viridis', 'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn',
'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd', 'afmhot', 'autumn',
'bone', 'cool', 'copper', 'gist_heat', 'gray', 'hot', 'pink', 'spring', 'summer', 'winter',
'BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'seismic', 'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3',
'gist_earth', 'terrain', 'ocean', 'gist_stern', 'brg', 'CMRmap', 'cubehelix', 'gnuplot',
'gnuplot2', 'gist_ncar', 'nipy_spectral', 'rainbow', 'gist_rainbow', 'hsv', 'flag', 'prism']
self._colormap_combo()
self.window_validator()
self.set_connections()
def window_validator(self):
# integers 0 to 9999
rx = QtCore.QRegExp("[0-9]\\d{0,3}")
# the validator treats the regexp as "^[0-9]\\d{0,3}$"
v = QtGui.QRegExpValidator()
v.setRegExp(rx)
self.minLineEdit.setValidator(v)
self.maxLineEdit.setValidator(v)
def get_points(self, npoints):
self.update_image()
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
self.canvas.set_points(npoints)
position = self.canvas.get_points()
self.update_image()
return np.asarray(position)
def set_image(self, im, delta):
self.im = im
self.delta = delta
def get_image(self):
return self.im, self.delta
@property
def iso_reg(self):
return self.cursor_position
def set_scale(self, scale='mm'):
self.scale = scale
def _colormap_combo(self):
for item in self.cmaps:
self.colorComboBox.addItem(item)
def set_connections(self):
self.colorComboBox.activated[str].connect(self.on_color)
self.minLineEdit.returnPressed.connect(self.on_min)
self.maxLineEdit.returnPressed.connect(self.on_max)
self.rotate_90cw.clicked.connect(self.on_rotateCW)
self.rotate_90ccw.clicked.connect(self.on_rotateCCW)
self.open_button.clicked.connect(self.on_open)
self.button_fliplr.clicked.connect(self.on_fliplr)
self.button_flipud.clicked.connect(self.on_flipud)
self.multiply_button.clicked.connect(self.on_multiply)
self.radio_mm.clicked.connect(self.on_mm)
self.radio_pixel.clicked.connect(self.on_pixel)
def on_mm(self):
self.scale = 'mm'
self.update_image()
def on_pixel(self):
self.scale = 'pix'
self.update_image()
def on_multiply(self):
"""
Multiply the image by a factor (a float number).
"""
factor, flag = QtGui.QInputDialog.getDouble(self, "Multiply image by a factor", "factor", decimals=6, value=1)
if flag:
self.im = self.im * factor
self.update_image()
def on_open(self):
flag = self.read_image()
if flag:
self.update_image()
def on_color(self, txt):
self.colormap = txt
self.update_image()
def on_min(self):
print('on_min')
self.min_sat = float(self.minLineEdit.text())
self.update_image(fig=1)
def on_max(self):
print('on_max')
self.max_sat = float(self.maxLineEdit.text())
self.update_image(fig=1)
def on_rotateCW(self):
self.im = np.rot90(self.im, 3)
self.update_image()
def on_rotateCCW(self):
self.im = np.rot90(self.im)
self.update_image()
def get_canvas_points(self, n):
self.update_image()
self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1)
self.canvas.set_points(n)
tmp = self.canvas.get_points()
self.cursor.disconnect_events()
pos = np.asarray(tmp[0])
return pos
def on_flipud(self):
self.im = np.flipud(self.im)
self.update_image()
def on_fliplr(self):
self.im = np.fliplr(self.im)
self.update_image()
"""
Contains functions relating to the connected non-Gaussian covariance approximation.
"""
import glob
import os.path
import time
import numpy as np
def get_avg_l(lmin, lmax, n_bandpower):
"""
Determine the weighted average l per bandpower.
Args:
lmin (int): Minimum l.
lmax (int): Maximum l.
n_bandpower (int): Number of bandpowers.
"""
# Calculate bin boundaries (add small fraction to lmax to include it in the end bin)
edges = np.logspace(np.log10(lmin), np.log10(lmax + 1e-5), n_bandpower + 1)
# Loop over bins
for bin_idx, (lo_edge, hi_edge) in enumerate(zip(edges[:-1], edges[1:])):
# Determine ells in bin
bin_lmin = np.ceil(lo_edge)
bin_lmax = np.floor(hi_edge)
bin_ell = np.arange(bin_lmin, bin_lmax + 1)
# Calculate weights = l(l+1)/2π
weights = bin_ell * (bin_ell + 1) / (2 * np.pi)
# Calculate weighted average l over bin
weighted_avg_l = np.average(bin_ell, weights=weights)
# Round to the nearest integer and print
weighted_avg_l = int(np.around(weighted_avg_l))
print(f'Bin {bin_idx}: {weighted_avg_l}')
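# Example (an illustrative sketch, not from the original module; the lmin/lmax/
# n_bandpower values are hypothetical). The printed per-bin ells are what get
# passed as the `ells` argument of get_bin_weights below:
#
#   get_avg_l(lmin=2, lmax=2000, n_bandpower=10)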
def get_bin_weights(full_cov_path, binmat_path, lmax, lmin, ells, save_path):
"""
Obtain the binning weights used in the connected non-Gaussian approximation.
Args:
full_cov_path (str): Path to full connected non-Gaussian covariance block.
binmat_path (str): Path to binning matrix.
lmax (int): Maximum l.
lmin (int): Minimum l.
ells (list): List of ells to evaluate the weights for, as given by get_avg_l.
save_path (str): Path to save output .npz file to.
"""
# Load unmixed covariance and truncate to the lmax
print('Loading covariance')
n_ell = lmax - lmin + 1
with np.load(full_cov_path) as data:
cov_unbinned = data['cov'][:n_ell, :n_ell]
# Load binning matrix
print('Loading binning matrix')
with np.load(binmat_path) as data:
binmat = data['pbl']
# Apply binning matrix
print('Applying binning matrix')
cov_binned = binmat @ cov_unbinned @ binmat.T
# Extract out the sampled ells
print('Extracting sampled ells')
ell_idx = np.subtract(ells, lmin)
cov_sampled = cov_unbinned[ell_idx, :][:, ell_idx]
# Calculate ratio between sampled and real binned
ratio = cov_sampled / cov_binned
# Calculate and save weights to go from sampled to binned
weights = 1 / ratio
assert np.allclose(cov_sampled * weights, cov_binned, atol=0)
header = ('Weights to go from covariance block sampled at ells given in ells array to approximate binned '
f'covariance. Output from {__file__}.get_bin_weights for params '
f'full_cov_path = {full_cov_path}, binmat_path = {binmat_path}, lmax = {lmax}, lmin = {lmin}, '
f'ells = {ells}, at {time.strftime("%c")}')
np.savez_compressed(save_path, weights=weights, ells=ells, header=header)
print('Saved ' + save_path)
def get_mix_weights(full_cov_path, binmat_path, bin_weights_path, mixmat_path, lmin, save_path, fsky=None):
"""
Obtain the mixing weights used in the connected non-Gaussian approximation.
Args:
full_cov_path (str): Path to full connected non-Gaussian covariance block.
binmat_path (str): Path to binning matrix.
bin_weights_path (str): Path to binning weights obtained with get_bin_weights.
mixmat_path (str): Path to mixing matrix.
lmin (int): Minimum l.
save_path (str): Path to save output .npz file to.
fsky (float, optional): Sky fraction - if supplied, will multiply input covariance block by 1/fsky. This is only
necessary if the full_cov_path is the path to the full-sky connected non-Gaussian
covariance and hasn't already received the 1/fsky factor.
"""
# Load unmixed, unbinned block
print('Loading original block')
with np.load(full_cov_path) as data:
unmixed_unbinned_block = data['cov']
# Load binning weights
print('Loading binning weights')
with np.load(bin_weights_path) as data:
part1_weights = data['weights']
sampled_ell = data['ells']
# Load binning matrix
print('Loading binning matrix')
with np.load(binmat_path) as data:
pbl = data['pbl']
# Load mixing matrix
print('Loading mixing matrix')
with np.load(mixmat_path) as data:
mixmat = data['mixmat_ee_to_ee'][lmin:, lmin:]
# Adjust unmixed, unbinned block for fsky
if fsky is not None:
print('Applying fsky correction')
unmixed_unbinned_block /= fsky
# Select sampled ells and apply part 1 weights
print('Applying part 1 weights')
sampled_ell_idx = sampled_ell - lmin
part1_input = unmixed_unbinned_block[sampled_ell_idx, :][:, sampled_ell_idx]
part1_output = part1_weights * part1_input
# As a check, apply binning matrix to truncated unmixed unbinned matrix
# and confirm that it gives an identical result
n_ell = mixmat.shape[0]
part1_check = pbl @ unmixed_unbinned_block[:n_ell, :n_ell] @ pbl.T
print('Part 1 check:', np.allclose(part1_output, part1_check, atol=0))
# Apply mixing matrix to unmixed unbinned matrix, followed by binning matrix, to obtain binned mixed block
print('Applying mixing matrix')
mixed_unbinned_block = mixmat @ unmixed_unbinned_block @ mixmat.T
print('Applying binning matrix')
mixed_binned_block = pbl @ mixed_unbinned_block @ pbl.T
# Elementwise divide binned mixed block by binned unmixed block to give effective fsky^2 matrix
print('Calculating effective fsky^2 matrix')
eff_fsky2 = mixed_binned_block / part1_output
# As a check, apply effective fsky^2 matrix to binned unmixed block and check identical to binned mixed block
part2_check = eff_fsky2 * part1_output
print('Check:', np.allclose(part2_check, mixed_binned_block, atol=0))
# Save effective fsky^2 matrix to disk
header = (f'Mixing weights for CNG approximation. Output from {__file__}.get_mix_weights for params '
f'full_cov_path = {full_cov_path}, bin_weights_path = {bin_weights_path}, '
f'binmat_path = {binmat_path}, mixmat_path = {mixmat_path}, fsky = {fsky}, lmin = {lmin}, '
f'at {time.strftime("%c")}')
np.savez(save_path, eff_fsky2=eff_fsky2, header=header)
print('Saved ' + save_path)
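# Minimal sketch of how the three functions above fit together (not part of the
# original module; all file paths and numerical settings are hypothetical
# placeholders, including the sampled ells and the fsky value).
def _example_build_cng_weights():
    """Illustrative end-to-end use of get_avg_l, get_bin_weights and get_mix_weights."""
    lmin, lmax, n_bandpower = 2, 2000, 10
    # 1. Print the weighted-average ell per bandpower and copy them into `ells`.
    get_avg_l(lmin, lmax, n_bandpower)
    ells = [3, 7, 15, 33, 70, 150, 320, 680, 1400, 1950]  # hypothetical output of step 1
    # 2. Calculate binning weights from one full connected non-Gaussian block.
    get_bin_weights(full_cov_path='cng_block_0_0.npz', binmat_path='binmat.npz',
                    lmax=lmax, lmin=lmin, ells=ells, save_path='bin_weights.npz')
    # 3. Calculate the effective fsky^2 mixing weights.
    get_mix_weights(full_cov_path='cng_block_0_0.npz', binmat_path='binmat.npz',
                    bin_weights_path='bin_weights.npz', mixmat_path='mixmat.npz',
                    lmin=lmin, save_path='mix_weights.npz', fsky=0.3)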
def test_bin_weights(ss_block_filemask, binmat_path, lmax, lmin, ells, n_spec, save_path):
"""
Test the approach of binning weights by applying them to super-sample covariance blocks and measuring the ratio of
approximate to exact (full treatment) covariance.
Args:
ss_block_filemask (str): Path to input (unmixed, unbinned) super-sample covariance blocks, with {spec1_idx}
and {spec2_idx} placeholders.
binmat_path (str): Path to binning matrix.
lmax (int): Maximum l.
lmin (int): Minimum l.
ells (list): List of ells to evaluate the weights for, as given by get_avg_l.
n_spec (int): Number of power spectra.
save_path (str): Path to save covariance ratios to, for later plotting using plotting.cng_approx.
"""
# Load binning matrix
with np.load(binmat_path) as data:
pbl = data['pbl']
# Load the first block and use it to calculate weights
print('Calculating weights')
n_ell = lmax - lmin + 1
with np.load(ss_block_filemask.format(spec1_idx=0, spec2_idx=0)) as data:
first_block_unbinned = data['cov'][:n_ell, :n_ell]
first_block_binned = pbl @ first_block_unbinned @ pbl.T
ell_idx = np.subtract(ells, lmin)
first_block_sampled = first_block_unbinned[ell_idx, :][:, ell_idx]
weights = first_block_binned / first_block_sampled
assert np.allclose(first_block_sampled * weights, first_block_binned, atol=0)
assert np.allclose(weights, weights.T, atol=0)
# Loop over all other blocks and measure ratio between approximate and exact
n_blocks = n_spec * (n_spec + 1) // 2
n_bp = len(ells)
ratios = np.full((n_blocks - 1, n_bp, n_bp), np.NINF)
block_idx = 0
for spec1_idx in range(1, n_spec):
for spec2_idx in range(spec1_idx + 1):
print(f'Validating weights, block {block_idx + 1} / {n_blocks - 1}')
with np.load(ss_block_filemask.format(spec1_idx=spec1_idx, spec2_idx=spec2_idx)) as data:
block_unbinned = data['cov'][:n_ell, :n_ell]
block_binned = pbl @ block_unbinned @ pbl.T
block_sampled = block_unbinned[ell_idx, :][:, ell_idx]
ratio = weights * block_sampled / block_binned
# For symmetric blocks use nan for the lower triangle so no double-counting
if spec1_idx == spec2_idx:
ratio[np.tril_indices(n_bp, k=-1)] = np.nan
ratios[block_idx] = ratio
block_idx += 1
# Save ratios to disk
assert not np.any(np.isneginf(ratios))
header = (f'Output of {__file__}.test_bin_weights for ss_block_filemask = {ss_block_filemask}, '
f'binmat_path = {binmat_path}, lmax = {lmax}, ells = {ells}, n_spec = {n_spec}, at {time.strftime("%c")}')
np.savez_compressed(save_path, ratios=ratios, header=header)
print('Saved ' + save_path)
def test_mix_weights(unmixed_unbinned_ss_dir, mixed_binned_ss_dir, input_filemask, binmat_path, n_spec, save_path):
"""
Test the approach of mixing weights by applying them to super-sample covariance blocks and measuring the ratio of
approximate to exact (full treatment) covariance.
Args:
unmixed_unbinned_ss_dir (str): Path to directory containing unmixed unbinned super-sample covariance blocks.
mixed_binned_ss_dir (str): Path to directory containing mixed binned super-sample covariance blocks.
input_filemask (str): Filename of blocks within input directories, with {spec1_idx} and {spec2_idx}
placeholders.
binmat_path (str): Path to binning matrix.
n_spec (int): Number of power spectra.
save_path (str): Path to save covariance ratios to, for later plotting using plotting.cng_approx.
"""
# For the first block:
print('Calculating weights')
# Load unmixed unbinned block
first_block_filename = input_filemask.format(spec1_idx=0, spec2_idx=0)
with np.load(os.path.join(unmixed_unbinned_ss_dir, first_block_filename)) as data:
unmixed_unbinned_first_block = data['cov']
# Load mixed binned block
with np.load(os.path.join(mixed_binned_ss_dir, first_block_filename)) as data:
mixed_binned_first_block = data['cov_binned']
# Load binning matrix
with np.load(binmat_path) as data:
pbl = data['pbl']
# Apply binning matrix to truncated unmixed unbinned block to obtain unmixed binned block
n_ell = pbl.shape[1]
unmixed_unbinned_first_block = unmixed_unbinned_first_block[:n_ell, :n_ell]
unmixed_binned_first_block = pbl @ unmixed_unbinned_first_block @ pbl.T
# Divide mixed binned block by unmixed binned block to obtain effective fsky^2 matrix
eff_fsky2 = mixed_binned_first_block / unmixed_binned_first_block
# Loop over subsequent blocks
n_blocks = n_spec * (n_spec + 1) // 2
n_bp = pbl.shape[0]
ratios = np.full((n_blocks - 1, n_bp, n_bp), np.NINF)
block_idx = 0
for spec1_idx in range(1, n_spec):
'death')
killed_msg += "**, and **" + get_name(killed_players[1]) + "**, a **" + get_role(killed_players[1], 'death') + "**, were found. Those remaining mourn the tragedy."
else:
killed_msg += "The dead bodies of **" + "**, **".join([x + "**, a **" + get_role(x, 'death') for x in killed_players[:-1]]) + "**, and **" + killed_players[-1]
killed_msg += "**, a **" + get_role(killed_players[-1], 'death') + "**, were found. Those remaining mourn the tragedy."
if session[0] and await win_condition() == None:
await client.send_message(client.get_channel(GAME_CHANNEL), "Night lasted **{0:02d}:{1:02d}**. The villagers wake up and search the village.\n\n{2}".format(
night_elapsed.seconds // 60, night_elapsed.seconds % 60, killed_msg))
if session[0] and await win_condition() == None:
if len(totem_holders) == 0:
pass
elif len(totem_holders) == 1:
await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['hastotem']).format(get_name(totem_holders[0])))
elif len(totem_holders) == 2:
await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['hastotem2']).format(get_name(totem_holders[0]), get_name(totem_holders[1])))
else:
await client.send_message(client.get_channel(GAME_CHANNEL), random.choice(lang['hastotems']).format('**, **'.join([get_name(x) for x in totem_holders[:-1]]), get_name(totem_holders[-1])))
for player in killed_temp:
session[1][player][0] = False
for player in list(session[1].keys()):
session[1][player][2] = ''
if session[0] and await win_condition() == None:
await check_traitor()
# DAY
session[3][1] = datetime.now()
if session[0] and await win_condition() == None:
await client.send_message(client.get_channel(GAME_CHANNEL), "It is now **daytime**. Use `{}lynch <player>` to vote to lynch <player>.".format(BOT_PREFIX))
lynched_player = None
while await win_condition() == None and session[2] and lynched_player == None and session[0]:
able_players = [x for x in session[1] if session[1][x][0]]
vote_dict = {'abstain' : 0}
totem_dict = {} # For impatience and pacifism
for player in able_players:
totem_dict[player] = session[1][player][4].count('impatience_totem') - session[1][player][4].count('pacifism_totem')
vote_dict[player] = 0
able_voters = [x for x in able_players if totem_dict[x] == 0]
for player in able_voters:
if session[1][player][2] in vote_dict:
vote_dict[session[1][player][2]] += 1
if 'influence_totem' in session[1][player][4] and session[1][player][2] not in ['']:
vote_dict[session[1][player][2]] += 1
for player in [x for x in able_players if totem_dict[x] != 0]:
if totem_dict[player] < 0:
vote_dict['abstain'] += 1
else:
for p in [x for x in able_players if x != player]:
vote_dict[p] += 1
if vote_dict['abstain'] >= len([x for x in session[1] if session[1][x][0]]) / 2:
lynched_player = 'abstain'
max_votes = max([vote_dict[x] for x in vote_dict])
if max_votes >= len([x for x in session[1] if session[1][x][0]]) // 2 + 1:
for voted in vote_dict:
if vote_dict[voted] == max_votes:
lynched_player = voted
if (datetime.now() - session[3][1]).total_seconds() > DAY_TIMEOUT:
session[2] = False
await asyncio.sleep(0.1)
day_elapsed = datetime.now() - session[3][1]
session[4][1] += day_elapsed
lynched_msg = ""
if lynched_player:
if lynched_player == 'abstain':
for player in [x for x in totem_dict if totem_dict[x] < 0]:
lynched_msg += "**{}** meekly votes to not lynch anyone today.\n".format(get_name(player))
lynched_msg += "The village has agreed to not lynch anyone today."
await client.send_message(client.get_channel(GAME_CHANNEL), lynched_msg)
else:
for player in [x for x in totem_dict if totem_dict[x] > 0 and x != lynched_player]:
lynched_msg += "**{}** impatiently votes to lynch **{}**.\n".format(get_name(player), get_name(lynched_player))
lynched_msg += '\n'
if 'revealing_totem' in session[1][lynched_player][4]:
lynched_msg += 'As the villagers prepare to lynch **{0}**, their totem emits a brilliant flash of light! When the villagers are able to see again, '
lynched_msg += 'they discover that {0} has escaped! The left-behind totem seems to have taken on the shape of a **{1}**.'
lynched_msg = lynched_msg.format(get_name(lynched_player), get_role(lynched_player, 'role'))
await client.send_message(client.get_channel(GAME_CHANNEL), lynched_msg)
else:
lynched_msg += random.choice(lang['lynched']).format(get_name(lynched_player), get_role(lynched_player, 'death'))
await client.send_message(client.get_channel(GAME_CHANNEL), lynched_msg)
session[1][lynched_player][0] = False
member = client.get_server(WEREWOLF_SERVER).get_member(lynched_player)
if member:
await client.remove_roles(member, PLAYERS_ROLE)
if get_role(lynched_player, 'role') == 'fool' and 'revealing_totem' not in session[1][lynched_player][4]:
win_msg = "The fool has been lynched, causing them to win!\n\n" + end_game_stats()
win_msg += "\n\nThe winner is **{}**!".format(get_name(lynched_player))
await end_game(win_msg)
return
elif lynched_player == None and await win_condition() == None and session[0]:
await client.send_message(client.get_channel(GAME_CHANNEL), "Not enough votes were cast to lynch a player.")
# BETWEEN DAY AND NIGHT
session[2] = False
if session[0] and await win_condition() == None:
await client.send_message(client.get_channel(GAME_CHANNEL), "Day lasted **{0:02d}:{1:02d}**. The villagers, exhausted from the day's events, go to bed.".format(
day_elapsed.seconds // 60, day_elapsed.seconds % 60))
for player in list(session[1].keys()):
session[1][player][4][:] = [x for x in session[1][player][4] if x not in ['revealing_totem', 'influence_totem', 'impatience_totem', 'pacifism_totem']]
session[1][player][2] = ''
if session[0] and await win_condition() == None:
await check_traitor()
if session[0]:
win_msg = await win_condition()
await end_game(win_msg[1])
async def rate_limit(message):
if not (message.channel.is_private or message.content.startswith(BOT_PREFIX)) or message.author.id in ADMINS or message.author.id == OWNER_ID:
return False
global ratelimit_dict
global IGNORE_LIST
if message.author.id not in ratelimit_dict.keys():
ratelimit_dict[message.author.id] = 1
else:
ratelimit_dict[message.author.id] += 1
if ratelimit_dict[message.author.id] > IGNORE_THRESHOLD:
if not message.author.id in IGNORE_LIST:
IGNORE_LIST.append(message.author.id)
await log(1, message.author.name + " (" + message.author.id + ") was added to the ignore list for rate limiting.")
try:
await reply(message, "You've used {0} commands in the last {1} seconds; I will ignore you from now on.".format(IGNORE_THRESHOLD, TOKEN_RESET))
except discord.Forbidden:
await client.send_message(client.get_channel(GAME_CHANNEL), message.author.mention +
" used {0} commands in the last {1} seconds and will be ignored from now on.".format(IGNORE_THRESHOLD, TOKEN_RESET))
finally:
return True
if message.author.id in IGNORE_LIST or ratelimit_dict[message.author.id] > TOKENS_GIVEN:
if ratelimit_dict[message.author.id] > TOKENS_GIVEN:
await log(1, "Ignoring message from " + message.author.name + " (" + message.author.id + "): `" + message.content + "` since no tokens remaining")
return True
return False
async def do_rate_limit_loop():
await client.wait_until_ready()
global ratelimit_dict
while not client.is_closed:
for user in list(ratelimit_dict.keys()):
ratelimit_dict[user] = 0
await asyncio.sleep(TOKEN_RESET)
async def game_start_timeout_loop():
session[5] = datetime.now()
while not session[0] and len(session[1].keys()) > 0 and datetime.now() - session[5] < timedelta(seconds=GAME_START_TIMEOUT):
await asyncio.sleep(0.1)
if not session[0] and len(session[1].keys()) > 0:
await client.send_message(client.get_channel(GAME_CHANNEL), "{0}, the game has taken too long to start and has been cancelled. "
"If you are still here and would like to start a new game, please do `!join` again.".format(PLAYERS_ROLE.mention))
session[0] = False
perms = client.get_channel(GAME_CHANNEL).overwrites_for(client.get_server(WEREWOLF_SERVER).default_role)
perms.send_messages = True
await client.edit_channel_permissions(client.get_channel(GAME_CHANNEL), client.get_server(WEREWOLF_SERVER).default_role, perms)
for player in list(list(session[1].keys())):
del session[1][player]
member = client.get_server(WEREWOLF_SERVER).get_member(player)
if member:
await client.remove_roles(member, PLAYERS_ROLE)
session[3] = [0, 0]
session[4] = [timedelta(0), timedelta(0)]
async def backup_settings_loop():
while not client.is_closed:
print("BACKING UP SETTINGS")
with open(NOTIFY_FILE, 'w') as notify_file:
notify_file.write(','.join([x for x in notify_me if x != '']))
await asyncio.sleep(BACKUP_INTERVAL)
############## POST-DECLARATION STUFF ###############
# {command name : [function, permissions [in channel, in pm], description]}
commands = {'shutdown' : [cmd_shutdown, [2, 2], "```\n{0}shutdown takes no arguments\n\nShuts down the bot. Owner-only.```"],
'refresh' : [cmd_refresh, [1, 1], "```\n{0}refresh [<language file>]\n\nRefreshes the current language's language file from GitHub. Admin only.```"],
'ping' : [cmd_ping, [0, 0], "```\n{0}ping takes no arguments\n\nTests the bot\'s responsiveness.```"],
'eval' : [cmd_eval, [2, 2], "```\n{0}eval <evaluation string>\n\nEvaluates <evaluation string> using Python\'s eval() function and returns a result. Owner-only.```"],
'exec' : [cmd_exec, [2, 2], "```\n{0}exec <exec string>\n\nExecutes <exec string> using Python\'s exec() function. Owner-only.```"],
'help' : [cmd_help, [0, 0], "```\n{0}help <command>\n\nReturns hopefully helpful information on <command>. Try {0}list for a listing of commands.```"],
'list' : [cmd_list, [0, 0], "```\n{0}list takes no arguments\n\nDisplays a listing of commands. Try {0}help <command> for help regarding a specific command.```"],
'join' : [cmd_join, [0, 1], "```\n{0}join takes no arguments\n\nJoins the game if it has not started yet```"],
'j' : [cmd_join, [0, 1], "```\nAlias for {0}join.```"],
'leave' : [cmd_leave, [0, 1], "```\n{0}leave takes no arguments\n\nLeaves the current game. If you need to leave, please do it before the game starts.```"],
            'start' : [cmd_start, [0, 1], "```\n{0}start takes no arguments\n\nStarts the game. A game needs at least " + str(MIN_PLAYERS) + " players to start.```"],
'sync' : [cmd_sync, [1, 1], "```\n{0}sync takes no arguments\n\nSynchronizes all player roles and channel permissions with session.```"],
'op' : [cmd_op, [1, 1], "```\n{0}op takes no arguments\n\nOps yourself if you are an admin```"],
'deop' : [cmd_deop, [1, 1], "```\n{0}deop takes no arguments\n\nDeops yourself so you can play with the players ;)```"],
'fjoin' : [cmd_fjoin, [1, 1], "```\n{0}fjoin <mentions of users>\n\nForces each <mention> to join the game.```"],
'fleave' : [cmd_fleave, [1, 1], "```\n{0}fleave <mentions of users | all>\n\nForces each <mention> to leave the game. If the parameter is all, removes all players from the game.```"],
'role' : [cmd_role, [0, 0], "```\n{0}role [<role>|<number of players>]\n\nIf a <role> is given, displays a description of <role>. "
"If a <number of players> is given, displays the quantity of each role for the specified <number of players>. "
"If left blank, displays | |
# Source repository: bcwingnut/ROAR
from ROAR.utilities_module.utilities import dist_to_line_2d
from abc import abstractmethod
from collections import deque
import logging
from typing import Any
from ROAR.agent_module.agent import Agent
from ROAR.perception_module.detector import Detector
import cv2
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from PIL import Image
import os
def grayscale(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
return img
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
return cv2.addWeighted(initial_img, α, img, β, γ)
def read_img(img):
return mpimg.imread(img)
def to_hls(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
def to_hsv(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
def isolate_color_mask(img, low_thresh, high_thresh):
    assert (low_thresh >= 0).all() and (low_thresh <= 255).all()
    assert (high_thresh >= 0).all() and (high_thresh <= 255).all()
return cv2.inRange(img, low_thresh, high_thresh)
def adjust_gamma(image, gamma=1.0):
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
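# Worked example for adjust_gamma(): with gamma=2.0 the lookup table applies
# out = 255 * (in / 255) ** 0.5, so a mid-dark input of 64 maps to about 127,
# brightening shadows; gamma < 1.0 darkens the image instead.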
def save_imgs(img_list, labels, prefix="Test", op_folder="test_imgs_output"):
if not os.path.exists(op_folder):
os.mkdir(op_folder)
for img, label in zip(img_list, labels):
PATH = op_folder + "/" + prefix + "_" + label
Image.fromarray(img).save(PATH)
def display_imgs(img_list, labels=[],cols=2, fig_size=(15,15)):
if len(labels) > 0:
assert(len(img_list) == len(labels))
assert(len(img_list) > 0)
cmap = None
tot = len(img_list)
    rows = int(np.ceil(tot / cols))
plt.figure(figsize=fig_size)
for i in range(tot):
plt.subplot(rows, cols, i+1)
if len(img_list[i].shape) == 2:
cmap = 'gray'
if len(labels) > 0:
plt.title(labels[i])
plt.imshow(img_list[i], cmap=cmap)
plt.tight_layout()
plt.show()
def get_aoi(img):
rows, cols = img.shape[:2]
mask = np.zeros_like(img)
left_bottom = [cols * -0.1, rows]
right_bottom = [cols * 1.1, rows]
left_top = [cols * 0.4, rows * 0.6]
right_top = [cols * 0.6, rows * 0.6]
vertices = np.array([[left_bottom, left_top, right_top, right_bottom]], dtype=np.int32)
if len(mask.shape) == 2:
cv2.fillPoly(mask, vertices, 255)
else:
cv2.fillPoly(mask, vertices, (255, ) * mask.shape[2])
return cv2.bitwise_and(img, mask)
def get_hough_lines(img, rho=1, theta=np.pi/180, threshold=20, min_line_len=20, max_line_gap=300):
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
minLineLength=min_line_len, maxLineGap=max_line_gap)
return lines
def get_line_length(line):
for x1, y1, x2, y2 in line:
return np.sqrt((y2-y1)**2 + (x2-x1)**2)
def get_line_slope_intercept(line):
for x1, y1, x2, y2 in line:
if x2-x1 == 0:
return math.inf, 0
slope = (y2-y1)/(x2-x1)
intercept = y1 - slope * x1
return slope, intercept
def get_lines_slope_intecept(lines, slope_threshold = 0.3):
left_lines = []
right_lines = []
left_lengths = []
right_lengths = []
for line in lines:
slope, intercept = get_line_slope_intercept(line)
if slope == math.inf:
continue
line_len = get_line_length(line)
if slope < - slope_threshold:
left_lines.append((slope, intercept))
left_lengths.append(line_len)
elif slope > slope_threshold :
right_lines.append((slope, intercept))
right_lengths.append(line_len)
# average
left_avg = np.dot(left_lengths, left_lines)/np.sum(left_lengths) if len(left_lengths) > 0 else None
right_avg = np.dot(right_lengths, right_lines)/np.sum(right_lengths) if len(right_lengths) > 0 else None
return left_avg, right_avg
def convert_slope_intercept_to_line(y1, y2 , line, xmax):
if line is None:
return None
slope, intercept = line
x1 = int((y1- intercept)/slope)
if x1 < 0:
x1 = 0
y1 = int(intercept)
elif x1 > xmax:
x1 = xmax
y1 = int(x1 * slope + intercept)
else:
y1 = int(y1)
x2 = int((y2- intercept)/slope)
if x2 < 0:
x2 = 0
y2 = int(intercept)
elif x2 > xmax:
x2 = xmax
y2 = int(x2 * slope + intercept)
else:
y2 = int(y2)
return((x1, y1),(x2, y2))
def get_lane_lines(img, lines):
left_avg, right_avg = get_lines_slope_intecept(lines)
y1 = img.shape[0] - 1
y2 = img.shape[0] * 0.6
left_lane = convert_slope_intercept_to_line(y1, y2, left_avg, img.shape[1]-1)
right_lane = convert_slope_intercept_to_line(y1, y2, right_avg, img.shape[1]-1)
    # Lanes are too close together, meaning only one lane was actually detected
if left_lane is not None and right_lane is not None and abs(left_lane[0][0]-right_lane[0][0])<img.shape[1]/2:
return None, None
return left_lane, right_lane
def draw_weighted_lines(img, lines, color=[255, 0, 0], thickness=2, alpha = 1.0, beta = 0.95, gamma= 0):
mask_img = np.zeros_like(img)
for line in lines:
if line is not None:
cv2.line(mask_img, *line, color, thickness)
return weighted_img(mask_img, img, alpha, beta, gamma)
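# A minimal, hedged sketch of how the helper functions above chain together on a
# single RGB frame, outside of the LaneDetector class defined below. The file path
# "road.jpg" and the function name are placeholders for illustration; the Canny
# thresholds mirror the ones used in process_image() further down.
def detect_lanes_on_image(path="road.jpg"):
    bgr = cv2.imread(path)                      # OpenCV loads images as BGR
    if bgr is None:
        return None
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # the helpers assume RGB input
    edges = canny(grayscale(rgb), low_threshold=70, high_threshold=140)
    roi = get_aoi(edges)                        # keep only the road-shaped region
    lines = get_hough_lines(roi)
    if lines is None:
        return rgb
    left_lane, right_lane = get_lane_lines(rgb, lines)
    return draw_weighted_lines(rgb, [left_lane, right_lane], thickness=10)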
class LaneDetector(Detector):
def __init__(self, agent: Agent, mem_size: int = 5, **kwargs):
super().__init__(agent, **kwargs)
        self.left_lane = None # latest left lane coordinates in world frame
        self.right_lane = None # latest right lane coordinates in world frame
self.lane_center = None
self.dist_to_lane_center = 0 # distance to lane center, positive when car is on the right side of the lane center
# self.left_mem = deque(mem_size)
# self.right_mem = deque(mem_size)
def run_in_series(self, **kwargs) -> Any:
rgb_img = self.agent.front_rgb_camera.data
self.dist_to_lane_center = self.process_image(rgb_img, visualize=False)
if self.dist_to_lane_center is None:
self.lane_center = None
return self.dist_to_lane_center
def run_in_threaded(self, **kwargs):
pass
def process_image(self, image, visualize=False, **kwargs):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
if image is None or len(image.shape) != 3:
return None
original_img = np.copy(image)
# cv2.imshow("original img", original_img)
if visualize:
original_aoi_img = get_aoi(original_img)
# convert to grayscale
gray_img = grayscale(image)
# darken the grayscale
# darkened_img = adjust_gamma(gray_img, 1)
# cv2.imshow("darkened img", darkened_img)
# Color Selection
# white_mask = isolate_color_mask(to_hls(image), np.array([0, 0, 0], dtype=np.uint8), np.array([200, 255, 255], dtype=np.uint8))
# cv2.imshow("white mask", white_mask)
# yellow_mask = isolate_color_mask(to_hls(image), np.array([10, 0, 100], dtype=np.uint8), np.array([40, 255, 255], dtype=np.uint8))
# cv2.imshow("yellow mask", yellow_mask)
# mask = cv2.bitwise_or(white_mask, yellow_mask)
# cv2.imshow("mask", mask)
# colored_img = cv2.bitwise_and(darkened_img, darkened_img, mask=mask)
# Apply Gaussian Blur
# blurred_img = gaussian_blur(colored_img, kernel_size=7)
# Apply Canny edge filter
canny_img = canny(gray_img, low_threshold=70, high_threshold=140)
# cv2.imshow("canny_img", canny_img)
# Get Area of Interest
aoi_img = get_aoi(canny_img)
# Apply Hough lines
hough_lines = get_hough_lines(aoi_img)
# hough_img = draw_lines(original_img, hough_lines)
# cv2.imshow("hough_img", hough_img)
if hough_lines is None:
return None
# Extrapolation and averaging
left_lane, right_lane = get_lane_lines(original_img, hough_lines)
if left_lane is None or right_lane is None:
return None
# self.calculate_world_cords(np.array(left_lane+right_lane).T[::-1])
        # Convert to world frame
if left_lane is not None:
self.left_lane = self.calculate_world_cords(np.array(left_lane).T[::-1])
if right_lane is not None:
self.right_lane = self.calculate_world_cords(np.array(right_lane).T[::-1])
self.lane_center = (self.left_lane + self.right_lane) / 2
#car_center = self.agent.vehicle.transform.get_matrix()@np.r_[0,self.agent.vehicle.wheel_base/2,0,1]
dist_to_lane_center = dist_to_line_2d(np.array([self.agent.vehicle.transform.location.x,self.agent.vehicle.transform.location.z]), self.lane_center[0,[0,2]], self.lane_center[1,[0,2]])
if visualize:
processed_img = draw_weighted_lines(original_aoi_img, [left_lane, right_lane], thickness= 10)
if left_lane is not None:
for (x,y), coor_world in zip(left_lane, self.left_lane):
processed_img = cv2.putText(processed_img, str(coor_world), (x-100,y-20), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255, 255, 255), 1, cv2.LINE_AA)
if right_lane is not None:
for (x,y), coor_world in zip(right_lane, self.right_lane):
processed_img = cv2.putText(processed_img, str(coor_world), (x-100,y-30), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255, 255, 255), 1, cv2.LINE_AA)
if left_lane is not None and right_lane is not None:
center_x1 = (left_lane[0][0] + right_lane[0][0]) // 2
center_x2 = (left_lane[1][0] + right_lane[1][0]) // 2
center_y1 = (left_lane[0][1] + right_lane[0][1]) // 2
center_y2 = (left_lane[1][1] + right_lane[1][1]) // 2
lane_center = (center_x1, center_y1), (center_x2, center_y2)
processed_img = draw_weighted_lines(processed_img, [lane_center], thickness= 5, color=[0,255,0])
for (x,y), coor_world in zip(lane_center, self.lane_center):
processed_img = cv2.putText(processed_img, str(coor_world), (x-100,y-40), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255, 255, 255), 1, cv2.LINE_AA)
processed_img = cv2.putText(processed_img, str(dist_to_lane_center), (0, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
processed_img = cv2.putText(processed_img, str((self.agent.vehicle.transform.location.x,self.agent.vehicle.transform.location.z)), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow("processed img", processed_img)
return dist_to_lane_center
def calculate_world_cords(self, coords):
depth_img = self.agent.front_depth_camera.data
# cv2.imshow('depth', np.minimum(depth_img,0.01)*100)
raw_p2d = np.reshape(self._pix2xyz(depth_img=depth_img, i=coords[0], j=coords[1]), (3, np.shape(coords)[1])).T
cords_y_minus_z_x = np.linalg.inv(self.agent.front_depth_camera.intrinsics_matrix) @ raw_p2d.T
cords_xyz_1 = np.vstack([
cords_y_minus_z_x[2, :],
-cords_y_minus_z_x[1, :],
cords_y_minus_z_x[0, :],
np.ones((1, np.shape(cords_y_minus_z_x)[1]))
])
points: np.ndarray = self.agent.vehicle.transform.get_matrix() @ self.agent.front_depth_camera.transform.get_matrix() @ cords_xyz_1
points = points.T[:, :3]
return points
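    # calculate_world_cords(), in brief: each pixel (i, j) is scaled by its depth
    # reading, mapped through the inverse camera intrinsics to a camera-space point,
    # re-ordered into the simulator's (x, y, z) axis convention, and then pushed
    # through the camera and vehicle transforms to land in world coordinates. The
    # factor of 1000 in _pix2xyz() rescales the depth values before the projection
    # (a convention of this codebase rather than a general requirement).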
@staticmethod
def _pix2xyz(depth_img, i, j):
return [
depth_img[i, j] * j * 1000,
depth_img[i, j] * i * 1000,
point.
bd - Disable a breakpoint.
be - Enable a breakpoint.
bc - Clear (delete) a breakpoint.
bl - List all breakpoints.
load - Load session breakpoints.
save - save session breakpoints.
Misc:
-----
thread - Display threads or switch to a particular thread.
list - List source code.
stack - Display stack trace.
up - Go up one frame in stack.
down - Go down one frame in stack.
encoding - Set the source encoding used by exec and eval commands.
eval - Evaluate expression in the context of the current frame.
exec - Execute suite in the context of the current frame.
analyze - Toggle analyze last exception mode.
trap - Get or set "trap unhandled exceptions" mode.
fork - Get or set fork handling mode.
synchro - Get or set synchronicity mode.
License:
----------------
copyright - Print copyright notice.
license - Print license.
credits - Print credits information.
type help <topic> for further information."""
self.print_notice(help_notice)
def help_copyright(self):
_print("""copyright
Print copyright notice.""", self.m_stdout)
def help_license(self):
_print("""license
Print license.""", self.m_stdout)
def help_credits(self):
_print("""credits
Print credits information.""", self.m_stdout)
def help_help(self):
_print("""help <cmd>
Print help for command <cmd>.
On the other hand I guess that you already know that, don't you?""", self.m_stdout)
def help_analyze(self):
_print("""analyze
(shorthand - a)
Toggle analyze last exception mode.
The following changes to the debugger behavior apply in analyze mode:
The debugger prompt changes to 'Analyze>'.
'go', 'step', 'next', and 'return' are not allowed.
'thread' does not allow to change the thread focus.
'stack' allows no arguments.
'list' does not accept the '*' (all threads) argument
'stack', 'list', 'eval', 'exec', 'up', and 'down' operate on the thrown
exception.""", self.m_stdout)
help_a = help_analyze
def help_password(self):
_print("""password <password>
Get or set the channel password.
Communication between the console and the debuggee is always authenticated and
optionally encrypted. The password (A secret known to the console and the
debuggee alone) governs both security methods. The password is never
communicated between the two components on the communication channel.
A password is always required since unsecured communication between the
console and the debuggee might expose your machine to attacks.""", self.m_stdout)
def help_remote(self):
_print("""remote [True | False]
Get or set "allow connections from remote machines" mode.
When set to False:
Newly launched debuggees will listen on localhost only. In this mode, debugger
consoles on remote machines will NOT BE able to see or attach to the debuggee.
When set to True:
Newly launched debuggees will listen on INADDR_ANY. In this mode, debugger
consoles on remote machines will BE able to see and attach to the debuggee.""", self.m_stdout)
def help_trap(self):
_print("""trap [True | False]
Get or set "trap unhandled exceptions" mode.
When set to False:
Debuggee will ignore unhandled exceptions.
When set to True:
Debuggee will pause on unhandled exceptions for inspection.""", self.m_stdout)
def help_synchro(self):
_print("""synchro [True | False]
Get or set the synchronicity mode.
Traditional Python debuggers that use the inspected thread
(usually the main thread) to query or modify the script
name-space have to wait until the script hits a break-point.
Synchronicity allows the debugger to query and modify the
script name-space even if its threads are still running or
blocked in C library code by using special worker threads.
In some rare cases querying or modifying data in
synchronicity can crash the script. For example in some
Linux builds of wxPython querying the state of wx objects
from a thread other than the GUI thread can crash the
script. If this happens or if you want to restrict these
operations to the inspected thread, turn synchronicity off.
Default is True.""", self.m_stdout)
def help_fork(self):
_print("""fork [parent | child] [manual | auto]
Get or set fork handling mode.
Without arguments returns the current mode.
When 'parent' is specified the debugger will continue to debug the original
parent process after a fork.
When 'child' is specified the debugger will switch to debug the forked
child process after a fork.
When 'manual' is specified the debugger will pause before doing a fork.
When 'auto' is specified the debugger will go through the fork without
pausing and will make the forking decision based on the parent/child
setting.
WARNING:
On some Posix OS, such as FreeBSD, stepping into the child fork
can result in termination of the child process since the debugger
uses threading for its operation and on these systems threading and
forking can conflict.
""", self.m_stdout)
def help_stop(self):
_print("""stop
Shutdown the debugged script.""", self.m_stdout)
def help_launch(self):
_print("""launch [-k] <script_name> [<script_args>]
Start script <script_name> and attach to it.
-k Don't change the current working directory. By default the working
directory of the launched script is set to its folder.""", self.m_stdout)
def help_restart(self):
_print("""restart
Restart a script with same arguments from last launch.""", self.m_stdout)
def help_attach(self):
_print("""attach [<arg>]
Without an argument, 'attach' prints the scripts available for debugging
on the selected host. To select a host use the 'host' command. A script is
considered available for debugging only if it is using the rpdb2 module or
has been executed by the debugger.
If the debugger is already attached to a script, a special character will
mark that script in the list.
When <arg> is an integer the debugger will try to attach to a script with
that pid.
When <arg> is a string the debugger will try to attach to a script
with that name in the list.""", self.m_stdout)
def help_detach(self):
_print("""detach
Detach from the script the debugger is currently attached to. The detached
script will continue execution.""", self.m_stdout)
def help_break(self):
_print("""break
(shorthand - b)
Request script to break (pause execution as if it hit a breakpoint).
The 'break' command returns immediately but the break is only established
when an active thread submits to the debugger control. If a thread is
doing a system call or executing C code, this will happen only when
it returns to do python code.""", self.m_stdout)
help_b = help_break
def help_bp(self):
_print("""bp [<filename>':'] (<line> | <scope>) [',' <expr>]
Set a breakpoint.
<filename> - either the filename or the module name.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.
<expr> - condition to evaluate in the context of the frame. If it
evaluates to 'True' the break point will break into the debugger.
In case the <filename> is omitted, the current file is assumed. In this case
the debuggee has to be waiting at break point.
Examples:
bp test_file.py:20
bp test_file.py:MyClass.Foo
bp 304
Type 'help break' for more information on breakpoints and threads.""", self.m_stdout)
def help_be(self):
_print("""be (<id_list> | '*')
Enable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - Enable all breakpoints.""", self.m_stdout)
def help_bd(self):
_print("""bd (<id_list> | '*')
Disable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - disable all breakpoints.""", self.m_stdout)
def help_bc(self):
_print("""bc (<id_list> | '*')
Clear (delete) breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - clear all breakpoints.""", self.m_stdout)
def help_bl(self):
_print("""bl
List all breakpoints, sorted by their id.""", self.m_stdout)
def help_load(self):
_print("""load [<filename>]
Load breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
def help_save(self):
_print("""save [<filename>]
save breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
def help_go(self):
_print("""go [[<filename>':'] (<line> | <scope>)]
(shorthand - g)
Resume execution of a script that is waiting at break point.
If an argument is present, continue execution until that argument is reached.
<filename> - is the file name which basically is the script's name without
the '.py' extension.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.""", self.m_stdout)
help_g = help_go
def help_exit(self):
_print("""exit
Exit the debugger. If the debugger is attached to a script, the debugger
will attempt to detach from the script first.""", self.m_stdout)
help_EOF = help_exit
def help_host(self):
_print("""host [<arg>]
Without an argument, 'host' prints the current selected host.
With an argument <arg>, 'host' attempts to resolve <arg>
*v);
void glBindSampler(GLuint unit, GLuint sampler);
void glLineWidth(GLfloat width);
void glImageTransformParameterfvHP(GLenum target, GLenum pname, const GLfloat *params);
void glBindBufferOffsetEXT(GLenum target, GLuint index, GLuint buffer, GLintptr offset);
void glVertexArrayEdgeFlagOffsetEXT(GLuint vaobj, GLuint buffer, GLsizei stride, GLintptr offset);
void glGetIntegeri_v(GLenum target, GLuint index, GLint *data);
void glGetTransformFeedbackVarying(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
void glVDPAUMapSurfacesNV(GLsizei numSurfaces, const GLvdpauSurfaceNV *surfaces);
void glProgramLocalParameter4fvARB(GLenum target, GLuint index, const GLfloat *params);
void glGetTransformFeedbackVaryingNV(GLuint program, GLuint index, GLint *location);
void glWindowPos2iv(const GLint *v);
void glVertexStream1dATI(GLenum stream, GLdouble x);
void glColorFragmentOp2ATI(GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod);
void glFogiv(GLenum pname, const GLint *params);
GLuint64 glGetTextureHandleNV(GLuint texture);
void glLightModeliv(GLenum pname, const GLint *params);
void glDepthRangef(GLfloat n, GLfloat f);
void glGetFragmentMaterialivSGIX(GLenum face, GLenum pname, GLint *params);
void glVideoCaptureStreamParameterfvNV(GLuint video_capture_slot, GLuint stream, GLenum pname, const GLfloat *params);
void glDeleteProgramsARB(GLsizei n, const GLuint *programs);
void glWindowPos3fvARB(const GLfloat *v);
void glFeedbackBufferxOES(GLsizei n, GLenum type, const GLfixed *buffer);
void glGetTexBumpParameterfvATI(GLenum pname, GLfloat *param);
void glEnablei(GLenum target, GLuint index);
void glBindProgramARB(GLenum target, GLuint program);
void glEvalCoord1fv(const GLfloat *u);
void glProgramUniform3ui64vARB(GLuint program, GLint location, GLsizei count, const GLuint64 *value);
void glProgramLocalParameterI4uiNV(GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
void glMultiTexCoord2hvNV(GLenum target, const GLhalfNV *v);
void glSampleCoverageARB(GLfloat value, GLboolean invert);
void glProgramUniform2ui64vARB(GLuint program, GLint location, GLsizei count, const GLuint64 *value);
void glPixelDataRangeNV(GLenum target, GLsizei length, const void *pointer);
void glVertexStream3svATI(GLenum stream, const GLshort *coords);
void glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN(const GLuint *rc, const GLfloat *tc, const GLfloat *n, const GLfloat *v);
void glTexCoord3hvNV(const GLhalfNV *v);
void glSampleMaski(GLuint maskNumber, GLbitfield mask);
void glIndexFuncEXT(GLenum func, GLclampf ref);
void glPointParameteriNV(GLenum pname, GLint param);
void glUniformMatrix3x2fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glGetFramebufferParameterivEXT(GLuint framebuffer, GLenum pname, GLint *params);
void glUniform2i64ARB(GLint location, GLint64 x, GLint64 y);
void glGetInternalformativ(GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params);
void glNamedFramebufferTexture3DEXT(GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
void glVertexAttrib2dv(GLuint index, const GLdouble *v);
void glVariantubvEXT(GLuint id, const GLubyte *addr);
void glGetVertexAttribArrayObjectivATI(GLuint index, GLenum pname, GLint *params);
void glProgramUniformMatrix3fvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glEnableVertexArrayEXT(GLuint vaobj, GLenum array);
void glColorTableParameterfvSGI(GLenum target, GLenum pname, const GLfloat *params);
void glEdgeFlag(GLboolean flag);
void glProgramUniform1ui(GLuint program, GLint location, GLuint v0);
void glVertex3d(GLdouble x, GLdouble y, GLdouble z);
void glVertex3f(GLfloat x, GLfloat y, GLfloat z);
void glGetColorTable(GLenum target, GLenum format, GLenum type, void *table);
void glPrimitiveBoundingBoxARB(GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
void glMultiTexCoord4ivARB(GLenum target, const GLint *v);
void glVertex3s(GLshort x, GLshort y, GLshort z);
void glTexCoordP2ui(GLenum type, GLuint coords);
void glColorMaski(GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
void glPrimitiveRestartIndexNV(GLuint index);
void glRectxOES(GLfixed x1, GLfixed y1, GLfixed x2, GLfixed y2);
void glCopyNamedBufferSubData(GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
void glGenProgramsNV(GLsizei n, GLuint *programs);
void glFragmentLightfSGIX(GLenum light, GLenum pname, GLfloat param);
void glTexStorage3D(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
void glTextureParameteriv(GLuint texture, GLenum pname, const GLint *param);
void glNamedBufferDataEXT(GLuint buffer, GLsizeiptr size, const void *data, GLenum usage);
void glMultiTexCoord3fvARB(GLenum target, const GLfloat *v);
void glUniform2fvARB(GLint location, GLsizei count, const GLfloat *value);
void glUniformMatrix3x4fv(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glSubpixelPrecisionBiasNV(GLuint xbits, GLuint ybits);
void glNormalPointer(GLenum type, GLsizei stride, const void *pointer);
void glNamedFramebufferTexture(GLuint framebuffer, GLenum attachment, GLuint texture, GLint level);
void glVertexAttrib4NsvARB(GLuint index, const GLshort *v);
void glPassThrough(GLfloat token);
void glSecondaryColorP3ui(GLenum type, GLuint color);
GLboolean glIsImageHandleResidentARB(GLuint64 handle);
void glConvolutionParameterfEXT(GLenum target, GLenum pname, GLfloat params);
void glProgramUniformMatrix4x3fv(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
void glBegin(GLenum mode);
void glEvalCoord2dv(const GLdouble *u);
void glColor3ubv(const GLubyte *v);
void glFogCoordfvEXT(const GLfloat *coord);
void glVertexP3ui(GLenum type, GLuint value);
void glLightfv(GLenum light, GLenum pname, const GLfloat *params);
void glVertexAttribL3i64NV(GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z);
void glStencilClearTagEXT(GLsizei stencilTagBits, GLuint stencilClearTag);
GLboolean glTestObjectAPPLE(GLenum object, GLuint name);
void glGetActiveUniformName(GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformName);
void glTangentPointerEXT(GLenum type, GLsizei stride, const void *pointer);
void glUniform4ui64vNV(GLint location, GLsizei count, const GLuint64EXT *value);
void glDebugMessageEnableAMD(GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
void glGetFramebufferAttachmentParameteriv(GLenum target, GLenum attachment, GLenum pname, GLint *params);
void glCopyTexSubImage2DEXT(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
void glGenRenderbuffersEXT(GLsizei n, GLuint *renderbuffers);
void glNamedProgramLocalParameterI4ivEXT(GLuint program, GLenum target, GLuint index, const GLint *params);
void glMultiTexCoord2f(GLenum target, GLfloat s, GLfloat t);
void glGetMultiTexParameterIuivEXT(GLenum texunit, GLenum target, GLenum pname, GLuint *params);
void glNamedFramebufferDrawBuffer(GLuint framebuffer, GLenum buf);
void glTexParameteriv(GLenum target, GLenum pname, const GLint *params);
void glUniform4ivARB(GLint location, GLsizei count, const GLint *value);
void glMatrixOrthoEXT(GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
void glVertexArrayVertexBuffer(GLuint vaobj, GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
void glGetTexImage(GLenum target, GLint level, GLenum format, GLenum type, void *pixels);
void glProgramUniform4ui64vNV(GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
void glGetNamedStringivARB(GLint namelen, const GLchar *name, GLenum pname, GLint *params);
void glVertexAttribL1i64vNV(GLuint index, const GLint64EXT *v);
void glTransformFeedbackBufferBase(GLuint xfb, GLuint index, GLuint buffer);
void glIndexsv(const GLshort *c);
void glPointParameterivNV(GLenum pname, const GLint *params);
void glGetDetailTexFuncSGIS(GLenum target, GLfloat *points);
void glTexCoordP3uiv(GLenum type, const GLuint *coords);
void glReplacementCodeuiColor3fVertex3fvSUN(const GLuint *rc, const GLfloat *c, const GLfloat *v);
void glProgramLocalParameter4fARB(GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
void glBitmap(GLsizei width, GLsizei height, GLfloat xorig, GLfloat yorig, GLfloat xmove, GLfloat ymove, const GLubyte *bitmap);
void glColorSubTable(GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const void *data);
void glMultiTexGenfvEXT(GLenum texunit, GLenum coord, GLenum pname, const GLfloat *params);
void glGetNamedBufferSubData(GLuint buffer, GLintptr offset, GLsizeiptr size, void *data);
void glStencilFuncSeparateATI(GLenum frontfunc, GLenum backfunc, GLint ref, GLuint mask);
void glProgramUniform2iv(GLuint program, GLint location, GLsizei count, const GLint *value);
void glGetQueryiv(GLenum target, GLenum pname, GLint *params);
void glGetTransformFeedbackiv(GLuint xfb, GLenum pname, GLint *param);
void glListParameterivSGIX(GLuint list, GLenum pname, const GLint *params);
void glFragmentLightModelivSGIX(GLenum pname, const GLint *params);
void glTexCoord4i(GLint s, GLint t, GLint r, GLint q);
void glObjectLabel(GLenum identifier, GLuint name, GLsizei length, const GLchar *label);
void glProgramUniform3i64NV(GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
GLuint64 glGetTextureHandleARB(GLuint texture);
void glAlphaFragmentOp1ATI(GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod);
void glColorTableParameteriv(GLenum target, GLenum pname, const GLint *params);
void glDebugMessageControlKHR(GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
void glTexCoord4fColor4fNormal3fVertex4fSUN(GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
void glMultiTexImage3DEXT(GLenum texunit, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
void glCoverageModulationTableNV(GLsizei n, const GLfloat *v);
void glPointParameteriv(GLenum pname, const GLint *params);
void glMultiTexCoord4svARB(GLenum target, const GLshort *v);
void glNormal3fv(const GLfloat *v);
void glProgramUniformMatrix3x4dvEXT(GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
void glTexCoord1fv(const GLfloat *v);
void glNormal3xvOES(const GLfixed *coords);
void glGetActiveVaryingNV(GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
void glUniform1i64vARB(GLint location, GLsizei count, const GLint64 *value);
void glMultiTexCoord1dv(GLenum target, const GLdouble *v);
void glTexCoord3fv(const GLfloat *v);
void glGetFirstPerfQueryIdINTEL(GLuint *queryId);
void glProgramUniform1ui64vNV(GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
void glTextureMaterialEXT(GLenum face, GLenum mode);
void glMultiTexCoordP3uiv(GLenum texture, GLenum type, const GLuint *coords);
void glVertexAttribP3ui(GLuint index, GLenum type, GLboolean normalized, GLuint value);
void glColor3fVertex3fSUN(GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z);
void glProgramLocalParameterI4iNV(GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w);
void glVertexAttribL2dEXT(GLuint index, GLdouble x, GLdouble y);
void glGetPixelTransformParameterivEXT(GLenum target, GLenum pname, GLint *params);
void glTexCoord4xvOES(const GLfixed *coords);
void glDepthRange(GLdouble near, GLdouble far);
void glGetVertexAttribdvARB(GLuint index, GLenum pname, GLdouble *params);
void glGetColorTableParameterfv(GLenum target, GLenum pname, GLfloat *params);
void glDrawArraysInstancedEXT(GLenum mode, GLint start, GLsizei count, GLsizei primcount);
void glDisableClientStateIndexedEXT(GLenum array, GLuint index);
void glDrawBuffer(GLenum buf);
void glMultiDrawArraysIndirectBindlessNV(GLenum mode, const void *indirect, GLsizei drawCount, GLsizei stride, GLint vertexBufferCount);
void glGetnPixelMapusv(GLenum map, GLsizei bufSize, GLushort *values);
void glRasterPos3fv(const GLfloat *v);
void glClearBufferuiv(GLenum buffer, GLint drawbuffer, const GLuint *value);
void glReadnPixelsKHR(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
void glGetInternalformati64v(GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint64 *params);
void glShaderSourceARB(GLhandleARB shaderObj, GLsizei count, const GLcharARB **string, const GLint *length);
void glShaderOp3EXT(GLenum op, GLuint res, GLuint arg1, GLuint arg2, GLuint arg3);
void glWindowPos2dvMESA(const GLdouble *v);
void glClearIndex(GLfloat c);
void glProvokingVertexEXT(GLenum mode);
void glVertexAttribIPointer(GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
void glNormalStream3bvATI(GLenum stream, const GLbyte *coords);
void glFlush();
void glGetColorTableParameterivEXT(GLenum target, GLenum pname, GLint *params);
void glPresentFrameDualFillNV(GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLenum target1, GLuint fill1, GLenum target2, GLuint fill2, GLenum target3, GLuint fill3);
GLuint glGenVertexShadersEXT(GLuint range);
void glProgramUniformHandleui64vARB(GLuint program, GLint location, GLsizei count, const GLuint64 *values);
void glEvaluateDepthValuesARB();
void glDrawElementsInstancedBaseVertexBaseInstance(GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance);
void glGetTexLevelParameteriv(GLenum target, GLint level, GLenum pname, GLint *params);
GLCDFONT = [
[0x00, 0x00, 0x00, 0x00, 0x00],
[0x3E, 0x5B, 0x4F, 0x5B, 0x3E],
[0x3E, 0x6B, 0x4F, 0x6B, 0x3E],
[0x1C, 0x3E, 0x7C, 0x3E, 0x1C],
[0x18, 0x3C, 0x7E, 0x3C, 0x18],
[0x1C, 0x57, 0x7D, 0x57, 0x1C],
[0x1C, 0x5E, 0x7F, 0x5E, 0x1C],
[0x00, 0x18, 0x3C, 0x18, 0x00],
[0xFF, 0xE7, 0xC3, 0xE7, 0xFF],
[0x00, 0x18, 0x24, 0x18, 0x00],
[0xFF, 0xE7, 0xDB, 0xE7, 0xFF],
[0x30, 0x48, 0x3A, 0x06, 0x0E],
[0x26, 0x29, 0x79, 0x29, 0x26],
[0x40, 0x7F, 0x05, 0x05, 0x07],
[0x40, 0x7F, 0x05, 0x25, 0x3F],
[0x5A, 0x3C, 0xE7, 0x3C, 0x5A],
[0x7F, 0x3E, 0x1C, 0x1C, 0x08],
[0x08, 0x1C, 0x1C, 0x3E, 0x7F],
[0x14, 0x22, 0x7F, 0x22, 0x14],
[0x5F, 0x5F, 0x00, 0x5F, 0x5F],
[0x06, 0x09, 0x7F, 0x01, 0x7F],
[0x00, 0x66, 0x89, 0x95, 0x6A],
[0x60, 0x60, 0x60, 0x60, 0x60],
[0x94, 0xA2, 0xFF, 0xA2, 0x94],
[0x08, 0x04, 0x7E, 0x04, 0x08],
[0x10, 0x20, 0x7E, 0x20, 0x10],
[0x08, 0x08, 0x2A, 0x1C, 0x08],
[0x08, 0x1C, 0x2A, 0x08, 0x08],
[0x1E, 0x10, 0x10, 0x10, 0x10],
[0x0C, 0x1E, 0x0C, 0x1E, 0x0C],
[0x30, 0x38, 0x3E, 0x38, 0x30],
[0x06, 0x0E, 0x3E, 0x0E, 0x06],
[0x00, 0x00, 0x00, 0x00, 0x00],
[0x00, 0x00, 0x5F, 0x00, 0x00],
[0x00, 0x07, 0x00, 0x07, 0x00],
[0x14, 0x7F, 0x14, 0x7F, 0x14],
[0x24, 0x2A, 0x7F, 0x2A, 0x12],
[0x23, 0x13, 0x08, 0x64, 0x62],
[0x36, 0x49, 0x56, 0x20, 0x50],
[0x00, 0x08, 0x07, 0x03, 0x00],
[0x00, 0x1C, 0x22, 0x41, 0x00],
[0x00, 0x41, 0x22, 0x1C, 0x00],
[0x2A, 0x1C, 0x7F, 0x1C, 0x2A],
[0x08, 0x08, 0x3E, 0x08, 0x08],
[0x00, 0x80, 0x70, 0x30, 0x00],
[0x08, 0x08, 0x08, 0x08, 0x08],
[0x00, 0x00, 0x60, 0x60, 0x00],
[0x20, 0x10, 0x08, 0x04, 0x02],
[0x3E, 0x51, 0x49, 0x45, 0x3E],
[0x00, 0x42, 0x7F, 0x40, 0x00],
[0x72, 0x49, 0x49, 0x49, 0x46],
[0x21, 0x41, 0x49, 0x4D, 0x33],
[0x18, 0x14, 0x12, 0x7F, 0x10],
[0x27, 0x45, 0x45, 0x45, 0x39],
[0x3C, 0x4A, 0x49, 0x49, 0x31],
[0x41, 0x21, 0x11, 0x09, 0x07],
[0x36, 0x49, 0x49, 0x49, 0x36],
[0x46, 0x49, 0x49, 0x29, 0x1E],
[0x00, 0x00, 0x14, 0x00, 0x00],
[0x00, 0x40, 0x34, 0x00, 0x00],
[0x00, 0x08, 0x14, 0x22, 0x41],
[0x14, 0x14, 0x14, 0x14, 0x14],
[0x00, 0x41, 0x22, 0x14, 0x08],
[0x02, 0x01, 0x59, 0x09, 0x06],
[0x3E, 0x41, 0x5D, 0x59, 0x4E],
[0x7C, 0x12, 0x11, 0x12, 0x7C],
[0x7F, 0x49, 0x49, 0x49, 0x36],
[0x3E, 0x41, 0x41, 0x41, 0x22],
[0x7F, 0x41, 0x41, 0x41, 0x3E],
[0x7F, 0x49, 0x49, 0x49, 0x41],
[0x7F, 0x09, 0x09, 0x09, 0x01],
[0x3E, 0x41, 0x41, 0x51, 0x73],
[0x7F, 0x08, 0x08, 0x08, 0x7F],
[0x00, 0x41, 0x7F, 0x41, 0x00],
[0x20, 0x40, 0x41, 0x3F, 0x01],
[0x7F, 0x08, 0x14, 0x22, 0x41],
[0x7F, 0x40, 0x40, 0x40, 0x40],
[0x7F, 0x02, 0x1C, 0x02, 0x7F],
[0x7F, 0x04, 0x08, 0x10, 0x7F],
[0x3E, 0x41, 0x41, 0x41, 0x3E],
[0x7F, 0x09, 0x09, 0x09, 0x06],
[0x3E, 0x41, 0x51, 0x21, 0x5E],
[0x7F, 0x09, 0x19, 0x29, 0x46],
[0x26, 0x49, 0x49, 0x49, 0x32],
[0x03, 0x01, 0x7F, 0x01, 0x03],
[0x3F, 0x40, 0x40, 0x40, 0x3F],
[0x1F, 0x20, 0x40, 0x20, 0x1F],
[0x3F, 0x40, 0x38, 0x40, 0x3F],
[0x63, 0x14, 0x08, 0x14, 0x63],
[0x03, 0x04, 0x78, 0x04, 0x03],
[0x61, 0x59, 0x49, 0x4D, 0x43],
[0x00, 0x7F, 0x41, 0x41, 0x41],
[0x02, 0x04, 0x08, 0x10, 0x20],
[0x00, 0x41, 0x41, 0x41, 0x7F],
[0x04, 0x02, 0x01, 0x02, 0x04],
[0x40, 0x40, 0x40, 0x40, 0x40],
[0x00, 0x03, 0x07, 0x08, 0x00],
[0x20, 0x54, 0x54, 0x78, 0x40],
[0x7F, 0x28, 0x44, 0x44, 0x38],
[0x38, 0x44, 0x44, 0x44, 0x28],
[0x38, 0x44, 0x44, 0x28, 0x7F],
[0x38, 0x54, 0x54, 0x54, 0x18],
[0x00, 0x08, 0x7E, 0x09, 0x02],
[0x18, 0xA4, 0xA4, 0x9C, 0x78],
[0x7F, 0x08, 0x04, 0x04, 0x78],
[0x00, 0x44, 0x7D, 0x40, 0x00],
[0x20, 0x40, 0x40, 0x3D, 0x00],
[0x7F, 0x10, 0x28, 0x44, 0x00],
[0x00, 0x41, 0x7F, 0x40, 0x00],
[0x7C, 0x04, 0x78, 0x04, 0x78],
[0x7C, 0x08, 0x04, 0x04, 0x78],
[0x38, 0x44, 0x44, 0x44, 0x38],
[0xFC, 0x18, 0x24, 0x24, 0x18],
[0x18, 0x24, 0x24, 0x18, 0xFC],
[0x7C, 0x08, 0x04, 0x04, 0x08],
[0x48, 0x54, 0x54, 0x54, 0x24],
[0x04, 0x04, 0x3F, 0x44, 0x24],
[0x3C, 0x40, 0x40, 0x20, 0x7C],
[0x1C, 0x20, 0x40, 0x20, 0x1C],
[0x3C, 0x40, 0x30, 0x40, 0x3C],
[0x44, 0x28, 0x10, 0x28, 0x44],
[0x4C, 0x90, 0x90, 0x90, 0x7C],
[0x44, 0x64, 0x54, 0x4C, 0x44],
[0x00, 0x08, 0x36, 0x41, 0x00],
[0x00, 0x00, 0x77, 0x00, 0x00],
[0x00, 0x41, 0x36, 0x08, 0x00],
[0x02, 0x01, 0x02, 0x04, 0x02],
[0x3C, 0x26, 0x23, 0x26, 0x3C],
[0x1E, 0xA1, 0xA1, 0x61, 0x12],
[0x3A, 0x40, 0x40, 0x20, 0x7A],
[0x38, 0x54, 0x54, 0x55, 0x59],
[0x21, 0x55, 0x55, 0x79, 0x41],
[0x21, 0x54, 0x54, 0x78, 0x41],
[0x21, 0x55, 0x54, 0x78, 0x40],
[0x20, 0x54, 0x55, 0x79, 0x40],
[0x0C, 0x1E, 0x52, 0x72, 0x12],
[0x39, 0x55, 0x55, 0x55, 0x59],
[0x39, 0x54, 0x54, 0x54, 0x59],
[0x39, 0x55, 0x54, 0x54, 0x58],
[0x00, 0x00, 0x45, 0x7C, 0x41],
[0x00, 0x02, 0x45, 0x7D, 0x42],
[0x00, 0x01, 0x45, 0x7C, 0x40],
[0xF0, 0x29, 0x24, 0x29, 0xF0],
[0xF0, 0x28, 0x25, 0x28, 0xF0],
[0x7C, 0x54, 0x55, 0x45, 0x00],
[0x20, 0x54, 0x54, 0x7C, 0x54],
[0x7C, 0x0A, 0x09, 0x7F, 0x49],
[0x32, 0x49, 0x49, 0x49, 0x32],
[0x32, 0x48, 0x48, 0x48, 0x32],
[0x32, 0x4A, 0x48, 0x48, 0x30],
[0x3A, 0x41, 0x41, 0x21, 0x7A],
[0x3A, 0x42, 0x40, 0x20, 0x78],
[0x00, 0x9D, 0xA0, 0xA0, 0x7D],
[0x39, 0x44, 0x44, 0x44, 0x39],
[0x3D, 0x40, 0x40, 0x40, 0x3D],
[0x3C, 0x24, 0xFF, 0x24, 0x24],
[0x48, 0x7E, 0x49, 0x43, 0x66],
[0x2B, 0x2F, 0xFC, 0x2F, 0x2B],
[0xFF, 0x09, 0x29, 0xF6, 0x20],
[0xC0, 0x88, 0x7E, 0x09, 0x03],
[0x20, 0x54, 0x54, 0x79, 0x41],
[0x00, 0x00, 0x44, 0x7D, 0x41],
[0x30, 0x48, 0x48, 0x4A, 0x32],
[0x38, 0x40, 0x40, 0x22, 0x7A],
[0x00, 0x7A, 0x0A, 0x0A, 0x72],
[0x7D, 0x0D, 0x19, 0x31, 0x7D],
[0x26, 0x29, 0x29, 0x2F, 0x28],
[0x26, 0x29, 0x29, 0x29, 0x26],
[0x30, 0x48, 0x4D, 0x40, 0x20],
[0x38, 0x08, 0x08, 0x08, 0x08],
[0x08, 0x08, 0x08, 0x08, 0x38],
[0x2F, 0x10, 0xC8, 0xAC, 0xBA],
[0x2F, 0x10, 0x28, 0x34, 0xFA],
[0x00, 0x00, 0x7B, 0x00, 0x00],
[0x08, 0x14, 0x2A, 0x14, 0x22],
[0x22, 0x14, 0x2A, 0x14, 0x08],
[0xAA, 0x00, 0x55, 0x00, 0xAA],
[0xAA, 0x55, 0xAA, 0x55, 0xAA],
[0x00, 0x00, 0x00, 0xFF, 0x00],
[0x10, 0x10, 0x10, 0xFF, 0x00],
[0x14, 0x14, 0x14, 0xFF, 0x00],
[0x10, 0x10, 0xFF, 0x00, 0xFF],
[0x10, 0x10, 0xF0, 0x10, 0xF0],
[0x14, 0x14, 0x14, 0xFC, 0x00],
[0x14, 0x14, 0xF7, 0x00, 0xFF],
[0x00, 0x00, 0xFF, 0x00, 0xFF],
[0x14, 0x14, 0xF4, 0x04, 0xFC],
[0x14, 0x14, 0x17, 0x10, 0x1F],
[0x10, 0x10, 0x1F, 0x10, 0x1F],
[0x14, 0x14, 0x14, 0x1F, 0x00],
[0x10, 0x10, 0x10, 0xF0, 0x00],
[0x00, 0x00, 0x00, 0x1F, 0x10],
[0x10, 0x10, 0x10, 0x1F, 0x10],
[0x10, 0x10, 0x10, 0xF0, 0x10],
[0x00, 0x00, 0x00, 0xFF, 0x10],
[0x10, 0x10, 0x10, 0x10, 0x10],
[0x10, 0x10, 0x10, 0xFF, 0x10],
[0x00, 0x00, 0x00, 0xFF, 0x14],
[0x00, 0x00, 0xFF, 0x00, 0xFF],
[0x00, 0x00, 0x1F, 0x10, 0x17],
[0x00, 0x00, 0xFC, 0x04, 0xF4],
[0x14, 0x14, 0x17, 0x10, 0x17],
[0x14, 0x14, 0xF4, 0x04, 0xF4],
[0x00, 0x00, 0xFF, 0x00, 0xF7],
[0x14, 0x14, 0x14, 0x14, 0x14],
[0x14, 0x14, 0xF7, 0x00, 0xF7],
[0x14, 0x14, 0x14, 0x17, 0x14],
[0x10, 0x10, 0x1F, 0x10, 0x1F],
[0x14, 0x14, 0x14, 0xF4, 0x14],
[0x10, 0x10, 0xF0, 0x10, 0xF0],
[0x00, 0x00, 0x1F, 0x10, 0x1F],
[0x00, 0x00, 0x00, 0x1F, 0x14],
[0x00, 0x00, 0x00, 0xFC, 0x14],
[0x00, 0x00, 0xF0, 0x10, 0xF0],
[0x10, 0x10, 0xFF, 0x10, 0xFF],
[0x14, 0x14, 0x14, 0xFF, 0x14],
[0x10, 0x10, 0x10, 0x1F, 0x00],
[0x00, 0x00, 0x00, 0xF0, 0x10],
[0xFF, 0xFF, 0xFF, 0xFF, 0xFF],
[0xF0, 0xF0, 0xF0, 0xF0, 0xF0],
[0xFF, 0xFF, 0xFF, 0x00, 0x00],
[0x00, 0x00, 0x00, 0xFF, 0xFF],
[0x0F, 0x0F, 0x0F, 0x0F, 0x0F],
[0x38, 0x44, 0x44, 0x38, 0x44],
[0x7C, 0x2A, 0x2A, 0x3E, 0x14],
[0x7E, 0x02, 0x02, 0x06, 0x06],
[0x02, 0x7E, 0x02, 0x7E, 0x02],
[0x63, 0x55, 0x49, 0x41, 0x63],
[0x38, 0x44, 0x44, 0x3C, 0x04],
[0x40, 0x7E, 0x20, 0x1E, 0x20],
[0x06, 0x02, 0x7E, 0x02, 0x02],
[0x99, 0xA5, 0xE7, 0xA5, 0x99],
[0x1C, 0x2A, 0x49, 0x2A, 0x1C],
[0x4C, 0x72, 0x01, 0x72, 0x4C],
[0x30, 0x4A, 0x4D, 0x4D, 0x30],
[0x30, 0x48, 0x78, 0x48, 0x30],
[0xBC, 0x62, 0x5A, 0x46, 0x3D],
[0x3E, 0x49, 0x49, 0x49, 0x00],
[0x7E, 0x01, 0x01, 0x01, 0x7E],
[0x2A, 0x2A, 0x2A, 0x2A, 0x2A],
[0x44, 0x44, 0x5F, 0x44, 0x44],
[0x40, 0x51, 0x4A, 0x44, 0x40],
[0x40, 0x44, 0x4A, 0x51, 0x40],
[0x00, 0x00, 0xFF, 0x01, 0x03],
[0xE0, 0x80, 0xFF, 0x00, 0x00],
[0x08, 0x08, 0x6B, 0x6B, 0x08],
[0x36, 0x12, 0x36, 0x24, 0x36],
[0x06, 0x0F, 0x09, 0x0F, 0x06],
[0x00, 0x00, 0x18, 0x18, 0x00],
[0x00, 0x00, 0x10, 0x10, 0x00],
[0x30, 0x40, 0xFF, 0x01, 0x01],
[0x00, 0x1F, 0x01, 0x01, 0x1E],
[0x00, 0x19, 0x1D, 0x17, 0x12],
[0x00, 0x3C, 0x3C, 0x3C, 0x3C],
[0x00, 0x00, 0x00, 0x00, 0x00],
[0x00, 0x00,
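# A small, hedged sketch of how one GLCDFONT entry can be rasterised, assuming the
# usual GLCD layout: each glyph is 5 column bytes with bit 0 as the topmost pixel
# and bit 7 at the bottom, and character codes index straight into the table. This
# is an illustration only, not code from the project above.
def render_glyph(char_code):
    """Return the glyph for char_code as 8 strings of '#' and '.'."""
    columns = GLCDFONT[char_code]
    rows = []
    for bit in range(8):  # top row first
        rows.append("".join("#" if (col >> bit) & 1 else "." for col in columns))
    return rows

# Example usage: for line in render_glyph(ord("A")): print(line)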
cmds.setAttr('%s.scaleX' % mirror_group, -1)
deform.create_wrap(home, other_target_mesh_duplicate)
cmds.blendShape(other_mesh_duplicate, home, foc = True, w = [0, 1])
cmds.delete(other_target_mesh_duplicate, ch = True)
cmds.delete(mirror_group, other_mesh_duplicate)
return other_target_mesh, other_target_mesh_duplicate
def _delete_connected_nodes(self):
nodes = self._get_connected_nodes()
if nodes:
cmds.delete(nodes)
def _create_shader(self, mesh):
shader_name = 'pose_blinn'
shader_name = shade.apply_new_shader(mesh, type_of_shader = 'blinn', name = shader_name)
cmds.setAttr('%s.color' % shader_name, 0.4, 0.6, 0.4, type = 'double3' )
cmds.setAttr('%s.specularColor' % shader_name, 0.3, 0.3, 0.3, type = 'double3' )
cmds.setAttr('%s.eccentricity' % shader_name, .3 )
def _get_blendshape(self, mesh):
return deform.find_deformer_by_type(mesh, 'blendShape')
def _get_current_mesh(self, mesh_index):
mesh = None
if mesh_index == None:
return
#mesh = self.get_mesh(self.mesh_index)
if mesh_index != None:
mesh = self.get_mesh(mesh_index)
if not mesh:
return
return mesh
def _update_inbetween(self):
poses = PoseManager().get_poses()
weights = []
for pose in poses:
pose_instance = PoseManager().get_pose_instance(pose)
weight = pose_instance.get_inbetween_weight()
weights.append(weight)
for pose in poses:
pass
def _replace_side(self, string_value, left_right = True):
if string_value == None:
return
split_value = string_value.split('|')
split_value.reverse()
fixed = []
for value in split_value:
other = ''
if left_right:
start, end = util.find_special('L', value, 'end')
if start != None:
other = util.replace_string(value, 'R', start, end)
if not other:
start, end = util.find_special('_L_', value, 'last')
if start != None:
other = util.replace_string(value, '_R_', start, end)
if not other:
start, end = util.find_special('lf_', value, 'start')
if start != None:
other = util.replace_string(value, 'rt_', start, end)
if not other:
start,end = util.find_special('l_', value, 'start')
if start != None:
other = util.replace_string(value, 'r_', start, end)
if not left_right:
start, end = util.find_special('R', value, 'end')
if start != None:
other = util.replace_string(value, 'L', start, end)
if not other:
start, end = util.find_special('_R_', value, 'last')
if start != None:
other = util.replace_string(value, '_L_', start, end)
if not other:
start, end = util.find_special('rt_', value, 'first')
if start != None:
other = util.replace_string(value, 'lf_', start, end)
if not other:
start,end = util.find_special('r_', value, 'first')
if start != None:
other = util.replace_string(value, 'l_', start, end)
fixed.append(other)
if len(fixed) == 1:
return fixed[0]
fixed.reverse()
fixed = '|'.join(fixed)
return fixed
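    # _replace_side() in practice (hypothetical node names, not from any specific rig):
    #   'arm_L'        -> 'arm_R'         (trailing L / R)
    #   'ctrl_L_elbow' -> 'ctrl_R_elbow'  (embedded _L_ / _R_)
    #   'lf_hand'      -> 'rt_hand'       (lf_ / rt_ prefix)
    #   'l_foot'       -> 'r_foot'        (l_ / r_ prefix)
    # Full DAG paths are handled one '|' segment at a time, and left_right=False
    # performs the same substitutions in the opposite direction (right to left).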
def _set_visibility(self, node, bool_value):
if bool_value:
try:
cmds.setAttr('%s.lodVisibility' % node, 1)
cmds.setAttr('%s.visibility' % node, 1)
except:
pass
#util.show( 'Could not set visibility on %s.' % node )
if not bool_value:
try:
cmds.setAttr('%s.lodVisibility' % node, 0)
cmds.setAttr('%s.visibility' % node, 0)
except:
pass
#util.show( 'Could not set visibility on %s.' % node )
def _initialize_blendshape_node(self, target_mesh):
blend = blendshape.BlendShape()
blendshape_node = self._get_blendshape(target_mesh)
referenced = False
if blendshape_node:
referenced = core.is_referenced(blendshape_node)
if blendshape_node and not referenced:
blend.set(blendshape_node)
if not blendshape_node or referenced:
blend.create(target_mesh)
if referenced:
skin_cluster = deform.find_deformer_by_type(target_mesh, 'skinCluster')
if skin_cluster:
try:
cmds.reorderDeformers(skin_cluster, blend.blendshape, target_mesh)
except:
pass
if not skin_cluster:
cmds.reorderDeformers(blend.blendshape, blendshape_node, target_mesh)
return blend
#--- pose
def set_pose(self, pose_name):
"""
Set the pose that the instance should work on.
Args:
pose_name (str): The name of a pose.
"""
super(PoseBase, self).set_pose(pose_name)
if self.pose_control == pose_name:
self._refresh_pose_control()
self._refresh_meshes()
def rename(self, description):
"""
Rename the pose and the target on the blendshape.
Args:
description (str): The new name for the pose.
Returns:
str: The new name.
"""
old_description = util.clean_name_string( self.description )
super(PoseBase, self).rename(description)
meshes = self.get_target_meshes()
for mesh in meshes:
blendshape_node = self._get_blendshape(mesh)
if blendshape_node:
blend = blendshape.BlendShape(blendshape_node)
blend.rename_target(old_description, self.description)
self._rename_nodes()
return self.pose_control
def delete(self):
"""
Delete the pose and pose related nodes.
"""
self.delete_blend_input()
self._delete_connected_nodes()
super(PoseBase, self).delete()
#--- mesh
def has_a_mesh(self):
"""
Check if the pose has a mesh.
Returns:
bool: Whether the pose has a mesh or not.
"""
if self._get_mesh_message_attributes():
return True
return False
def add_mesh(self, mesh, toggle_vis = True):
"""
Add a mesh to the pose.
Args:
mesh (str): The name of a mesh.
toggle_vis (bool): Whether to toggle the mesh's visibility.
Returns:
str: The name of the created pose mesh for sculpting. Returns False if it failed.
"""
mesh = cmds.ls(mesh, l = True)
if not mesh:
return
if len(mesh) >= 1:
mesh = mesh[0]
        if mesh.find('.vtx') > -1:
mesh = mesh.split('.')[0]
if not geo.get_mesh_shape(mesh):
return False
if self._check_if_mesh_connected(mesh):
return False
if self._check_if_mesh_is_child(mesh):
return False
target_meshes = self.get_target_meshes()
if mesh in target_meshes:
index = self.get_target_mesh_index(mesh)
return self.get_mesh(index)
deform.set_envelopes(mesh, 0, ['skinCluster', 'blendShape', 'cluster'])
pose_mesh = cmds.duplicate(mesh, n = core.inc_name('mesh_%s_1' % self.pose_control))[0]
deform.set_envelopes(mesh, 1)
self._create_shader(pose_mesh)
attr.unlock_attributes(pose_mesh)
cmds.parent(pose_mesh, self.pose_control)
self._connect_mesh(pose_mesh)
string_var = attr.MayaStringVariable('mesh_pose_source')
string_var.create(pose_mesh)
string_var.set_value(mesh)
if toggle_vis:
index = self.get_target_mesh_index(mesh)
self.toggle_vis(index)
return pose_mesh
def remove_mesh(self, mesh):
"""
Remove a mesh from the pose.
Args:
mesh (str): The name of a mesh affected by the pose.
"""
index = self.get_target_mesh_index(mesh)
mesh = self.get_mesh(index)
self.visibility_off(mesh)
if index == None:
return
if mesh == None:
return
if mesh and cmds.objExists(mesh):
blend_name = self.get_blendshape(index)
if blend_name:
nicename = core.get_basename(self.pose_control, remove_namespace=True)
blend = blendshape.BlendShape(blend_name)
blend.remove_target(nicename)
attributes = self._get_mesh_message_attributes()
attribute = attributes[index]
cmds.delete(mesh)
attr.disconnect_attribute(attribute)
def get_mesh(self, index):
"""
Get the sculpt mesh at the index. Sculpt mesh is the mesh used to generate the delta.
Args:
index (int): The index of a sculpt mesh.
Returns:
str: The name of the sculpt mesh at the index.
"""
if index == None:
return
mesh_attributes = self._get_mesh_message_attributes()
if not mesh_attributes:
return
if index > (len(mesh_attributes)-1):
return
mesh = attr.get_attribute_input('%s.%s' % (self.pose_control, mesh_attributes[index]), True)
return mesh
def get_mesh_count(self):
"""
Get the number of meshes the pose affects.
Returns:
int
"""
attrs = self._get_mesh_message_attributes()
if attrs:
return len(attrs)
return 0
def get_target_meshes(self):
"""
Get the meshes affected by the pose.
Returns:
list: A list of the names of meshes.
"""
meshes = []
for inc in range(0, self._get_mesh_count()):
mesh = self.get_mesh(inc)
mesh = self.get_target_mesh(mesh)
meshes.append(mesh)
return meshes
def get_sculpt_mesh(self, target_mesh):
index = self.get_target_mesh_index(target_mesh)
return self.get_mesh(index)
def get_target_mesh(self, mesh):
"""
Get the mesh that the sculpt mesh affects.
Args:
mesh (str): The name of a mesh affected by the pose.
Returns:
str: The name of a mesh.
"""
long_name = None
if cmds.objExists('%s.mesh_pose_source' % mesh):
target_mesh = cmds.getAttr('%s.mesh_pose_source' % mesh)
namespace = core.get_namespace(self.pose_control)
if namespace:
basename = core.get_basename(target_mesh, remove_namespace = True)
target_mesh = '%s:%s' % (namespace, basename)
if cmds.objExists(target_mesh):
return target_mesh
else:
return None
long_name = target_mesh
if cmds.objExists(target_mesh):
long_name = cmds.ls(target_mesh, l = True)[0]
if long_name != target_mesh:
cmds.setAttr('%s.mesh_pose_source' % mesh, long_name, type = 'string')
if not cmds.objExists(long_name):
target_mesh = core.get_basename(long_name)
if cmds.objExists(target_mesh):
long_name = cmds.ls(target_mesh, l = True)[0]
cmds.setAttr('%s.mesh_pose_source' % mesh, long_name, type = 'string')
if not cmds.objExists(target_mesh):
long_name = target_mesh
return long_name
def get_target_mesh_index(self, target_mesh):
"""
Get the index of a target mesh. Target meshes are the meshes that have the delta applied to them.
Args:
target_mesh (str): The name of a target mesh.
Returns:
int: The index of the mesh.
"""
target_meshes = self.get_target_meshes()
longname_target_mesh = cmds.ls(target_mesh, l = True)
if longname_target_mesh:
target_mesh = longname_target_mesh[0]
if self.pose_control:
namespace = core.get_namespace(self.pose_control)
if namespace:
basename = core.get_basename(target_mesh, remove_namespace = True)
target_mesh = '%s:%s' % (namespace, basename)
inc = 0
for target_mesh_test in target_meshes:
if target_mesh == target_mesh_test:
return inc
inc += 1
def get_mesh_index(self, mesh):
"""
Get
# coding=utf-8
"""
SPDX-license-identifier: Apache-2.0
Copyright (c) 2020 SmartMe.IO
Authors: <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License
"""
import subprocess
import time
import netifaces
import os
from arancino.Arancino import Arancino
from arancino.utils.ArancinoUtils import ArancinoConfig, secondsToHumanString, ArancinoLogger
from arancino.ArancinoConstants import ArancinoApiResponseCode
from arancino.ArancinoPortSynchronizer import ArancinoPortSynch
from arancino.ArancinoConstants import ArancinoDBKeys
from uptime import uptime
from socket import gethostname, gethostbyname
from platform import system, release
from arancino.port.ArancinoPort import PortTypes
API_CODE = ArancinoApiResponseCode()
DB_KEYS = ArancinoDBKeys()
CONF = ArancinoConfig.Instance()
LOG = ArancinoLogger.Instance().getLogger()
TRACE = CONF.get_log_print_stack_trace()
class ArancinoApi():
def __init__(self):
self.__arancino = Arancino()
#self.__conf = ArancinoConfig.Instance()
self.__synchronizer = ArancinoPortSynch()
#### QUERIES ####
def hello(self):
try:
sys_upt = uptime()
ara_upt = self.__arancino.getUptime()
c = self.__getListOfPortConnected()
d = self.__getListOfPortDiscovered()
response = {
"arancino": {
"system": {
"os": self.__getOsInfo(),
"network": {
"hostname": gethostname(),
"ifaces": self.__getNetwork(), #[gethostname(), gethostbyname(gethostname())],
},
"uptime": [sys_upt, secondsToHumanString(int(sys_upt))]
},
"arancino": {
"uptime" : [ara_upt, secondsToHumanString(int(ara_upt))],
"version": str(CONF.get_metadata_version()),
"ports": {
"discovered": d,
"connected": c
},
"env": {
"ARANCINO": os.environ.get('ARANCINO'),
"ARANCINOCONF": os.environ.get('ARANCINOCONF'),
"ARANCINOLOG": os.environ.get('ARANCINOLOG'),
"ARANCINOENV": os.environ.get('ARANCINOENV')
}
}
}
}
return response, 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def arancino(self):
try:
ara_upt = self.__arancino.getUptime()
c = self.__getListOfPortConnected()
d = self.__getListOfPortDiscovered()
response = {
"arancino": {
"arancino": {
"uptime": [ara_upt, secondsToHumanString(int(ara_upt))],
"version": str(CONF.get_metadata_version()),
"ports": {
"discovered": d,
"connected": c
},
"env":{
"ARANCINO": os.environ.get('ARANCINO'),
"ARANCINOCONF": os.environ.get('ARANCINOCONF'),
"ARANCINOLOG": os.environ.get('ARANCINOLOG'),
"ARANCINOENV": os.environ.get('ARANCINOENV')
}
}
}
}
return response, 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def system(self):
try:
sys_upt = uptime()
response = {
"arancino": {
"system": {
"os": self.__getOsInfo(),
"network": {
"hostname": gethostname(),
"ifaces": self.__getNetwork(), # [gethostname(), gethostbyname(gethostname())],
},
"uptime": [sys_upt, secondsToHumanString(int(sys_upt))]
}
}
}
return response, 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def getAllPorts(self):
try:
ports_conn = []
for id, port in self.__arancino.getConnectedPorts().items():
rp = self.__apiCreatePortResponse(port)
ports_conn.append(rp)
ports_disc = []
for id, port in self.__arancino.getDiscoveredPorts().items():
rp = self.__apiCreatePortResponse(port)
ports_disc.append(rp)
response = {
"arancino" : {
"arancino" : {
"ports" : {
"connected": ports_conn,
"discovered": ports_disc
}
}
}
}
return response, 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def getPort(self, port_id=None):
try:
response = {}
port = self.__arancino.findPort(port_id)
if port is not None:
response = {
"arancino": {
"arancino": {
"port": self.__apiCreatePortResponse(port)
}
}
}
return response, 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def getPortsConnected(self):
try:
ports = []
for id, port in self.__arancino.getConnectedPorts().items():
rp = self.__apiCreatePortResponse(port)
ports.append(rp)
response = {
"arancino" : {
"arancino" : {
"ports" : {
"connected": ports
}
}
}
}
return response, 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def getPortsDiscovered(self):
try:
ports = []
for id, port in self.__arancino.getDiscoveredPorts().items():
rp = self.__apiCreatePortResponse(port)
ports.append(rp)
response = {
"arancino" : {
"arancino" : {
"ports" : {
"discovered": ports
}
}
}
}
return response, 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def __getArancinoConf(self):
# return all the configurations
try:
config = CONF.get_config_all()
response = {
"arancino": {
"config": config
}
}
return response, 200
except Exception as ex:
raise ex
def getArancinoConf(self, params=None):
try:
if(params and params["config"]): # check if there's the "config" key in the json, else return all the configuration.
config = {}
for it in params["config"]:
section = it["section"]
option = it["option"]
if section not in config:
config[section] = {}
#if option not in config[section]:
config[section][option] = CONF.get_config_by_name(section, option)
print(config)
response = {
"arancino": {
"config": config
}
}
return response, 200
else:
return self.__getArancinoConf()
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
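# Illustrative sketch, not part of the original API: the payload shape that
# getArancinoConf() expects when asking for specific options. The section and
# option names below are hypothetical; only the structure ("config" mapped to a
# list of {"section", "option"} dicts) is implied by the parsing loop above.
@staticmethod
def __exampleGetConfPayload():
return {
"config": [
{"section": "log", "option": "level"},
{"section": "port", "option": "enabled"}
]
}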
#
# def getArancinoConf(self, section, option):
# pass
#### OPERATIONS ####
def resetPort(self, port_id):
try:
port = self.__arancino.findPort(port_id)
if port:
self.__arancino.pauseArancinoThread()
while not self.__arancino.isPaused():
pass
result = port.reset()
self.__arancino.resumeArancinoThread()
if result:
return self.__apiCreateOkMessage(response_code=API_CODE.OK_RESET), 200
else: # when result is None it means that no reset procedure is provided.
return self.__apiCreateErrorMessage(error_code=API_CODE.OK_RESET_NOT_PROVIDED), 500
else:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_PORT_NOT_FOUND), 200
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_RESET, internal_message=[None, str(ex)]), 500
def enablePort(self, port_id):
try:
port = self.__arancino.findPort(port_id)
if port:
new_status = True
curr_status = port.isEnabled()
if new_status == curr_status:
return self.__apiCreateOkMessage(response_code=API_CODE.OK_ALREADY_ENABLED), 200
else:
port.setEnabled(new_status)
self.__synchronizer.writePortConfig(port) # Note: maybe it's better wrapping this call inside Arancino class.
while not port.isConnected():
time.sleep(1)
return self.__apiCreateOkMessage(response_code=API_CODE.OK_ENABLED), 200
else:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_PORT_NOT_FOUND), 500
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def disablePort(self, port_id):
# NOTE: this is really two operations: 1) disable 2) disconnect. It may be worth returning two
# messages in the response, since the returned JSON packet allows an array of messages and/or errors
try:
port = self.__arancino.findPort(port_id)
if port:
new_status = False
curr_status = port.isEnabled()
if new_status == curr_status:
return self.__apiCreateOkMessage(response_code=API_CODE.OK_ALREADY_DISABLED), 200
else:
port.setEnabled(new_status)
self.__synchronizer.writePortConfig(port) # Note: maybe it's better wrapping this call inside Arancino class.
while port.isConnected():
time.sleep(1)
return self.__apiCreateOkMessage(response_code=API_CODE.OK_DISABLED), 200
else:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_PORT_NOT_FOUND), 500
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def uploadPortFirmware(self, port_id, firmware):
try:
port = self.__arancino.findPort(port_id)
if port:
self.__arancino.pauseArancinoThread()
while not self.__arancino.isPaused():
pass
result = port.upload(firmware)
self.__arancino.resumeArancinoThread()
if result:
rtn_cod = result[0]
std_out = result[1]
std_err = result[2]
if rtn_cod != 0:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_UPLOAD, internal_message=[std_out, std_err]), 500
else:
return self.__apiCreateOkMessage(response_code=API_CODE.OK_UPLOAD, internal_message=[std_out, std_err]), 201
else: # when result is None it means that no upload procedure is provided.
return self.__apiCreateErrorMessage(error_code=API_CODE.OK_UPLOAD_NOT_PROVIDED), 500
else:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_PORT_NOT_FOUND), 500
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_UPLOAD, internal_message=[None, str(ex)]), 500
def setPortConfig(self, port_id, config=None):
try:
if not config:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_NO_CONFIG_PROVIDED), 500
port = self.__arancino.findPort(port_id)
if port:
if 'alias' in config:
curr_alias = port.getAlias()
if config['alias'] != curr_alias:
self.__arancino.pauseArancinoThread()
port.setAlias(config['alias'])
self.__synchronizer.writePortConfig(port) # Note: maybe it's better wrapping this call inside Arancino class.
self.__arancino.resumeArancinoThread()
if 'enable' in config:
curr_status = port.isEnabled()
new_status = str(config['enable']).lower() == 'true'
if new_status != curr_status:
self.__arancino.pauseArancinoThread()
port.setEnabled(new_status)
self.__synchronizer.writePortConfig(port) # Note: maybe it's better wrapping this call inside Arancino class.
self.__arancino.resumeArancinoThread()
while port.isConnected() != new_status:
time.sleep(1)
if 'hide' in config:
curr_status = port.isHidden()
new_status = str(config['hide']).lower() == 'true'
if new_status != curr_status:
self.__arancino.pauseArancinoThread()
port.setHide(new_status)
self.__synchronizer.writePortConfig(port) # Note: maybe it's better wrapping this call inside Arancino class.
self.__arancino.resumeArancinoThread()
return self.__apiCreateOkMessage(response_code=API_CODE.OK_CONFIGURATED), 200
else:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_PORT_NOT_FOUND), 500
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
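# Illustrative sketch, not part of the original API: the config dict that
# setPortConfig() understands. All three keys are optional; "enable" and "hide"
# are compared as the strings "true"/"false", mirroring the parsing above. The
# alias value is hypothetical.
@staticmethod
def __examplePortConfigPayload():
return {"alias": "my-port-alias", "enable": "true", "hide": "false"}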
def hidePort(self, port_id):
try:
port = self.__arancino.findPort(port_id)
if port:
new_status = True
curr_status = port.isHidden()
if new_status == curr_status:
return self.__apiCreateOkMessage(response_code=API_CODE.OK_ALREADY_HIDDEN), 200
else:
port.setHide(new_status)
self.__synchronizer.writePortConfig(port) # Note: maybe it's better wrapping this call inside Arancino class.
return self.__apiCreateOkMessage(response_code=API_CODE.OK_HIDDEN), 200
else:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_PORT_NOT_FOUND), 500
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def showPort(self, port_id):
try:
port = self.__arancino.findPort(port_id)
if port:
new_status = False
curr_status = port.isHidden()
if new_status == curr_status:
return self.__apiCreateOkMessage(response_code=API_CODE.OK_ALREADY_SHOWN), 200
else:
port.setHide(new_status)
self.__synchronizer.writePortConfig(port) # Note: maybe it's better wrapping this call inside Arancino class.
return self.__apiCreateOkMessage(response_code=API_CODE.OK_SHOWN), 200
else:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_PORT_NOT_FOUND), 500
except Exception as ex:
LOG.error("Error on api call: {}".format(str(ex)), exc_info=TRACE)
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_GENERIC, internal_message=[None, str(ex)]), 500
def setArancinoConf(self, section, option, value):
try:
try:
if section is None or section.strip() == "":
raise Exception("Configuration Section is empty")
except Exception as ex:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_NO_ARANCINO_CONFIG_SECTION_PROVIDED, internal_message=[None, str(ex)]), 500
try:
if option is None or option.strip() == "":
raise Exception("Configuration Option is empty")
except Exception as ex:
return self.__apiCreateErrorMessage(error_code=API_CODE.ERR_NO_ARANCINO_CONFIG_OPTION_PROVIDED, internal_message=[None, str(ex)]), 500
try:
if value is None or value.strip() == "":
raise Exception("Configuration Value is empty")
except Exception as ex:
cl.sendText(msg.to,"sudah dimatikan ô€œ ô€„‰👈")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ 👈")
elif msg.text in ["Qrprotect off","qrprotect off"]:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"hall ini sudah off ô€œ 👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œ ô€„‰👈")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ 👈")
elif msg.text in ["Inviteprotect off"]:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"hall ini sudah off ô€œ 👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œ ô€„‰👈")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ 👈")
elif msg.text in ["Cancelprotect off"]:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"hall ini sudah off ô€œ 👈")
else:
cl.sendText(msg.to,"sudah dimatikan ô€œ ô€„‰👈")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already close")
else:
cl.sendText(msg.to,"It is already open ô€œ 👈")
elif "Group cancel:" in msg.text:
try:
strnum = msg.text.replace("Group cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Itu off undangan ditolak👈\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan👈")
else:
cl.sendText(msg.to,"Off undangan ditolak👈Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis👈")
else:
cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"")
else:
cl.sendText(msg.to,"Weird value🛡")
elif msg.text in ["Auto leave on","Auto leave: on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈?")
else:
cl.sendText(msg.to,"Sudah terbuka ?")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈?")
else:
cl.sendText(msg.to,"Is already open👈?")
elif msg.text in ["Auto leave off","Auto leave: off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈?")
else:
cl.sendText(msg.to,"Sudah off👈?")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈?")
else:
cl.sendText(msg.to,"Is already close👈?")
elif msg.text in ["Share on","share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done ?")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka👈")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"on👈")
elif msg.text in ["Share off","share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈?")
else:
cl.sendText(msg.to,"It is already turned off ?👈")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"Off👈")
elif msg.text.lower() == 'set':
md = ""
if wait["contact"] == True: md+="✯✯ Contact:on ✯✯\n"
else: md+="✯ Contact:off ✯\n"
if wait["autoJoin"] == True: md+="✯✯ Auto Join:on ✯✯\n"
else: md +="✯ Auto Join:off ✯\n"
if wait["autoCancel"]["on"] == True:md+="✯✯ Auto cancel:" + str(wait["autoCancel"]["members"]) + "✯✯\n"
else: md+= "✯ Group cancel:off ✯\n"
if wait["leaveRoom"] == True: md+="✯✯ Auto leave:on ✯✯\n"
else: md+="✯ Auto leave:off ✯\n"
if wait["timeline"] == True: md+="✯✯ Share:on ✯✯\n"
else:md+="✯ Share:off ✯\n"
if wait["autoAdd"] == True: md+="✯✯ Auto add:on ✯✯\n"
else:md+="✯ Auto add:off \n"
if wait["commentOn"] == True: md+="✯✯ Auto komentar:on ✯✯\n"
else:md+="✯ Auto komentar:off ✯\n"
if wait["protect"] == True: md+="✯✯ Protect:on ✯✯\n"
else:md+="✯ Protect:off ✯\n"
if wait["linkprotect"] == True: md+="✯✯ Link Protect:on ✯✯\n"
else:md+="✯ Link Protect:off ✯\n"
if wait["inviteprotect"] == True: md+="✯✯ Invitation Protect:on ✯✯\n"
else:md+="✯ Invitation Protect:off ✯\n"
if wait["cancelprotect"] == True: md+="✯✯ Cancel Protect:on ✯✯\n"
else:md+="✯ Cancel Protect:off ✯\n"
cl.sendText(msg.to,md)
elif msg.text.lower() == 'me':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif cms(msg.text,["creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': Creator}
cl.sendText(msg.to,"? My Creator ? ")
cl.sendMessage(msg)
cl.sendText(msg.to,"? Dont Kick out From group ? ")
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"Refused all the invitations.")
elif "Set album:" in msg.text:
gid = msg.text.replace("Set album:","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada album👈")
else:
cl.sendText(msg.to,"Dalam album tidak👈")
else:
if wait["lang"] == "JP":
mg = "Berikut ini adalah album dari target"
else:
mg = "Berikut ini adalah subjek dari album"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "æžš\n"
else:
mg += str(y["title"]) + ":0 Pieces\n"
cl.sendText(msg.to,mg)
elif "Album" in msg.text:
gid = msg.text.replace("Album","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada album")
else:
cl.sendText(msg.to,"Dalam album tidak")
else:
if wait["lang"] == "JP":
mg = "Berikut ini adalah album dari target"
else:
mg = "Berikut ini adalah subjek dari album"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "\n"
else:
mg += str(y["title"]) + ":0 pieces\n"
elif "Hapus album " in msg.text:
gid = msg.text.replace("Hapus album ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["gid"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Soal album telah dihapus")
else:
cl.sendText(msg.to,str(i) + "Hapus kesulitan album🛡")
elif msg.text.lower() == 'group id':
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Bot out"]:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
ki7.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot Sudah Keluar Di semua grup")
else:
cl.sendText(msg.to,"He declined all invitations")
elif "Album deleted:" in msg.text:
gid = msg.text.replace("Album deleted:","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Soal album telah dihapus👈")
else:
cl.sendText(msg.to,str(i) + "Hapus kesulitan album👈")
elif msg.text in ["Auto add on","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On")
else:
cl.sendText(msg.to,"Already On👈")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On👈")
else:
cl.sendText(msg.to,"Already On👈")
elif msg.text in ["Auto add off","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off👈")
else:
cl.sendText(msg.to,"Hal ini sudah dimatikan👈")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already Off👈")
else:
cl.sendText(msg.to,"Untuk mengaktifkan-off👈")
elif "Message set:" in msg.text:
wait["message"] = msg.text.replace("Message set:","")
cl.sendText(msg.to,"We changed the message👈")
elif "Help set:" in msg.text:
wait["help"] = msg.text.replace("Help set:","")
cl.sendText(msg.to,"We changed the Help👈")
elif "Pesan add-" in msg.text:
wait["message"] = msg.text.replace("Pesan add-","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kami mengubah pesan🛡")
else:
cl.sendText(msg.to,"Change information")
elif msg.text in ["Pesan add cek","Message Confirmation"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
cl.sendText(msg.to,"I changed the language to engglis👈")
else:
wait["lang"] = "JP"
cl.sendText(msg.to,"I changed the language to indonesia👈")
elif "Message set" in msg.text:
c = msg.text.replace("Message set","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Is a string that can not be changed👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"This has been changed👈\n\n" + c)
elif "Come set:" in msg.text:
c = msg.text.replace("Come set:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
elif msg.text in ["Com on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku berada di👈")
else:
cl.sendText(msg.to,"To open👈")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Turned it on👈")
else:
cl.sendText(msg.to,"Turned on👈")
elif msg.text in ["Come off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off")
else:
cl.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"To turn off")
elif msg.text in ["Com","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
elif msg.text in ["url","Url"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
else:
cl.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "V1 gurl" in msg.text:
if msg.toType == 1:
gid | |
shear = list(shear)
if len(shear) == 1:
shear = [shear[0], shear[0]]
if len(shear) != 2:
raise ValueError(f"`translate` must be a sequence of length 2. But got: {len(shear)}.")
if isinstance(interpolation, int):
interpolation = interpolation_mode_from_int(interpolation)
if not isinstance(interpolation, InterpolationMode):
raise TypeError(f"`interpolation` must be a `InterpolationMode`. But got: {type(interpolation)}.")
img = image.copy()
h, w = get_image_size(img)
center = (h * 0.5, w * 0.5) if center is None else center # H, W
center = tuple(center[::-1]) # W, H
angle = -angle
R = cv2.getRotationMatrix2D(center=center, angle=angle, scale=scale)
# If keep shape, find the new width and height bounds
if keep_shape:
new_w = w
new_h = h
else:
abs_cos = abs(R[0, 0])
abs_sin = abs(R[0, 1])
new_w = int(h * abs_sin + w * abs_cos)
new_h = int(h * abs_cos + w * abs_sin)
R[0, 2] += (new_w * 0.5 - center[0])
R[1, 2] += (new_h * 0.5 - center[1])
center = (new_w * 0.5, new_h * 0.5) # W, H
T = translate
S = [math.radians(-shear[0]), math.radians(-shear[1])]
M = np.float32([[R[0, 0] , S[0] + R[0, 1], R[0, 2] + T[0] + (-S[0] * center[1])],
[S[1] + R[1, 0], R[1, 1] , R[1, 2] + T[1] + (-S[1] * center[0])],
[0 , 0 , 1]])
img = cv2.warpPerspective(img, M, (new_w, new_h))
return img
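# Illustrative sketch, not part of the original module: the same matrix
# composition used in _affine_numpy_image above, written out once with concrete
# example values so the rotation, shear and translation terms are easier to
# follow. All numbers are arbitrary; only the formula mirrors the code above.
def _affine_matrix_sketch():
import math
import numpy as np
import cv2
center = (64.0, 64.0) # (W, H) center of a hypothetical 128x128 image
R = cv2.getRotationMatrix2D(center=center, angle=-30.0, scale=1.0)
T = (10, 5) # post-rotation translation
S = [math.radians(-5.0), math.radians(0.0)] # shear about x and y
M = np.float32([[R[0, 0] , S[0] + R[0, 1], R[0, 2] + T[0] + (-S[0] * center[1])],
[S[1] + R[1, 0], R[1, 1] , R[1, 2] + T[1] + (-S[1] * center[0])],
[0 , 0 , 1]])
return M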
@batch_image_processing
def affine(
image : TensorOrArray,
angle : float,
translate : IntAnyT,
scale : float,
shear : FloatAnyT,
center : Optional[ListOrTuple2T[int]] = None,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
keep_shape : bool = True,
pad_mode : Union[PaddingMode, str] = "constant",
fill : Optional[FloatAnyT] = None,
) -> TensorOrArray:
"""Apply affine transformation on the image keeping image center invariant.
Args:
image (TensorOrArray[C, H, W]):
Image to be transformed.
angle (float):
Rotation angle in degrees between -180 and 180, clockwise direction.
translate (IntAnyT):
Horizontal and vertical translations (post-rotation translation).
scale (float):
Overall scale
shear (FloatAnyT):
Shear angle value in degrees between -180 to 180, clockwise
direction. If a sequence is specified, the first value corresponds
to a shear parallel to the x axis, while the second value
corresponds to a shear parallel to the y axis.
center (ListOrTuple2T[int], optional):
Center of affine transformation. If `None`, use the center of the
image. Default: `None`.
interpolation (InterpolationMode):
Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is `InterpolationMode.BILINEAR`.
If input is Tensor, only `InterpolationMode.NEAREST`,
`InterpolationMode.BILINEAR` are supported. For backward
compatibility integer values (e.g. `PIL.Image.NEAREST`) are still
acceptable.
keep_shape (bool):
If `True`, expands the output image to make it large enough to
hold the entire rotated image.
If `False` or omitted, make the output image the same size as the
input image.
Note that the `keep_shape` flag assumes rotation around the center
and no translation. Default: `True`.
pad_mode (PaddingMode, str):
One of the padding modes defined in `PaddingMode`.
Default: `constant`.
fill (FloatAnyT, optional):
Pixel fill value for the area outside the transformed image.
If given a number, the value is used for all bands respectively.
Returns:
image (TensorOrArray[C, H, W]):
Transformed image.
"""
if isinstance(image, Tensor):
return _affine_tensor_image(
image = image,
angle = angle,
translate = translate,
scale = scale,
shear = shear,
center = center,
interpolation = interpolation,
keep_shape = keep_shape,
pad_mode = pad_mode,
fill = fill,
)
elif isinstance(image, np.ndarray):
return _affine_numpy_image(
image = image,
angle = angle,
translate = translate,
scale = scale,
shear = shear,
center = center,
interpolation = interpolation,
keep_shape = keep_shape,
pad_mode = pad_mode,
fill = fill,
)
else:
raise ValueError(f"Do not support {type(image)}.")
def affine_image_box(
image : TensorOrArray,
box : TensorOrArray,
angle : float,
translate : IntAnyT,
scale : float,
shear : FloatAnyT,
center : Optional[ListOrTuple2T[int]] = None,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
keep_shape : bool = True,
pad_mode : Union[PaddingMode, str] = "constant",
fill : Optional[FloatAnyT] = None,
drop_ratio : float = 0.0,
) -> tuple[TensorOrArray, TensorOrArray]:
"""Apply affine transformation on the image keeping image center invariant.
Args:
image (TensorOrArray[C, H, W]):
Image to be transformed.
box (TensorOrArray[B, 4]):
Bounding boxes. They are expected to be in (x1, y1, x2, y2) format
with `0 <= x1 < x2` and `0 <= y1 < y2`.
angle (float):
Rotation angle in degrees between -180 and 180, clockwise direction.
translate (IntAnyT):
Horizontal and vertical translations (post-rotation translation).
scale (float):
Overall scale
shear (FloatAnyT):
Shear angle value in degrees between -180 to 180, clockwise
direction. If a sequence is specified, the first value corresponds
to a shear parallel to the x axis, while the second value
corresponds to a shear parallel to the y axis.
center (ListOrTuple2T[int], optional):
Center of affine transformation. If `None`, use the center of the
image. Default: `None`.
interpolation (InterpolationMode):
Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is `InterpolationMode.BILINEAR`.
If input is Tensor, only `InterpolationMode.NEAREST`,
`InterpolationMode.BILINEAR` are supported. For backward
compatibility integer values (e.g. `PIL.Image.NEAREST`) are still
acceptable.
keep_shape (bool):
If `True`, expands the output image to make it large enough to
hold the entire rotated image.
If `False` or omitted, make the output image the same size as the
input image.
Note that the `keep_shape` flag assumes rotation around the center
and no translation. Default: `True`.
pad_mode (PaddingMode, str):
One of the padding modes defined in `PaddingMode`.
Default: `constant`.
fill (FloatAnyT, optional):
Pixel fill value for the area outside the transformed image.
If given a number, the value is used for all bands respectively.
drop_ratio (float):
If the fraction of a bounding box left in the image after being
clipped is less than `drop_ratio` the bounding box is dropped.
If `drop_ratio==0`, don't drop any bounding boxes. Default: `0.0`.
Returns:
image (TensorOrArray[C, H, W]):
Transformed image.
box (TensorOrArray[B, 4]):
Transformed box.
"""
image_size = get_image_size(image)
return \
affine(
image = image,
angle = angle,
translate = translate,
scale = scale,
shear = shear,
center = center,
interpolation = interpolation,
keep_shape = keep_shape,
pad_mode = pad_mode,
fill = fill,
), \
affine_box(
box = box,
image_size = image_size,
angle = angle,
translate = translate,
scale = scale,
shear = shear,
center = center,
drop_ratio = drop_ratio,
)
# MARK: - Modules
@TRANSFORMS.register(name="affine")
class Affine(nn.Module):
"""Apply affine transformation on the image keeping image center invariant.
If the image is Tensor, it is expected to have [..., H, W] shape,
where ... means an arbitrary number of leading dimensions.
Args:
angle (float):
Rotation angle in degrees between -180 and 180, clockwise direction.
translate (IntAnyT):
Horizontal and vertical translations (post-rotation translation).
scale (float):
Overall scale
shear (FloatAnyT):
Shear angle value in degrees between -180 to 180, clockwise
direction. If a sequence is specified, the first value corresponds
to a shear parallel to the x axis, while the second value
corresponds to a shear parallel to the y axis.
center (ListOrTuple2T[int], optional):
Center of affine transformation. If `None`, use the center of the
image. Default: `None`.
interpolation (InterpolationMode):
Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is `InterpolationMode.BILINEAR`.
If input is Tensor, only `InterpolationMode.NEAREST`,
`InterpolationMode.BILINEAR` are supported. For backward
compatibility integer values (e.g. `PIL.Image.NEAREST`) are still
acceptable.
keep_shape (bool):
If `True`, expands the output image to make it large enough to
hold the entire rotated image.
If `False` or omitted, make the output image the same size as the
input image.
Note that the `keep_shape` flag assumes rotation around the center
and no translation. Default: `True`.
pad_mode (PaddingMode, str):
One of the padding modes defined in `PaddingMode`.
Default: `constant`.
fill (FloatAnyT, optional):
Pixel fill value for the area outside the transformed image.
If given a number, the value is used for all bands respectively.
"""
# MARK: Magic Functions
def __init__(
self,
angle : float,
translate : IntAnyT,
scale : float,
shear : FloatAnyT,
center : Optional[ListOrTuple2T[int]] = None,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
keep_shape : bool = True,
pad_mode : Union[PaddingMode, str] = "constant",
fill : Optional[FloatAnyT] = None,
):
super().__init__()
self.angle = angle
self.translate = translate
self.scale = scale
self.shear = shear
self.center = center
self.interpolation = interpolation
self.fill = fill
TODO think about better solution
groups = {}
def hnd(ix, key):
key = tuple([ k for k in key if k not in ignore_fixs ])
if key not in groups:
groups[key] = []
groups[key].append(ix)
self._process_rows(ixs, hnd)
return dict([ (tuple([ self._features[k] for k in ks ]), vs) for (ks, vs) in groups.items() ])
class Explainer(object):
def __init__(self, explfile, csvfile, sample, cache, msg, protocol):
if protocol < 1:
self._load_protocol_0(explfile, csvfile, sample, cache, msg)
elif protocol < 2:
self._load_protocol_1(explfile, csvfile, sample, cache, msg)
else:
raise ValueError("unsupported protocol {0}".format(protocol))
self._expl_time = os.path.getmtime(explfile)
self._csv_time = os.path.getmtime(csvfile)
self._cache = cache
def _load_protocol_0(self, explfile, csvfile, sample, cache, msg):
expl_time = time.perf_counter()
msg("loading explanations.. (protocol 0)")
with open(explfile, 'r') as f_e:
obj = json.load(f_e)
if "total_features" not in obj:
raise ValueError("missing key 'total_features' -- are you sure you want protocol 0?")
msg("successfully loaded {0} rows {1:6.2f}% labeled true\n{2} features AUC: {3:5.3f}",
obj["total_rows"], obj["total_true"] / obj["total_rows"] * 100.0,
obj["total_features"], obj["auc"])
self._best_th = None
self._ixs = obj["ixs"]
expls = obj["expls"]
self._train_ixs = obj["train_ixs"]
self._train_preds = obj["train_preds"]
if sample < 1.0:
random.seed(0)
sample_count = int(math.floor(sample * len(self._ixs)))
if sample_count < 2:
raise ValueError("test sample size too small: {0}".format(sample_count))
s_pos = random.sample(range(len(self._ixs)), sample_count)
s_ixs = []
s_expls = []
for sp in s_pos:
s_ixs.append(self._ixs[sp])
s_expls.append(expls[sp])
self._ixs = s_ixs
expls = s_expls
t_sample_count = int(math.floor(sample * len(self._train_ixs)))
if t_sample_count < 2:
raise ValueError("train sample size too small: {0}".format(t_sample_count))
t_pos = random.sample(range(len(self._train_ixs)), t_sample_count)
t_ixs = []
t_preds = []
for tp in t_pos:
t_ixs.append(self._train_ixs[tp])
t_preds.append(self._train_preds[tp])
self._train_ixs = t_ixs
self._train_preds = t_preds
msg("sample of {0} test and {1} train rows".format(sample_count, t_sample_count))
self._ixs_lookup = _optimize_lookup(dict([ (ix, pos) for (pos, ix) in enumerate(self._ixs) ]))
self._features = obj["features"]
if len(self._features) != obj["total_features"]:
raise ValueError("inconsistent features {0} != {1}".format(
len(self._features), obj["total_features"]))
self._auc = obj["auc"]
self._train_auc = obj["train_auc"]
if [ int(e["ix"]) for e in expls ] != self._ixs:
raise ValueError("inconsistent indexing")
self._expls = [ {
"ix": int(e["ix"]),
"file": e["file"],
"expl": _Explanation_v0(self._features, float(e["pred"]), e["up"], e["down"]),
"label": int(e["label"]) > 0,
"pred": float(e["pred"]),
} for e in expls ]
msg("loading explanations took {0:0.4f}s", time.clock() - expl_time)
dm = _DataMatrix_v0(csvfile, self._ixs, self._train_ixs,
self._lookup_key(self._ixs, lambda e: e["label"]), self._features, cache, msg)
self._dm = dm
def _load_protocol_1(self, explfile, csvfile, sample, cache, msg):
expl_time = time.perf_counter()
msg("loading explanations.. (protocol 1)")
with open(explfile, 'r') as f_e:
obj = json.load(f_e)
msg("successfully loaded {0} rows {1:6.2f}% labeled true\n{2} features AUC: {3:5.3f}",
obj["total_rows"], np.float64(obj["total_true"]) / np.float64(obj["total_rows"]) * 100.0,
len(obj["features"]), obj["test_auc"])
features = [ f for f in obj["features"] ]
features_lookup = dict([ (f, ix) for (ix, f) in enumerate(features) ])
self._ixs = list(range(int(obj["total_rows"])))
expls = sorted(obj["expls"], key=lambda e: int(e["ix"]))
self._train_ixs = None
self._train_preds = None
th = np.float64(obj["threshold"])
self._best_th = th
if sample < 1.0:
raise NotImplementedError("subsampling not available for protocol 1 (yet)")
self._ixs_lookup = _optimize_lookup(dict([ (ix, pos) for (pos, ix) in enumerate(self._ixs) ]))
self._features = features
self._auc = np.float64(obj["test_auc"])
self._train_auc = np.float64(obj["train_auc"])
if [ int(e["ix"]) for e in expls ] != self._ixs:
raise ValueError("inconsistent indexing")
if any([ (np.float64(e["pred"]) >= th) != (int(e["pred_label"]) > 0) for e in expls ]):
raise ValueError("inconsistent prediction")
self._expls = [ {
"ix": int(e["ix"]),
"expl": _Explanation_v1(e, features, e["postfixes"], th, msg),
"label": int(e["label"]) > 0,
"pred": np.float64(e["pred"]),
} for e in expls ]
actual_pos = sum( 1 for l in self._get_labels(self._ixs)[0] if l == "T" )
if actual_pos != int(obj["total_true"]):
raise ValueError("inconsistent positive labels {0} != {1}".format(actual_pos, obj["total_true"]))
msg("loading explanations took {0:0.4f}s", time.clock() - expl_time)
dm = _DataMatrix_v1(csvfile, features, self._expls, cache, msg)
self._dm = dm
def _get_pred_label(self, pred, score):
l, r = score
return "F" if pred < r else ("T" if pred >= l else "U")
def _get_pred_raw(self, ixs):
return self._lookup_key(ixs, lambda e: e["pred"])
def _get_labels(self, ixs):
return self._lookup_key(ixs, self._get_label), [ "T", "F" ]
def _nc_get_roc_curve(self, ixs):
def get_roc(preds, labels, best_th):
total_pos = 0
total_neg = 0
th_pos = {}
th_neg = {}
# edge cases
th_pos[np.float64(0.0)] = 0
th_neg[np.float64(0.0)] = 0
th_pos[np.float64(1.0)] = 0
th_neg[np.float64(1.0)] = 0
th_pos[np.float64(1.0 + 1e-12)] = 0 # includes all elements
th_neg[np.float64(1.0 + 1e-12)] = 0
# count labels
for (ix, p) in enumerate(preds):
l = labels[ix] == "T"
p = np.float64(p)
if p not in th_pos:
th_pos[p] = 0
if p not in th_neg:
th_neg[p] = 0
if l:
total_pos += 1
th_pos[p] += 1
else:
total_neg += 1
th_neg[p] += 1
ths = sorted(th_pos.keys())
# first threshold == 0
tp = total_pos
tn = 0
fp = total_neg
fn = 0
roc = []
for (ix, th) in enumerate(ths):
roc.append({
"score": th,
"tp": tp,
"tn": tn,
"fp": fp,
"fn": fn,
})
tp -= th_pos[th]
fn += th_pos[th]
fp -= th_neg[th]
tn += th_neg[th]
best_t = None
if best_th is None:
best_v = None
for cur in roc:
lv = cur["fp"]
rv = cur["fn"]
v = lv + rv
if best_v is None or v < best_v:
best_v = v
best_t = cur["score"]
else:
best_t = best_th
return roc, best_t
preds = self._get_pred_raw(ixs)
labels = self._get_labels(ixs)[0]
best_t = self._best_th
if best_t is None:
if self._train_preds is None or self._train_ixs is None:
raise ValueError("missing threshold in protocol 1")
train_roc, best_t = get_roc(
self._train_preds, self._dm.get_train_labels(self._train_ixs), None)
roc, best_t = get_roc(preds, labels, best_t)
stats = self.get_stats(ixs, (best_t, best_t))
return {
"auc": self._auc,
"roc": roc,
"best_l": best_t,
"best_r": best_t,
"total_points": len(ixs),
"train_auc": self._train_auc,
"stats": stats,
}
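# Illustrative sketch, not part of the original class: the counting idea behind
# get_roc above, on a tiny hand-made set of predictions. At a threshold t every
# row with pred >= t is called positive, so sweeping t upward moves rows out of
# (tp, fp) and into (fn, tn).
@staticmethod
def _roc_counting_sketch():
preds = [0.9, 0.6, 0.4, 0.2]
labels = ["T", "F", "T", "F"]
roc = []
for t in sorted(set(preds)) + [1.0 + 1e-12]:
tp = sum(1 for p, l in zip(preds, labels) if p >= t and l == "T")
fp = sum(1 for p, l in zip(preds, labels) if p >= t and l == "F")
fn = sum(1 for p, l in zip(preds, labels) if p < t and l == "T")
tn = sum(1 for p, l in zip(preds, labels) if p < t and l == "F")
roc.append({"score": t, "tp": tp, "fp": fp, "fn": fn, "tn": tn})
return roc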
def _get_expl(self, ix):
return self._expls[self._ixs_lookup[ix]]
def _lookup_key(self, ixs, mapping):
return [ mapping(self._get_expl(ix)) for ix in ixs ]
def _group_by(self, ixs, grouper):
groups = {}
for ix in ixs:
grp = grouper(self._get_expl(ix))
if grp not in groups:
groups[grp] = []
groups[grp].append(ix)
return groups
def _get_label(self, e):
return "T" if e["label"] else "F"
def _get_explanation(self, e, score):
expl = e["expl"].get_explanation(score)
return expl if expl else self._dm.get_vec(e["ix"])
def _same_explanation(self, e, score, expl):
return set(self._get_explanation(e, score)) == set(expl)
def _contains_explanation(self, e, score, expl):
eset = set(self._get_explanation(e, score))
for e in expl:
if e not in eset:
return False
return True
def _cmp_explanation(self, e, score, expl, partial):
if partial:
return self._contains_explanation(e, score, expl)
return self._same_explanation(e, score, expl)
def _query(self, ixs, condition):
good = []
bad = []
for ix in ixs:
if condition(self._get_expl(ix)):
good.append(ix)
else:
bad.append(ix)
return good, bad
def _query_explanation(self, ixs, score, expl, partial):
if len(ixs) < 1000:
return self._query(ixs, lambda e: self._cmp_explanation(e, score, expl, partial))
with self._cache.get_hnd({
"function": "expl",
"ixs": ixs,
"score": score,
"expl": expl,
"partial": partial,
"csv_time": self._csv_time,
"expl_time": self._expl_time,
}, "explainer") as c:
if c.has():
return c.read()
return c.write(self._query(ixs, lambda e: self._cmp_explanation(e, score, expl, partial)))
def _query_all_explanations(self, ixs, score):
if len(ixs) < 1000:
return self._group_by(ixs, lambda e: tuple(sorted(self._get_explanation(e, score))))
with self._cache.get_hnd({
"function": "all_expl",
"ixs": ixs,
"score": score,
"csv_time": self._csv_time,
"expl_time": self._expl_time,
}, "explainer") as c:
if c.has():
return c.read()
return c.write(self._group_by(ixs, lambda e: tuple(sorted(self._get_explanation(e, score)))))
def _group_conf(self, ixs, score):
def get_conf(e):
return self._get_confusion(e, score)
if len(ixs) < 1000:
return self._group_by(ixs, get_conf)
with self._cache.get_hnd({
"function": "conf",
"ixs": ixs,
"score": score,
"csv_time": self._csv_time,
"expl_time": self._expl_time,
}, "explainer") as c:
if c.has():
return c.read()
return c.write(self._group_by(ixs, get_conf))
def _get_confusion(self, e, score):
pred = self._get_pred_label(e["pred"], score)
label = self._get_label(e)
if pred == "U":
return "up" if label == "T" else "un"
if pred == label:
return "tp" if label == "T" else "tn"
return "fn" if label == "T" else "fp"
def _get_confusions(self, ixs, score):
return self._lookup_key(ixs, lambda e: self._get_confusion(e, score))
def _get_confusion_list(self):
return [ "tp", "fn", "fp", "tn", "up", "un", ]
def _group_count_by_label(self, ixs, score, simple):
details = self._get_confusion_list()
ixs_detail = self._group_conf(ixs, score)
if simple:
return dict([ (k, len(ixs_detail.get(k, []))) for k in details ])
return dict([ (k, self._dm.get_counts(ixs_detail.get(k, []))) for k in details ])
def _get_discriminant(self, ixs, score):
X = self._dm.get_vecs(ixs)
y = self._get_confusions(ixs, score)
clf = DecisionTreeClassifier(criterion="gini", splitter="best",
max_features=None, max_depth=None, random_state=0)
clf.fit(X, y)
return dict([
(self._features[fix], clf.feature_importances_[fix])
for fix in range(clf.feature_importances_.shape[0])
])
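# Illustrative sketch, not part of the original class: how _get_discriminant
# uses a decision tree purely as a feature scorer. The feature matrix and
# confusion labels below are made up; only the scikit-learn calls mirror the
# method above.
@staticmethod
def _discriminant_sketch():
from sklearn.tree import DecisionTreeClassifier
X = [[0, 1], [1, 0], [1, 1], [0, 0]]
y = ["tp", "fp", "tp", "tn"]
clf = DecisionTreeClassifier(criterion="gini", splitter="best", random_state=0)
clf.fit(X, y)
# feature_importances_ scores how strongly each column separates the
# confusion classes; _get_discriminant maps these back to feature names.
return list(clf.feature_importances_)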
def get_all_ixs(self):
return self._ixs[:]
def get_pred_ixs(self):
ixs = self.get_all_ixs()
pths = self._group_by(ixs, lambda e: e["pred"])
return sorted([ {
"pred": pred,
"ixs": pixs,
} for (pred, pixs) in pths.items() ], key=lambda v: v["pred"])
def get_roc_curve(self):
ixs = self.get_all_ixs()
if len(ixs) < 1000:
self.new_legs_1 = []
self.new_legs_2 = []
self.new_hypotenuses = []
construction_step(0)
my_triangle = triangle(
self.light_sources[0].get_source_point(),
OBSERVER_POINT,
self.light_sources[1].get_source_point()
)
angle_sign1 = right_angle(
self.light_sources[0].get_source_point(),
OBSERVER_POINT,
self.light_sources[1].get_source_point(),
size = RIGHT_ANGLE_SIZE
)
self.play(
FadeIn(angle_sign1),
FadeIn(my_triangle)
)
angle_sign2 = right_angle(
self.light_sources[1].get_source_point(),
self.lake_center,
OBSERVER_POINT,
size = RIGHT_ANGLE_SIZE
)
self.play(
FadeIn(angle_sign2)
)
self.wait()
self.play(
FadeOut(angle_sign1),
FadeOut(angle_sign2),
FadeOut(my_triangle)
)
indicator_wiggle()
self.remove(self.ls0_dot)
zoom_out_scene(2)
construction_step(1)
indicator_wiggle()
#self.play(FadeOut(self.ls0_dot))
zoom_out_scene(2)
construction_step(2)
indicator_wiggle()
self.play(FadeOut(self.ls0_dot))
self.play(
FadeOut(self.altitudes),
FadeOut(self.hypotenuses),
FadeOut(self.legs)
)
max_it = 6
scale = 2**(max_it - 4)
TEX_SCALE *= scale
# for i in range(3,max_it + 1):
# construction_step(i, show_steps = False, run_time = 4.0/2**i,
# simultaneous_splitting = True)
# simultaneous expansion of light sources from now on
self.play(FadeOut(self.inner_lake))
for n in range(3,max_it + 1):
new_lake = self.outer_lake.copy().scale(2,about_point = self.obs_dot.get_center())
for ls in self.light_sources_array:
lsp = ls.copy()
self.light_sources.add(lsp)
self.add(lsp)
self.light_sources_array.append(lsp)
new_lake_center = new_lake.get_center()
new_lake_radius = 0.5 * new_lake.get_width()
shift_list = (Transform(self.outer_lake,new_lake),)
for i in range(2**n):
theta = -TAU/4 + (i + 0.5) * TAU / 2**n
v = np.array([np.cos(theta), np.sin(theta),0])
pos1 = new_lake_center + new_lake_radius * v
pos2 = new_lake_center - new_lake_radius * v
shift_list += (self.light_sources.submobjects[i].move_source_to,pos1)
shift_list += (self.light_sources.submobjects[i+2**n].move_source_to,pos2)
self.play(*shift_list)
#self.revert_to_original_skipping_status()
# Now create a straight number line and transform into it
MAX_N = 17
origin_point = self.obs_dot.get_center()
self.number_line = NumberLine(
x_min = -MAX_N,
x_max = MAX_N + 1,
color = WHITE,
number_at_center = 0,
stroke_width = LAKE_STROKE_WIDTH,
stroke_color = LAKE_STROKE_COLOR,
#numbers_with_elongated_ticks = range(-MAX_N,MAX_N + 1),
numbers_to_show = list(range(-MAX_N,MAX_N + 1,2)),
unit_size = LAKE0_RADIUS * TAU/4 / 2 * scale,
tick_frequency = 1,
line_to_number_buff = LARGE_BUFF,
label_direction = UP,
).shift(scale * 2.5 * DOWN)
self.number_line.label_direction = DOWN
self.number_line_labels = self.number_line.get_number_mobjects()
self.wait()
origin_point = self.number_line.number_to_point(0)
nl_sources = VMobject()
pond_sources = VMobject()
for i in range(-MAX_N,MAX_N+1):
anchor = self.number_line.number_to_point(2*i + 1)
ls = self.light_sources_array[i].copy()
ls.move_source_to(anchor)
nl_sources.add(ls)
pond_sources.add(self.light_sources_array[i].copy())
self.add(pond_sources)
self.remove(self.light_sources)
self.outer_lake.rotate(TAU/8)
# open sea
open_sea = Rectangle(
width = 20 * scale,
height = 10 * scale,
stroke_width = LAKE_STROKE_WIDTH,
stroke_color = LAKE_STROKE_COLOR,
fill_color = LAKE_COLOR,
fill_opacity = LAKE_OPACITY,
).flip().next_to(origin_point,UP,buff = 0)
self.play(
ReplacementTransform(pond_sources,nl_sources),
ReplacementTransform(self.outer_lake,open_sea),
FadeOut(self.inner_lake)
)
self.play(FadeIn(self.number_line))
self.wait()
v = 4 * scale * UP
self.play(
nl_sources.shift,v,
morty.shift,v,
self.number_line.shift,v,
indicator.shift,v,
indicator_reading.shift,v,
open_sea.shift,v,
self.obs_dot.shift,v,
)
self.number_line_labels.shift(v)
origin_point = self.number_line.number_to_point(0)
#self.remove(self.obs_dot)
self.play(
indicator.move_to, origin_point + scale * UP,
indicator_reading.move_to, origin_point + scale * UP,
FadeOut(open_sea),
FadeOut(morty),
FadeIn(self.number_line_labels)
)
two_sided_sum = Tex("\dots", "+", "{1\over (-11)^2}",\
"+", "{1\over (-9)^2}", " + ", "{1\over (-7)^2}", " + ", "{1\over (-5)^2}", " + ", \
"{1\over (-3)^2}", " + ", "{1\over (-1)^2}", " + ", "{1\over 1^2}", " + ", \
"{1\over 3^2}", " + ", "{1\over 5^2}", " + ", "{1\over 7^2}", " + ", \
"{1\over 9^2}", " + ", "{1\over 11^2}", " + ", "\dots")
nb_symbols = len(two_sided_sum.submobjects)
two_sided_sum.scale(TEX_SCALE)
for (i,submob) in zip(list(range(nb_symbols)),two_sided_sum.submobjects):
submob.next_to(self.number_line.number_to_point(i - 13),DOWN, buff = 2*scale)
if (i == 0 or i % 2 == 1 or i == nb_symbols - 1): # non-fractions
submob.shift(0.3 * scale * DOWN)
self.play(Write(two_sided_sum))
for i in range(MAX_N - 5, MAX_N):
self.remove(nl_sources.submobjects[i].ambient_light)
for i in range(MAX_N, MAX_N + 5):
self.add_foreground_mobject(nl_sources.submobjects[i].ambient_light)
self.wait()
covering_rectangle = Rectangle(
width = FRAME_X_RADIUS * scale,
height = 2 * FRAME_Y_RADIUS * scale,
stroke_width = 0,
fill_color = BLACK,
fill_opacity = 1,
)
covering_rectangle.next_to(ORIGIN,LEFT,buff = 0)
for i in range(10):
self.add_foreground_mobject(nl_sources.submobjects[i])
self.add_foreground_mobject(indicator)
self.add_foreground_mobject(indicator_reading)
half_indicator_reading = Tex("{\pi^2 \over 8}").scale(TEX_SCALE)
half_indicator_reading.move_to(indicator)
central_plus_sign = two_sided_sum[13]
self.play(
FadeIn(covering_rectangle),
Transform(indicator_reading, half_indicator_reading),
FadeOut(central_plus_sign)
)
equals_sign = Tex("=").scale(TEX_SCALE)
equals_sign.move_to(central_plus_sign)
p = 2 * scale * LEFT + central_plus_sign.get_center()[1] * UP
self.play(
indicator.move_to,p,
indicator_reading.move_to,p,
FadeIn(equals_sign),
)
self.revert_to_original_skipping_status()
# show Randy admiring the result
randy = Randolph(color = MAROON_E).scale(scale).move_to(2*scale*DOWN+5*scale*LEFT)
self.play(FadeIn(randy))
self.play(randy.change,"happy")
self.play(randy.change,"hooray")
class CircumferenceText(Scene):
CONFIG = {"n" : 16}
def construct(self):
words = TexText("Circumference %d"%self.n)
words.scale(1.25)
words.to_corner(UP+LEFT)
self.add(words)
class CenterOfLargerCircleOverlayText(Scene):
def construct(self):
words = TexText("Center of \\\\ larger circle")
arrow = Vector(DOWN+LEFT, color = WHITE)
arrow.shift(words.get_bottom() + SMALL_BUFF*DOWN - arrow.get_start())
group = VGroup(words, arrow)
group.set_height(FRAME_HEIGHT - 1)
group.to_edge(UP)
self.add(group)
class DiameterWordOverlay(Scene):
def construct(self):
word = TexText("Diameter")
word.set_width(FRAME_X_RADIUS)
word.rotate(-45*DEGREES)
self.play(Write(word))
self.wait()
class YayIPTApplies(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Heyo! The Inverse \\\\ Pythagorean Theorem \\\\ applies!",
bubble_kwargs = {"width" : 5},
target_mode = "surprised"
)
self.change_student_modes(*3*["hooray"])
self.wait(2)
class WalkThroughOneMoreStep(TeacherStudentsScene):
def construct(self):
self.student_says("""
Wait...can you walk \\\\
through one more step?
""")
self.play(self.teacher.change, "happy")
self.wait(4)
class ThinkBackToHowAmazingThisIs(ThreeDScene):
CONFIG = {
"x_radius" : 100,
"max_shown_n" : 20,
}
def construct(self):
self.show_sum()
self.show_giant_circle()
def show_sum(self):
number_line = NumberLine(
x_min = -self.x_radius,
x_max = self.x_radius,
numbers_to_show = list(range(-self.max_shown_n, self.max_shown_n)),
)
number_line.add_numbers()
number_line.shift(2*DOWN)
positive_dots, negative_dots = [
VGroup(*[
Dot(number_line.number_to_point(u*x))
for x in range(1, int(self.x_radius), 2)
])
for u in (1, -1)
]
dot_pairs = it.starmap(VGroup, list(zip(positive_dots, negative_dots)))
# Decimal
decimal = DecimalNumber(0, num_decimal_places = 6)
decimal.to_edge(UP)
terms = [2./(n**2) for n in range(1, 100, 2)]
partial_sums = np.cumsum(terms)
# pi^2/4 label
brace = Brace(decimal, DOWN)
pi_term = Tex("\pi^2 \over 4")
pi_term.next_to(brace, DOWN)
term_mobjects = VGroup()
for n in range(1, self.max_shown_n, 2):
p_term = Tex("\\left(\\frac{1}{%d}\\right)^2"%n)
n_term = Tex("\\left(\\frac{-1}{%d}\\right)^2"%n)
group = VGroup(p_term, n_term)
group.scale(0.7)
p_term.next_to(number_line.number_to_point(n), UP, LARGE_BUFF)
n_term.next_to(number_line.number_to_point(-n), UP, LARGE_BUFF)
term_mobjects.add(group)
term_mobjects.set_color_by_gradient(BLUE, YELLOW)
plusses = VGroup(*[
VGroup(*[
Tex("+").next_to(
number_line.number_to_point(u*n), UP, buff = 1.25,
)
for u in (-1, 1)
])
for n in range(0, self.max_shown_n, 2)
])
zoom_out = always_shift(
self.camera.rotation_mobject,
direction = OUT, rate = 0.4
)
def update_decimal(decimal):
z = self.camera.rotation_mobject.get_center()[2]
decimal.set_height(0.07*z)
decimal.move_to(0.7*z*UP)
scale_decimal = Mobject.add_updater(decimal, update_decimal)
self.add(number_line, *dot_pairs)
self.add(zoom_out, scale_decimal)
tuples = list(zip(term_mobjects, plusses, partial_sums))
run_time = 1
for term_mobs, plus_pair, partial_sum in tuples:
self.play(
FadeIn(term_mobs),
Write(plus_pair, run_time = 1),
ChangeDecimalToValue(decimal, partial_sum),
run_time = run_time
)
self.wait(run_time)
run_time *= 0.9
self.play(ChangeDecimalToValue(decimal, np.pi**2/4, run_time = 5))
zoom_out.begin_wind_down()
self.wait()
self.remove(zoom_out, scale_decimal)
self.play(*list(map(FadeOut, it.chain(
term_mobjects, plusses,
number_line.numbers, [decimal]
))))
self.number_line = number_line
def show_giant_circle(self):
self.number_line.insert_n_curves(10000)
everything = VGroup(*self.mobjects)
circle = everything.copy()
circle.move_to(ORIGIN)
circle.apply_function(
lambda x_y_z : complex_to_R3(7*np.exp(complex(0, 0.0315*x_y_z[0])))
)
circle.rotate(-TAU/4, about_point = ORIGIN)
circle.center()
self.play(Transform(everything, circle, run_time = 6))
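# Illustrative numeric check, not part of the original scenes: the series the
# animation above sums. Over positive odd n, sum(1/n^2) converges to pi^2/8,
# so the two-sided sum over positive and negative odd integers shown on the
# number line converges to pi^2/4. The term count below is arbitrary.
def _odd_square_sum_check(terms=100000):
import math
partial = sum(1.0 / n**2 for n in range(1, 2 * terms, 2))
return partial, math.pi**2 / 8 # the two values agree to about five decimal places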
class ButWait(TeacherStudentsScene):
def construct(self):
self.student_says(
"But wait!",
target_mode = "angry",
run_time = 1,
)
self.change_student_modes(
"sassy", "angry", "sassy",
added_anims = [self.teacher.change, "guilty"],
run_time = 1
)
self.student_says(
"""
You promised us \\\\
$1+{1 \\over 4} + {1 \\over 9} + {1 \\over 16} + \\cdots$
""",
target_mode = "sassy",
)
self.wait(3)
self.teacher_says("Yes, but that's \\\\ very close.")
self.change_student_modes(*["plain"]*3)
self.wait(2)
class FinalSumManipulationScene(PiCreatureScene):
def construct(self):
LAKE_COLOR = BLUE
LAKE_OPACITY = 0.15
LAKE_STROKE_WIDTH = 5.0
LAKE_STROKE_COLOR = BLUE
TEX_SCALE = 0.8
LIGHT_COLOR2 = RED
LIGHT_COLOR3 = BLUE
unit_length = 1.5
vertical_spacing = 2.5 * DOWN
switch_on_time = 0.2
sum_vertical_spacing = 1.5
randy = self.get_primary_pi_creature()
randy.set_color(MAROON_D)
randy.color = MAROON_D
randy.scale(0.7).flip().to_edge(DOWN + LEFT)
self.wait()
ls_template = LightSource(
radius = 1,
num_levels = 10,
max_opacity_ambient = 0.5,
opacity_function = inverse_quadratic(1,0.75,1)
)
odd_range = np.arange(1,9,2)
even_range = np.arange(2,16,2)
full_range = np.arange(1,8,1)
self.number_line1 = NumberLine(
x_min = 0,
x_max = 11,
color = LAKE_STROKE_COLOR,
number_at_center = 0,
stroke_width = LAKE_STROKE_WIDTH,
stroke_color = LAKE_STROKE_COLOR,
#numbers_to_show = full_range,
number_scale_val = 0.5,
numbers_with_elongated_ticks = [],
unit_size = unit_length,
tick_frequency = 1,
line_to_number_buff = MED_SMALL_BUFF,
include_tip = True,
label_direction = UP,
)
self.number_line1.next_to(2.5 * UP + 3 * LEFT, RIGHT, buff = 0.3)
self.number_line1.add_numbers()
odd_lights = VMobject()
for i in odd_range:
pos = self.number_line1.number_to_point(i)
ls = ls_template.copy()
ls.move_source_to(pos)
odd_lights.add(ls)
self.play(
ShowCreation(self.number_line1, run_time = 5),
)
self.wait()
odd_terms = VMobject()
for i in odd_range:
if i == 1:
term = Tex("\phantom{+\,\,\,}{1\over " + str(i) + "^2}",
fill_color = LIGHT_COLOR, stroke_color = LIGHT_COLOR)
else:
term = Tex("+\,\,\, {1\over " + str(i) + "^2}",
fill_color = LIGHT_COLOR, stroke_color = LIGHT_COLOR)
term.next_to(self.number_line1.number_to_point(i), DOWN, buff = 1.5)
odd_terms.add(term)
for (ls, term) in zip(odd_lights.submobjects, odd_terms.submobjects):
self.play(
FadeIn(ls.lighthouse, run_time = switch_on_time),
SwitchOn(ls.ambient_light, run_time = switch_on_time),
Write(term, run_time = switch_on_time)
)
result1 = Tex("{\pi^2\over 8} =", fill_color = LIGHT_COLOR,
stroke_color = LIGHT_COLOR)
result1.next_to(self.number_line1, LEFT, buff = 0.5)
result1.shift(0.87 * vertical_spacing)
self.play(Write(result1))
self.number_line2 = self.number_line1.copy()
self.number_line2.numbers_to_show = full_range
self.number_line2.shift(2 * vertical_spacing)
self.number_line2.add_numbers()
full_lights = VMobject()
for i in full_range:
pos = self.number_line2.number_to_point(i)
ls = ls_template.copy()
ls.color = LIGHT_COLOR3
ls.move_source_to(pos)
full_lights.add(ls)
self.play(
ShowCreation(self.number_line2, run_time = 5),
)
self.wait()
full_lighthouses = VMobject()
full_ambient_lights = VMobject()
for ls in full_lights:
full_lighthouses.add(ls.lighthouse)
full_ambient_lights.add(ls.ambient_light)
self.play(
LaggedStartMap(FadeIn, full_lighthouses, lag_ratio = 0.2, run_time = 3),
)
self.play(
LaggedStartMap(SwitchOn, full_ambient_lights, lag_ratio = 0.2, run_time = 3)
)
)
"""
return 'false'
def _equality_symbol(self):
"""
Returns the equality symbol in Maxima.
INPUT: none
OUTPUT: string
EXAMPLES::
sage: maxima._equality_symbol()
'='
sage: var('x y')
(x, y)
sage: maxima(x == y)
_SAGE_VAR_x=_SAGE_VAR_y
"""
return '='
def _inequality_symbol(self):
"""
Returns the inequality symbol in Maxima.
INPUT: none
OUTPUT: string
EXAMPLES::
sage: maxima._inequality_symbol()
'#'
sage: maxima((x != 1))
_SAGE_VAR_x#1
"""
return '#'
def _function_class(self):
"""
Return the Python class of Maxima functions.
INPUT: none
OUTPUT: type
EXAMPLES::
sage: maxima._function_class()
<class 'sage.interfaces.maxima.MaximaFunction'>
"""
return MaximaAbstractFunction
def _object_class(self):
"""
Return the Python class of Maxima elements.
INPUT: none
OUTPUT: type
EXAMPLES::
sage: maxima._object_class()
<class 'sage.interfaces.maxima.MaximaElement'>
"""
return MaximaAbstractElement
def _function_element_class(self):
"""
Return the Python class of Maxima functions of elements.
INPUT: none
OUTPUT: type
EXAMPLES::
sage: maxima._function_element_class()
<class 'sage.interfaces.maxima.MaximaFunctionElement'>
"""
return MaximaAbstractFunctionElement
def _object_function_class(self):
"""
Return the Python class of Maxima user-defined functions.
INPUT: none
OUTPUT: type
EXAMPLES::
sage: maxima._object_function_class()
<class 'sage.interfaces.maxima.MaximaElementFunction'>
"""
return MaximaAbstractElementFunction
####################
# Maxima functions #
####################
def function(self, args, defn, rep=None, latex=None):
"""
Return the Maxima function with given arguments and definition.
INPUT:
- ``args`` - a string with variable names separated by
commas
- ``defn`` - a string (or Maxima expression) that
defines a function of the arguments in Maxima.
- ``rep`` - an optional string; if given, this is how
the function will print.
OUTPUT: Maxima function
EXAMPLES::
sage: f = maxima.function('x', 'sin(x)')
sage: f(3.2) # abs tol 2e-16
-0.058374143427579909
sage: f = maxima.function('x,y', 'sin(x)+cos(y)')
sage: f(2, 3.5) # abs tol 2e-16
sin(2)-0.9364566872907963
sage: f
sin(x)+cos(y)
::
sage: g = f.integrate('z')
sage: g
(cos(y)+sin(x))*z
sage: g(1,2,3)
3*(cos(2)+sin(1))
The function definition can be a Maxima object::
sage: an_expr = maxima('sin(x)*gamma(x)')
sage: t = maxima.function('x', an_expr)
sage: t
gamma(x)*sin(x)
sage: t(2)
sin(2)
sage: float(t(2))
0.9092974268256817
sage: loads(t.dumps())
gamma(x)*sin(x)
"""
name = self._next_var_name()
if isinstance(defn, MaximaAbstractElement):
defn = defn.str()
elif not isinstance(defn, str):
defn = str(defn)
if isinstance(args, MaximaAbstractElement):
args = args.str()
elif not isinstance(args, str):
args = str(args)
cmd = '%s(%s) := %s'%(name, args, defn)
self._eval_line(cmd)
if rep is None:
rep = defn
f = self._object_function_class()(self, name, rep, args, latex)
return f
## def display2d(self, flag=True):
## """
## Set the flag that determines whether Maxima objects are
## printed using their 2-d ASCII art representation. When the
## maxima interface starts the default is that objects are not
## represented in 2-d.
## INPUT:
## flag -- bool (default: True)
## EXAMPLES
## sage: maxima('1/2')
## 1/2
## sage: maxima.display2d(True)
## sage: maxima('1/2')
## 1
## -
## 2
## sage: maxima.display2d(False)
## """
## self._display2d = bool(flag)
def plot2d(self, *args):
r"""
Plot a 2d graph using Maxima / gnuplot.
maxima.plot2d(f, '[var, min, max]', options)
INPUT:
- ``f`` - a string representing a function (such as
f="sin(x)")
- ``'[var, xmin, xmax]'`` - a string giving the plot variable and its range
- ``options`` - an optional string representing plot2d
options in gnuplot format
EXAMPLES::
sage: maxima.plot2d('sin(x)','[x,-5,5]') # not tested
sage: opts = '[gnuplot_term, ps], [gnuplot_out_file, "sin-plot.eps"]'
sage: maxima.plot2d('sin(x)','[x,-5,5]',opts) # not tested
The eps file is saved in the current directory.
"""
self('plot2d(%s)'%(','.join([str(x) for x in args])))
def plot2d_parametric(self, r, var, trange, nticks=50, options=None):
r"""
Plot r = [x(t), y(t)] for t = tmin...tmax using gnuplot with
options.
INPUT:
- ``r`` - a string representing a function (such as
r="[x(t),y(t)]")
- ``var`` - a string representing the variable (such
as var = "t")
- ``trange`` - [tmin, tmax] are numbers with tmin < tmax
- ``nticks`` - int (default: 50)
- ``options`` - an optional string representing plot2d
options in gnuplot format
EXAMPLES::
sage: maxima.plot2d_parametric(["sin(t)","cos(t)"], "t",[-3.1,3.1]) # not tested
::
sage: opts = '[gnuplot_preamble, "set nokey"], [gnuplot_term, ps], [gnuplot_out_file, "circle-plot.eps"]'
sage: maxima.plot2d_parametric(["sin(t)","cos(t)"], "t", [-3.1,3.1], options=opts) # not tested
The eps file is saved to the current working directory.
Here is another fun plot::
sage: maxima.plot2d_parametric(["sin(5*t)","cos(11*t)"], "t", [0,2*pi()], nticks=400) # not tested
"""
tmin = trange[0]
tmax = trange[1]
cmd = "plot2d([parametric, %s, %s, [%s, %s, %s], [nticks, %s]]"%( \
r[0], r[1], var, tmin, tmax, nticks)
if options is None:
cmd += ")"
else:
cmd += ", %s)"%options
self(cmd)
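# For example, with the defaults (nticks=50, options=None) the call
# plot2d_parametric(["sin(t)","cos(t)"], "t", [-3.1, 3.1]) sends Maxima the command
#     plot2d([parametric, sin(t), cos(t), [t, -3.1, 3.1], [nticks, 50]])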
def plot3d(self, *args):
r"""
Plot a 3d graph using Maxima / gnuplot.
maxima.plot3d(f, '[x, xmin, xmax]', '[y, ymin, ymax]', '[grid, nx,
ny]', options)
INPUT:
- ``f`` - a string representing a function (such as
f="sin(x)")
- ``args`` should be of the form '[x, xmin, xmax]', '[y, ymin, ymax]',
'[grid, nx, ny]', options
EXAMPLES::
sage: maxima.plot3d('1 + x^3 - y^2', '[x,-2,2]', '[y,-2,2]', '[grid,12,12]') # not tested
sage: maxima.plot3d('sin(x)*cos(y)', '[x,-2,2]', '[y,-2,2]', '[grid,30,30]') # not tested
sage: opts = '[gnuplot_term, ps], [gnuplot_out_file, "sin-plot.eps"]'
sage: maxima.plot3d('sin(x+y)', '[x,-5,5]', '[y,-1,1]', opts) # not tested
The eps file is saved in the current working directory.
"""
self('plot3d(%s)'%(','.join([str(x) for x in args])))
def plot3d_parametric(self, r, vars, urange, vrange, options=None):
r"""
Plot a 3d parametric graph with r=(x,y,z), x = x(u,v), y = y(u,v),
z = z(u,v), for u = umin...umax, v = vmin...vmax using gnuplot with
options.
INPUT:
- ``r`` - a list of three strings ``[x, y, z]`` representing
functions (such as ``x="u^2+v^2"``, ...)
- ``vars`` - a list of two strings representing the
variables (such as vars = ["u","v"])
- ``urange`` - [umin, umax]
- ``vrange`` - [vmin, vmax] are lists of numbers with
umin < umax, vmin < vmax
- ``options`` - optional string representing plot2d
options in gnuplot format
OUTPUT: displays a plot on screen or saves to a file
EXAMPLES::
sage: maxima.plot3d_parametric(["v*sin(u)","v*cos(u)","v"], ["u","v"],[-3.2,3.2],[0,3]) # not tested
sage: opts = '[gnuplot_term, ps], [gnuplot_out_file, "sin-cos-plot.eps"]'
sage: maxima.plot3d_parametric(["v*sin(u)","v*cos(u)","v"], ["u","v"],[-3.2,3.2],[0,3],opts) # not tested
The eps file is saved in the current working directory.
Here is a torus::
sage: _ = maxima.eval("expr_1: cos(y)*(10.0+6*cos(x)); expr_2: sin(y)*(10.0+6*cos(x)); expr_3: -6*sin(x);")
sage: maxima.plot3d_parametric(["expr_1","expr_2","expr_3"], ["x","y"],[0,6],[0,6]) # not tested
Here is a Mobius strip::
sage: x = "cos(u)*(3 + v*cos(u/2))"
sage: y = "sin(u)*(3 + v*cos(u/2))"
sage: z = "v*sin(u/2)"
sage: maxima.plot3d_parametric([x,y,z],["u","v"],[-3.1,3.2],[-1/10,1/10]) # not tested
"""
umin = urange[0]
umax = urange[1]
vmin = vrange[0]
vmax = vrange[1]
cmd = 'plot3d([%s, %s, %s], [%s, %s, %s], [%s, %s, %s]'%(
r[0], r[1], r[2], vars[0], umin, umax, vars[1], vmin, vmax)
if options is None:
cmd += ')'
else:
cmd += ', %s)'%options
self(cmd)
def de_solve(self, de, vars, ics=None):
"""
Solves a 1st or 2nd order ordinary differential equation (ODE) in
two variables, possibly with initial conditions.
INPUT:
- ``de`` - a string representing the ODE
- ``vars`` - a list of strings representing the two
variables.
- ``ics`` - a triple of numbers [a,b1,b2] representing
y(a)=b1, y'(a)=b2
EXAMPLES::
sage: maxima.de_solve('diff(y,x,2) + 3*x = y', ['x','y'], [1,1,1])
y=3*x-2*%e^(x-1)
sage: maxima.de_solve('diff(y,x,2) + 3*x = y', ['x','y'])
y=%k1*%e^x+%k2*%e^-x+3*x
sage: maxima.de_solve('diff(y,x) + 3*x = y', ['x','y'])
y=(%c-3*(-x-1)*%e^-x)*%e^x
sage: maxima.de_solve('diff(y,x) + 3*x = y', ['x','y'],[1,1])
y=-%e^-1*(5*%e^x-3*%e*x-3*%e)
"""
if not isinstance(vars, str):
str_vars = '%s, %s'%(vars[1], vars[0])
else:
str_vars = vars
self.eval('depends(%s)'%str_vars)
m = self(de)
a = 'ode2(%s, %s)'%(m.name(), str_vars)
if ics is not None:
if len(ics) == 3:
cmd = "ic2("+a+",%s=%s,%s=%s,diff(%s,%s)=%s);"%(vars[0],ics[0], vars[1],ics[1], vars[1], vars[0], ics[2])
return self(cmd)
if len(ics) == 2:
return self("ic1("+a+",%s=%s,%s=%s);"%(vars[0],ics[0], vars[1],ics[1]))
return self(a+";")
def de_solve_laplace(self, de, vars, ics=None):
"""
Solves an ordinary differential equation (ODE) using Laplace
transforms.
INPUT:
- ``de`` - a string representing the ODE (e.g., de =
"diff(f(x),x,2)=diff(f(x),x)+sin(x)")
- ``vars`` - a list of strings representing the
variables (e.g., vars = ["x","f"])
- ``ics`` - a list of numbers representing initial
conditions, with symbols allowed which are represented by strings
(e.g., f(0)=1, f'(0)=2 is ics = [0,1,2])
EXAMPLES::
sage: maxima.clear('x'); maxima.clear('f')
sage: maxima.de_solve_laplace("diff(f(x),x,2) = 2*diff(f(x),x)-f(x)", ["x","f"], [0,1,2])
f(x)=x*%e^x+%e^x
::
sage: maxima.clear('x'); maxima.clear('f')
sage: f = maxima.de_solve_laplace("diff(f(x),x,2) = 2*diff(f(x),x)-f(x)", ["x","f"])
sage: f
f(x)=x*%e^x*('at('diff(f(x),x,1),x=0))-f(0)*x*%e^x+f(0)*%e^x
sage: print f
                               !
                   x  d        !                  x          x
        f(x) = x %e  (-- (f(x))!     ) - f(0) x %e  + f(0) %e
                      dx       !
                               !x = 0
.. note::
The second equation sets the values of `f(0)` and
`f'(0)` in Maxima, so subsequent ODEs involving these
variables will have these initial conditions automatically
imposed.
"""
if not (ics is None):
d = len(ics)
for i in range(0,d-1):
ic = 'atvalue(diff(%s(%s), %s, %s), %s = %s, %s)'%(
vars[1], vars[0], vars[0], i, vars[0],
# ==========================================================
# Artificial Intelligence. ETSII (University of Seville).
# Course 2017-18
# Deliverable 02
# ===========================================================
# Define, using Python, the functions asked in each exercise, using the blank
# space below the statement.
# IMPORTANT: DO NOT CHANGE THE NAMES EITHER TO THIS FILE OR TO THE FUNCTIONS
# ASKED IN EACH EXERCISE (in that case, the exercise will not be evaluated)
# THIS ASSIGNMENT IS WORTH 5% OF THE TOTAL GRADE
# *****************************************************************************
# ACADEMIC INTEGRITY AND CHEATING: the assignments are individual, and thus
# they have to be carried out independently by each student. SHARING CODE IS
# STRICTLY FORBIDDEN. It is also forbidden to use any third-party code,
# available on the web or from any other source, without the approval of the teacher.
# Any plagiarism detected will result in a FINAL GRADE OF ZERO IN THE COURSE,
# for ALL the students involved, and it may lead to other disciplinary
# measures. Furthermore, the grades obtained until that moment will not be
# kept for future calls.
# *****************************************************************************
#
# This assignment consists in finishing the practice 03, partially done in the
# lab class on Nov 23rd (we did until exercise 06). The main goal of the
# practice is to define a genetic algorithm (the one presented in slide 17 of
# unit 5) and apply it to solve some instances of a simple knapsack problem.
# The exercises 1 to 6 in Practice 3 were done in that class. The definitions
# of the functions asked in those exercises have to be included here. If you
# did not attend the class (or even if you did), you can ask for the solutions
# of those exercises to the teacher (by email), or even to some of your
# classmates. But ONLY THOSE EXERCISES. THE REST OF THE EXERCISES HAVE TO BE
# DONE INDIVIDUALLY.
# ------------------------------------------------------------------
# ==============================================
# Part I: Implementation of a genetic algorithm
# ==============================================
# We will need again the random module
import random
# Include here the definitions of the functions of exercises 1 to 6.
class Problem_Genetic():
def __init__(self, genes, individuals_length, decode, fitness):
self.genes = genes
self.individuals_length = individuals_length
self.decode = decode
self.fitness = fitness
def mutation(self, chromosome, probability):
chromosomeMutated = chromosome[:] #Copy, not a pointer to chromosome
for i in range(self.individuals_length):
if random.random()<probability:
chromosomeMutated[i] = random.choice(self.genes)
return chromosomeMutated
def crossover(self, chromosome1, chromosome2):
position = random.randrange(1,self.individuals_length-1)
chromosomeM1 = chromosome1[:position]+chromosome2[position:]
chromosomeM2 = chromosome2[:position]+chromosome1[position:]
return chromosomeM1,chromosomeM2
def binary_to_decimal(x):
return sum(b*(2**i) for (i,b) in enumerate(x))
sq_gen = Problem_Genetic([0,1],10,binary_to_decimal,lambda chromosome: (binary_to_decimal(chromosome))**2)
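# Note that binary_to_decimal reads the chromosome least-significant-bit first, e.g.
# binary_to_decimal([1, 0, 1]) == 1*2**0 + 0*2**1 + 1*2**2 == 5,
# so the fitness of [1, 0, 1] in sq_gen is 5**2 == 25.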
def initial_population(problem_genetic, size):
return [[random.choice(problem_genetic.genes)
for _ in range(problem_genetic.individuals_length)]
for _ in range(size)]
def crossover_parents(Problem_Genetic, parents):
kids = []
for j in range(0, len(parents)-1,2):
kids.extend(Problem_Genetic.crossover(parents[j],parents[j+1]))
# incomplete: with an odd number of parents the last one is silently dropped
return kids
def mutate_individuals(Problem_Genetic,population,prob):
return [Problem_Genetic.mutation(c,prob) for c in population]
def select_one_by_tournament(problem_genetic, population, k, opt): #opt is MAX or MIN
participants = random.sample(population, k) #copy, but with no repetitions
return opt(participants, key = problem_genetic.fitness) #opt -> pointer to function (it should be min or max)
def tournament_selection(problem_genetic, population, n, k, opt):
genes = []
for _ in range(n):
genes.append(select_one_by_tournament(problem_genetic, population, k, opt))
return genes
# -----------
# Exercise 7
# -----------
# Using the previous auxiliary functions, define a function new_generation_t
# for computing a new generation from a given one, as described in the slide
# 17 of unit 5 (the genetic algorithm that uses tournament selection).
# We will assume the following seven input arguments:
# new_generation_t(problem_genetic,k,opt,population,
# n_parents,n_direct,prob_mutate)
# where:
# * problem_genetic: an instance of the class Problem_Genetic, with
# the optimization problem that we want to solve.
# * k: number of participants in the selection tournaments.
# * opt: max or min, indicating if it is a maximization or a
# minimization problem.
# * population: the current generation
# * n_parents: the number of parents
# * n_direct: the number of individuals taken directly for the
# next generation
# * prob_mutate: probability that a gene mutation will take place.
# NOTE: we will assume that n_parents+n_direct is equal to the size of the
# population. These numbers n_parents and n_direct will be computed in the
# function of the next exercise, that uses new_generation_t.
# =========== Solution:
def new_generation_t(problem_genetic,k,opt,population, n_parents,n_direct,prob_mutate):
tournament_winners = tournament_selection(problem_genetic,
population, n_parents, k, opt) #get the best chromosomes (n_parents) from population
random_selection = random.sample(population, n_direct) #get a random group from the population (given the population and the proportion)
sons = crossover_parents(problem_genetic, tournament_winners) #get sons from the tournament winners
prepopulation = sons + random_selection #join sons and direct individuals to get a new population
population = mutate_individuals(problem_genetic, prepopulation, prob_mutate) #randomly mutate given the problem, the population and the prob of mutating
return population
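# A quick check of new_generation_t on the sq_gen toy problem (population of size 10,
# 6 tournament parents and 4 direct individuals, so the population size is preserved):
# >>> pop = initial_population(sq_gen, 10)
# >>> pop = new_generation_t(sq_gen, 3, max, pop, 6, 4, 0.1)
# >>> len(pop)
# 10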
# =======================
# -----------
# Exercise 8
# -----------
# Implement the genetic algorithm described in slide 17 of unit 5. That is,
# define a function:
# genetic_algorithm_t(problem_genetic,k,opt,ngen,size,
# ratio_cross,prob_mutate)
# where the input arguments are:
# * problem_genetic: an instance of the class Problem_Genetic, with
# the optimization problem that we want to solve.
# * k: number of participants in the selection tournaments.
# * opt: max or min, indicating if it is a maximization or a
# minimization problem.
# * ngen: number of generations (halting condition)
# * size: number of individuals for each generation
# * ratio_cross: portion of the population which will be obtained by
# means of crossovers.
# * prob_mutate: probability that a gene mutation will take place.
# The function genetic_algorithm_t should return the phenotype of the best
# individual in the last generation computed, along with its fitness.
# After defining it, run the previous genetic algorithm to solve the
# sq_gen problem (both in its minimization and maximization variants).
# For example:
# >>> genetic_algorithm_t(sq_gen,3,min,20,10,0.7,0.1)
# (0, 0)
# >>> genetic_algorithm_t(sq_gen,3,max,20,10,0.7,0.1)
# (1023, 1046529)
# ============= Solution:
def genetic_algorithm_t(problem_genetic,k,opt,ngen,size,ratio_cross,prob_mutate):
n_parents = int(ratio_cross * size) # number of parents obtained by crossover
n_direct = int((1 - ratio_cross) * size) # number of individuals that pass directly to the next generation
cont = 0
population = initial_population(problem_genetic, size) #generate the population randomly
#eval
best = select_one_by_tournament(problem_genetic, population, len(population), opt) #get the best
while cont < ngen: # continue until the requested number of generations has been computed
population = new_generation_t(problem_genetic, k, opt, population,
n_parents, n_direct, prob_mutate) #get a new gen
#eval
best = select_one_by_tournament(problem_genetic, population, len(population), opt) #get the best
cont = cont + 1 #next iteration
return problem_genetic.decode(best), problem_genetic.fitness(best) # return the phenotype of the best individual and its fitness
#fitness_knapsack(chromosome, n_objects, weights, capacity, values)
#decode_knapsack(chromosome, n_objects, weights, capacity)
# ===============
# ================================================
# Part II: Representation of the Knapsack problem
# ================================================
# The Knapsack problem can be stated as follows: given n objects of
# weights w_i and value v_i (i=1,...,n), select which objects should
# be carried in a knapsack having a maximum weight P, in such a way
# that the value of the selected objects is maximum.
# We will use the following representation:
# GENES: [0,1]
# INDIVIDUALS-LENGTH: N
# DECODE(X): we read the chromosome from left to right, a 1 at
# position i means that the i-th object is selected, with the
# following exception:
# If by selecting the object we go beyond the max weight, then this
# object is not selected (and neither are any of the remaining ones).
# F-OBJECTIVE(X): sum of the values of the selected objects
# (note that no penalty is required because of our decode function)
# -----------
# Exercise 8
# -----------
# Define a function
# decode_knapsack(chromosome, n_objects, weights, capacity)
# that receives as input:
# - a chromosome (i.e. a list of 0s and 1s, of length equal to
# n_objects)
# - n_objects: total number of available objects
# - weights: a list with the weight of each object
# - capacity: maximum weight of the knapsack.
# The output of this function is a list of 0s and 1s representing the
# set of selected objects (the i-th object is selected if and only if
# there is a 1 at position i). This list is obtained from the
# chromosome, filtering the objects that are discarded according to
# the DECODE description.
# ========== Solution:
def decode_knapsack(chromosome, n_objects, weights, capacity):
    actual_weight = 0
    representation = []
    for i in range(n_objects):
        if chromosome[i]*weights[i]+actual_weight <= capacity:
            # if the new object can still be carried (there is enough weight capacity left)
            representation.append(chromosome[i])
            actual_weight += chromosome[i]*weights[i]
        else:
            # otherwise this object is discarded
            representation.append(0)
    return representation
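# A possible fitness function matching the signature referenced above. This is only one
# reasonable reading of F-OBJECTIVE (sum of the values of the selected objects), not
# necessarily the intended reference solution:
def fitness_knapsack(chromosome, n_objects, weights, capacity, values):
    # Decode first, so over-capacity objects are already filtered out and no
    # penalty term is needed.
    selected = decode_knapsack(chromosome, n_objects, weights, capacity)
    return sum(v for s, v in zip(selected, values) if s == 1)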
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt, QRect, QObject, pyqtSignal, QThread, QMutex
from PyQt5.QtGui import QPixmap, QFont, QIntValidator, QDoubleValidator
from PyQt5.QtMultimedia import QSound
import sentry_sdk
from tutorial import Tutorial
import time
import yaml
import pika
import pika.exceptions
import requests
from cryptography.fernet import Fernet, InvalidToken
import json
import base64
import traceback
sentry_sdk.init("https://[email protected]/5919963")
players_layout_6m = [
((400, 480), (300, 450)),
((20, 300), (140, 310)),
((20, 120), (140, 150)),
((300, 20), (420, 80)),
((680, 120), (580, 140)),
((680, 300), (580, 310)),
]
players_layout_3m = [
((400, 480), (300, 450)),
((20, 120), (140, 150)),
((680, 120), (580, 140)),
]
players_layout_hu = [
((400, 480), (300, 450)),
((300, 20), (420, 80)),
]
class EndWidget(QWidget):
def __init__(self):
super().__init__()
self.end_text = QLabel()
self.end_text.setParent(self)
self.replay_btn = QPushButton()
self.replay_btn.setParent(self)
self.replay_btn.setText('Replay')
class PokerTimer(QObject):
timer_sig = pyqtSignal()
def __init__(self, done_signal):
super().__init__()
done_signal.connect(self.stop)
self.done = False
def run(self):
while not self.done:
self.timer_sig.emit()
time.sleep(0.5)
def stop(self):
self.done = True
class GameStartListener(QObject):
game_start = pyqtSignal(str)
queue_update = pyqtSignal()
def __init__(self, channel, player_id):
super().__init__()
self.channel = channel
self.player_id = player_id
self.channel.queue_purge(f'public.{self.player_id}')
def run(self):
self.channel.basic_consume(f'public.{self.player_id}', on_message_callback=self.callback, auto_ack=True)
self.channel.start_consuming()
def callback(self, ch, method, properties, body):
self.queue_update.emit()
body = json.loads(body.decode('utf-8'))
if 'game' in body and self.player_id in body['players']:
self.game_start.emit(body['game'])
class Listener(QObject):
gamestate = pyqtSignal(dict)
private = pyqtSignal(dict)
def __init__(self, connection, player_id, key):
super().__init__()
self.connection = connection
self.channel = connection.channel()
self.key = key
self.player_id = player_id
self.channel.queue_purge(f'public.{self.player_id}')
def run(self):
self.channel.basic_consume(f'public.{self.player_id}', on_message_callback=self.callback, auto_ack=True,
consumer_tag=self.player_id)
self.channel.start_consuming()
def callback(self, ch, method, properties, body):
body = json.loads(body.decode('utf-8'))
if not body:
return
if 'private_to' in body:
if body['private_to'] == self.player_id:
try:
self.private.emit(json.loads(
Fernet(self.key.encode('utf-8')).decrypt(base64.b64decode(body['data'].encode('utf-8'))).decode(
'utf-8')))
except InvalidToken:
pass
else:
self.gamestate.emit(body)
def stop(self):
self.channel.basic_cancel(self.player_id)
class Board(QWidget):
def __init__(self):
super().__init__()
self.setFixedSize(320, 196)
self.f1 = QLabel(parent=self)
self.f2 = QLabel(parent=self)
self.f2.move(70, 0)
self.f3 = QLabel(parent=self)
self.f3.move(0, 98)
self.f4 = QLabel(parent=self)
self.f4.move(70, 98)
self.t1 = QLabel(parent=self)
self.t1.move(160, 0)
self.t2 = QLabel(parent=self)
self.t2.move(160, 98)
self.r = QLabel(parent=self)
self.r.move(250, 51)
self.imgs = [self.f1, self.f2, self.f3, self.f4, self.t1, self.t2, self.r]
self.board_cards = []
def setBoard(self, board_cards):
for img in self.imgs:
img.hide()
for i, cards in enumerate(zip(board_cards[::2], board_cards[1::2])):
img = ''.join(cards)
pxmap = QPixmap()
pxmap.load(f'images/{img}.png')
pxmap = pxmap.scaled(68, 94, transformMode=1)
self.imgs[i].setPixmap(pxmap)
self.imgs[i].show()
self.board_cards = [c + s for c, s in zip(board_cards[::2], board_cards[1::2])]
class ConnectRoomTab(QWidget):
def __init__(self):
super().__init__()
layout = QFormLayout()
self.setLayout(layout)
self.room_code_label = QLabel("Room code:")
self.room_code = QLineEdit()
layout.addRow(self.room_code_label, self.room_code)
self.connect_btn = QPushButton()
self.connect_btn.setText('Connect')
layout.addRow(self.connect_btn)
class MainConnectWindow(QWidget):
press_tutorial = pyqtSignal()
def __init__(self, default_nickname, default_server):
super().__init__()
layout = QVBoxLayout()
self.tutorial_btn = QPushButton()
self.tutorial_btn.setText("Tutorial (FR)")
layout.addWidget(self.tutorial_btn)
self.tutorial_btn.pressed.connect(self.press_tutorial.emit)
self.top_window = TopConnectWindow(default_nickname, default_server)
self.top_window.login_failure.connect(self.push_auth_fail)
layout.addWidget(self.top_window)
self.setLayout(layout)
self.query_logs = EventLog()
self.query_logs.setFixedHeight(60)
layout.addWidget(self.query_logs)
def push_auth_fail(self, msg):
self.query_logs.push_message(msg)
class TopConnectWindow(QWidget):
login_success = pyqtSignal(str, str, list, str, str)
login_failure = pyqtSignal(str)
register_success = pyqtSignal()
def __init__(self, default_nickname, default_server):
super().__init__()
layout = QFormLayout()
self.setLayout(layout)
self.nickname = QLineEdit()
self.nickname.setText(default_nickname)
layout.addRow(QLabel("Your nickname"), self.nickname)
self.password = QLineEdit()
self.password.setEchoMode(QLineEdit.Password)
layout.addRow(QLabel("Password"), self.password)
self.fqdn = QLineEdit()
self.fqdn.setText(default_server)
layout.addRow(QLabel("Server name"), self.fqdn)
self.login = QPushButton()
self.login.setText('Register')
self.login.pressed.connect(self.register_request)
layout.addRow(self.login)
self.login = QPushButton()
self.login.setText('Login')
self.login.pressed.connect(self.login_request)
layout.addRow(self.login)
def login_request(self):
user = self.nickname.text()
password = self.password.text()
response = requests.post(f'https://{self.fqdn.text()}/login', data={'user': user, 'password': password})
resp_data = response.json()
if 'status' in resp_data and resp_data['status'] == 'success':
self.login_success.emit(resp_data['token'], resp_data['key'],
resp_data['games'], resp_data['id'], password)
else:
self.login_failure.emit(resp_data['reason'])
def register_request(self):
user = self.nickname.text()
password = self.password.text()
response = requests.post(f'https://{self.fqdn.text()}/register', data={'user': user, 'password': password})
resp_data = response.json()
if 'status' in resp_data and resp_data['status'] == 'success':
self.register_success.emit()
class BetAmountWidget(QWidget):
def __init__(self, nb_cols=2):
super().__init__()
self.setFixedSize(200, 200)
text_font = QFont("Sans", 10)
self.text_widget = QLabel()
self.text_widget.setFont(text_font)
self.text_widget.setStyleSheet("QLabel { color : white; }")
self.text_widget.setParent(self)
self.chips = []
self.nb_cols = nb_cols
for i in range(7):
for j in range(nb_cols):
self.chips.append(QLabel())
self.chips[-1].setParent(self)
self.chips[-1].move(0 + 30 * j, 28 - 4 * i)
def set_amount(self, amount):
if not amount:
self.hide()
return
self.text_widget.setText(str(amount))
self.text_widget.adjustSize()
i = 0
for chip in self.chips:
chip.hide()
nb_chips_needed = 0
amount_est = amount
for chip_val in (500, 100, 25, 5, 1):
while amount_est >= chip_val:
amount_est -= chip_val
nb_chips_needed += 1
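# e.g. amount == 137 decomposes greedily as 100 + 25 + 5 + 5 + 1 + 1, so nb_chips_needed == 6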
for chip_val in (500, 100, 25, 5, 1):
while amount >= chip_val and i < len(self.chips):
amount -= chip_val
chip = self.chips[i]
pxmap = QPixmap()
pxmap.load(f'images/chip_{chip_val}.png')
pxmap = pxmap.scaled(28, 22, transformMode=1)
chip.setPixmap(pxmap)
chip.show()
i += 1 if nb_chips_needed >= 8 else self.nb_cols
self.text_widget.move(40 if nb_chips_needed < 8 else 8 + 30 * self.nb_cols, 28)
self.show()
class HoleCardsWidget(QWidget):
def __init__(self):
super().__init__()
self.setFixedSize(108, 38)
self.cards = [QLabel(parent=self), QLabel(parent=self), QLabel(parent=self)]
self.cards[0].setGeometry(0, 0, 68, 38)
self.cards[1].setGeometry(20, 0, 68, 38)
self.cards[2].setGeometry(40, 0, 68, 38)
self.codes = []
self.setCards([], True)
def setCards(self, cards, is_folded):
if not cards:
pxmap = QPixmap()
pxmap.load('images/back.png')
rect2 = QRect(0, 0, 68, 38)
pxmap = pxmap.scaled(68, 94, transformMode=1).copy(rect2)
self.cards[0].setPixmap(pxmap)
self.cards[1].setPixmap(pxmap)
self.cards[2].setPixmap(pxmap)
elif cards != self.codes:
for i, card in enumerate(cards):
pxmap = QPixmap()
rect = QRect(0, 0, 68, 38)
pxmap.load(f'images/{card}.png')
pxmap = pxmap.scaled(68, 94, transformMode=1).copy(rect)
self.cards[i].setPixmap(pxmap)
self.codes = cards
for card in self.cards:
card.setGraphicsEffect(None)
if is_folded:
if not cards:
Opacity_0 = QGraphicsOpacityEffect()
Opacity_0.setOpacity(0)
self.setGraphicsEffect(Opacity_0)
else:
Opacity_40 = QGraphicsOpacityEffect()
Opacity_40.setOpacity(0.4)
self.setGraphicsEffect(Opacity_40)
else:
self.setGraphicsEffect(None)
self.show()
class EventLog(QLabel):
def __init__(self):
super().__init__()
self.messages = []
self.setStyleSheet('background-color: black; color: white')
text_font = QFont("Sans", 9)
self.setFont(text_font)
self.setAlignment(Qt.AlignBottom)
def push_message(self, message):
self.messages.append(message)
self.setText('\n'.join(self.messages[-5:]))
class PlayerWidget(QWidget):
def __init__(self, nickname, **kwargs):
super().__init__()
layout = QVBoxLayout()
self.setLayout(layout)
self.nickname = nickname
self.bet_amount_widget = None
self.is_folded = False
self.chips = 0
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
self.hcards = HoleCardsWidget()
layout.addWidget(self.hcards)
player_bg = QLabel(parent=self)
pxmap = QPixmap()
pxmap.load('images/PlayerTile.png')
player_bg.setPixmap(pxmap)
player_bg.adjustSize()
player_bg.move(0, 32)
text_font = QFont("Sans", 10)
self.text_area = QLabel()
self.text_area.setText(nickname)
self.text_area.setFont(text_font)
self.text_area.setStyleSheet("QLabel { color : white; }")
self.timer = QProgressBar(parent=self, textVisible=False, maximum=1000)
self.timer.setGeometry(2, 58, 104, 8)
self.timer.setStyleSheet("""QProgressBar:horizontal {padding: 2px; background: grey;}
QProgressBar::chunk {background-color: #0588BB; }""")
self.timer.setValue(1000)
text_font = QFont("Sans", 12, weight=1)
self.chip_count = QLabel()
self.chip_count.setText('500')
self.chip_count.setFont(text_font)
self.chip_count.setStyleSheet("QLabel { color : white; }")
layout.addWidget(self.text_area)
layout.addWidget(self.chip_count)
layout.setAlignment(self.text_area, Qt.AlignTop | Qt.AlignHCenter)
layout.setAlignment(self.chip_count, Qt.AlignTop | Qt.AlignHCenter)
self.setFixedSize(108, 94)
def setHoles(self, cards):
self.hcards.setCards(cards, self.is_folded)
def set_bet_amount_widget(self, widget):
self.bet_amount_widget = widget
class RaiseWidgetGroup(QWidget):
raise_change = pyqtSignal(int)
def __init__(self):
super().__init__()
self.raise_size = 0
self.min_raise = 0
self.max_raise = 0
self.slider = QSlider(orientation=Qt.Vertical)
self.slider.setFixedHeight(160)
self.slider.move(40, 0)
self.slider.setParent(self)
self.slider.actionTriggered.connect(self.slider_raise)
self.slider.adjustSize()
self.adjustSize()
self.free_text = QLineEdit()
self.free_text.setParent(self)
self.free_text.setGeometry(30, 180, 40, 20)
int_validator = QIntValidator()
self.free_text.setValidator(int_validator)
self.free_text.textEdited.connect(self.set_raise_amount)
self.slider.setMaximum(130)
self.slider.setSingleStep(1)
self.slider.setPageStep(1)
def set_raise_amount(self):
if not self.free_text.hasAcceptableInput():
return
amount = int(self.free_text.text())
self.raise_size = amount
self.raise_change.emit(self.raise_size)
range_ratio = (self.max_raise - 125 / 2) / self.min_raise
if self.raise_size == self.max_raise:
v = self.slider.maximum()
elif range_ratio < 1:
v = round(self.raise_size * 2 / self.min_raise)
else:
exp_increment = range_ratio ** 0.008
v = 0
for i in range(self.slider.maximum()):
if min(round(exp_increment ** i * self.min_raise + i / 2), self.max_raise) > self.raise_size:
v = i
break
self.slider.setValue(max(v - 2, 0))
def set_raise_range(self, min_raise, max_raise):
self.min_raise = min_raise
self.max_raise = max_raise
self.raise_size = min_raise
self.free_text.validator().setRange(self.min_raise, self.max_raise)
self.free_text.setText(str(self.min_raise))
self.slider.setValue(0)
def slider_raise(self):
# Map the slider position to a raise amount: the first few ticks are a dead zone,
# then the amount grows roughly exponentially from min_raise towards max_raise,
# with a small linear term (value / 2) so that low positions still move the amount.
value = max(self.slider.value() - 2, 0)
incr_lin_tot = 125 / 2
range_ratio = (self.max_raise - incr_lin_tot) / self.min_raise
if range_ratio <= 1:
# Small raise range: plain linear scaling is enough.
self.raise_size = min(round(value / 2 * self.min_raise), self.max_raise)
else:
# The exponent 0.008 is ~1/125, so after about 125 steps the exponential factor
# reaches range_ratio and the top of the slider lands on max_raise.
exp_increment = range_ratio ** 0.008
self.raise_size = min(round(exp_increment ** value * self.min_raise + value / 2), self.max_raise)
self.raise_change.emit(self.raise_size)
self.free_text.setText(str(self.raise_size))
class BetActions(QWidget):
def __init__(self):
super().__init__()
self.call = QPushButton()
self.call.move(80, 220)
self.call.setText('Call')
self.call.setParent(self)
self.fold = QPushButton()
self.fold.move(00, 220)
self.fold.setText('Fold')
self.fold.setParent(self)
self.bet = QPushButton()
self.bet.move(160, 220)
self.bet.setText('Raise')
self.bet.setParent(self)
self.raise_group = RaiseWidgetGroup()
self.raise_group.setGeometry(160, 0, 100, 200)
self.raise_group.raise_change.connect(self.raise_changed)
self.raise_group.setParent(self)
self.hide()
def raise_changed(self, value):
self.bet.setText(f'Raise {value}')
class PokerTableWidget(QWidget):
def __init__(self, nickname, spectate_only, close_callback):
super().__init__()
self.bg = QLabel(parent=self)
pixmap = QPixmap()
pixmap.load('images/Background.png')
self.close_callback = close_callback
self.bg.setPixmap(pixmap)
self.setFixedSize(800, 600)
self.board = Board()
self.board.setParent(self)
self.board.move(240, 220)
self.min_raise = 0
self.raise_size = 0
self.to_call = 0
self.players = []
self.nickname = nickname
self.spectate_only = spectate_only
self.pot_size = BetAmountWidget(nb_cols=3)
self.pot_size.setParent(self)
self.pot_size.move(340, 150)
self.bet_actions = BetActions()
self.bet_actions.setParent(self)
self.bet_actions.move(560, 320)
self.reconnect = QPushButton()
self.reconnect.move(560, 540)
self.reconnect.setText('Reconnect')
self.reconnect.setParent(self)
self.event_log = EventLog()
self.event_log.setFixedSize(200, 78)
self.event_log.setParent(self)
self.event_log.move(20, 500)
def closeEvent(self, close_event):
self.close_callback()
return super().closeEvent(close_event)
def setWinningHand(self, winning_hand):
for player in self.players:
if player.is_folded:
continue
if winning_hand and player.hcards.codes and all(card not in winning_hand for card in player.hcards.codes):
Opacity_40 = QGraphicsOpacityEffect()
Opacity_40.setOpacity(0.4)
player.hcards.setGraphicsEffect(Opacity_40)
else:
for card, widget in zip(player.hcards.codes, player.hcards.cards):
if winning_hand and card not in winning_hand:
Opacity_40 = QGraphicsOpacityEffect()
Opacity_40.setOpacity(0.4)
widget.setGraphicsEffect(Opacity_40)
else:
widget.setGraphicsEffect(None)
for card, widget in zip(self.board.board_cards, self.board.imgs):
if winning_hand and card not in winning_hand:
Opacity_40 = QGraphicsOpacityEffect()
Opacity_40.setOpacity(0.4)
widget.setGraphicsEffect(Opacity_40)
else:
widget.setGraphicsEffect(None)
def setBoard(self, board):
self.board.setBoard(board)
def setActive(self, nickname, players):
self.reconnect.hide()
if nickname == self.nickname:
self.bet_actions.show()
else:
self.bet_actions.hide()
for p in players:
if p['name'] == self.nickname and p['disconnected']:
| |
OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', YLeaf(YType.uint32, 'unknown-packets')),
('zero_length_body_packets', YLeaf(YType.uint32, 'zero-length-body-packets')),
('start_control_connection_requests', YLeaf(YType.uint32, 'start-control-connection-requests')),
('start_control_connection_replies', YLeaf(YType.uint32, 'start-control-connection-replies')),
('start_control_connection_notifications', YLeaf(YType.uint32, 'start-control-connection-notifications')),
('stop_control_connection_notifications', YLeaf(YType.uint32, 'stop-control-connection-notifications')),
('hello_packets', YLeaf(YType.uint32, 'hello-packets')),
('outgoing_call_requests', YLeaf(YType.uint32, 'outgoing-call-requests')),
('outgoing_call_replies', YLeaf(YType.uint32, 'outgoing-call-replies')),
('outgoing_call_connected_packets', YLeaf(YType.uint32, 'outgoing-call-connected-packets')),
('incoming_call_requests', YLeaf(YType.uint32, 'incoming-call-requests')),
('incoming_call_replies', YLeaf(YType.uint32, 'incoming-call-replies')),
('incoming_call_connected_packets', YLeaf(YType.uint32, 'incoming-call-connected-packets')),
('call_disconnect_notify_packets', YLeaf(YType.uint32, 'call-disconnect-notify-packets')),
('wan_error_notify_packets', YLeaf(YType.uint32, 'wan-error-notify-packets')),
('set_link_info_packets', YLeaf(YType.uint32, 'set-link-info-packets')),
('service_relay_requests', YLeaf(YType.uint32, 'service-relay-requests')),
('service_relay_replies', YLeaf(YType.uint32, 'service-relay-replies')),
('acknowledgement_packets', YLeaf(YType.uint32, 'acknowledgement-packets')),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "drop"
self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/counters/control/tunnel-xr/global/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L2Tp.Counters.Control.TunnelXr.Global.Drop, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
class Tunnels(Entity):
"""
Table of tunnel IDs of control message counters
.. attribute:: tunnel
L2TP tunnel control message counters
**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
super(L2Tp.Counters.Control.Tunnels, self).__init__()
self.yang_name = "tunnels"
self.yang_parent_name = "control"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("tunnel", ("tunnel", L2Tp.Counters.Control.Tunnels.Tunnel))])
self._leafs = OrderedDict()
self.tunnel = YList(self)
self._segment_path = lambda: "tunnels"
self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/counters/control/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(L2Tp.Counters.Control.Tunnels, [], name, value)
class Tunnel(Entity):
"""
L2TP tunnel control message counters
.. attribute:: tunnel_id (key)
L2TP tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: brief
L2TP control message local and remote addresses
**type**\: :py:class:`Brief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Brief>`
.. attribute:: global_
Global data
**type**\: :py:class:`Global <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
super(L2Tp.Counters.Control.Tunnels.Tunnel, self).__init__()
self.yang_name = "tunnel"
self.yang_parent_name = "tunnels"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['tunnel_id']
self._child_container_classes = OrderedDict([("brief", ("brief", L2Tp.Counters.Control.Tunnels.Tunnel.Brief)), ("global", ("global_", L2Tp.Counters.Control.Tunnels.Tunnel.Global))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('tunnel_id', YLeaf(YType.int32, 'tunnel-id')),
])
self.tunnel_id = None
self.brief = L2Tp.Counters.Control.Tunnels.Tunnel.Brief()
self.brief.parent = self
self._children_name_map["brief"] = "brief"
self._children_yang_names.add("brief")
self.global_ = L2Tp.Counters.Control.Tunnels.Tunnel.Global()
self.global_.parent = self
self._children_name_map["global_"] = "global"
self._children_yang_names.add("global")
self._segment_path = lambda: "tunnel" + "[tunnel-id='" + str(self.tunnel_id) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/counters/control/tunnels/%s" % self._segment_path()
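# e.g. a Tunnel entry with tunnel_id = 100 is addressed as
# Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/counters/control/tunnels/tunnel[tunnel-id='100']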
def __setattr__(self, name, value):
self._perform_setattr(L2Tp.Counters.Control.Tunnels.Tunnel, ['tunnel_id'], name, value)
class Brief(Entity):
"""
L2TP control message local and remote addresses
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
.. attribute:: local_address
Local IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remote_address
Remote IP address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
super(L2Tp.Counters.Control.Tunnels.Tunnel.Brief, self).__init__()
self.yang_name = "brief"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('remote_tunnel_id', YLeaf(YType.uint32, 'remote-tunnel-id')),
('local_address', YLeaf(YType.str, 'local-address')),
('remote_address', YLeaf(YType.str, 'remote-address')),
])
self.remote_tunnel_id = None
self.local_address = None
self.remote_address = None
self._segment_path = lambda: "brief"
def __setattr__(self, name, value):
self._perform_setattr(L2Tp.Counters.Control.Tunnels.Tunnel.Brief, ['remote_tunnel_id', 'local_address', 'remote_address'], name, value)
class Global(Entity):
"""
Global data
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Transmit>`
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Retransmit>`
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Received>`
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel.Global.Drop>`
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
super(L2Tp.Counters.Control.Tunnels.Tunnel.Global, self).__init__()
self.yang_name = "global"
self.yang_parent_name = "tunnel"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("transmit", ("transmit", L2Tp.Counters.Control.Tunnels.Tunnel.Global.Transmit)), ("retransmit", ("retransmit", L2Tp.Counters.Control.Tunnels.Tunnel.Global.Retransmit)), ("received", ("received", L2Tp.Counters.Control.Tunnels.Tunnel.Global.Received)), ("drop", ("drop", L2Tp.Counters.Control.Tunnels.Tunnel.Global.Drop))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('total_transmit', YLeaf(YType.uint32, 'total-transmit')),
('total_retransmit', YLeaf(YType.uint32, 'total-retransmit')),
('total_received', YLeaf(YType.uint32, 'total-received')),
('total_drop', YLeaf(YType.uint32, 'total-drop')),
])
self.total_transmit = None
self.total_retransmit = None
self.total_received = None
self.total_drop = None
self.transmit = L2Tp.Counters.Control.Tunnels.Tunnel.Global.Transmit()
self.transmit.parent = self
self._children_name_map["transmit"] = "transmit"
self._children_yang_names.add("transmit")
self.retransmit = L2Tp.Counters.Control.Tunnels.Tunnel.Global.Retransmit()
self.retransmit.parent = self
self._children_name_map["retransmit"] = "retransmit"
self._children_yang_names.add("retransmit")
self.received = L2Tp.Counters.Control.Tunnels.Tunnel.Global.Received()
self.received.parent = self
self._children_name_map["received"] = "received"
self._children_yang_names.add("received")
self.drop = L2Tp.Counters.Control.Tunnels.Tunnel.Global.Drop()
self.drop.parent = self
self._children_name_map["drop"] = "drop"
self._children_yang_names.add("drop")
self._segment_path = lambda: "global"
def __setattr__(self, name, value):
self._perform_setattr(L2Tp.Counters.Control.Tunnels.Tunnel.Global, ['total_transmit', 'total_retransmit', 'total_received', 'total_drop'], name, value)
class Transmit(Entity):
"""
Transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
super(L2Tp.Counters.Control.Tunnels.Tunnel.Global.Transmit, self).__init__()
self.yang_name = "transmit"
self.yang_parent_name = "global"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('unknown_packets', YLeaf(YType.uint32, 'unknown-packets')),
('zero_length_body_packets', YLeaf(YType.uint32, 'zero-length-body-packets')),
('start_control_connection_requests', YLeaf(YType.uint32, 'start-control-connection-requests')),
('start_control_connection_replies', YLeaf(YType.uint32, 'start-control-connection-replies')),
('start_control_connection_notifications', YLeaf(YType.uint32, 'start-control-connection-notifications')),
('stop_control_connection_notifications', YLeaf(YType.uint32, 'stop-control-connection-notifications')),
('hello_packets', YLeaf(YType.uint32, 'hello-packets')),
('outgoing_call_requests', YLeaf(YType.uint32, 'outgoing-call-requests')),
('outgoing_call_replies', YLeaf(YType.uint32, 'outgoing-call-replies')),
('outgoing_call_connected_packets', YLeaf(YType.uint32, 'outgoing-call-connected-packets')),
('incoming_call_requests', YLeaf(YType.uint32, 'incoming-call-requests')),
('incoming_call_replies', YLeaf(YType.uint32, 'incoming-call-replies')),
('incoming_call_connected_packets', YLeaf(YType.uint32, 'incoming-call-connected-packets')),
('call_disconnect_notify_packets', YLeaf(YType.uint32, 'call-disconnect-notify-packets')),
('wan_error_notify_packets', YLeaf(YType.uint32, 'wan-error-notify-packets')),
('set_link_info_packets', YLeaf(YType.uint32, 'set-link-info-packets')),
('service_relay_requests', YLeaf(YType.uint32, 'service-relay-requests')),
('service_relay_replies', YLeaf(YType.uint32, 'service-relay-replies')),
('acknowledgement_packets', YLeaf(YType.uint32, 'acknowledgement-packets')),
])
self.unknown_packets = None
self.zero_length_body_packets = None
self.start_control_connection_requests = None
self.start_control_connection_replies = None
self.start_control_connection_notifications = None
self.stop_control_connection_notifications = None
self.hello_packets = None
self.outgoing_call_requests = None
self.outgoing_call_replies = None
self.outgoing_call_connected_packets = None
self.incoming_call_requests = None
self.incoming_call_replies = None
self.incoming_call_connected_packets = None
self.call_disconnect_notify_packets = None
self.wan_error_notify_packets = None
self.set_link_info_packets = None
self.service_relay_requests = None
self.service_relay_replies = None
self.acknowledgement_packets = None
self._segment_path = lambda: "transmit"
def __setattr__(self, name, value):
self._perform_setattr(L2Tp.Counters.Control.Tunnels.Tunnel.Global.Transmit, ['unknown_packets', 'zero_length_body_packets', 'start_control_connection_requests', 'start_control_connection_replies', 'start_control_connection_notifications', 'stop_control_connection_notifications', 'hello_packets', 'outgoing_call_requests', 'outgoing_call_replies', 'outgoing_call_connected_packets', 'incoming_call_requests', 'incoming_call_replies', 'incoming_call_connected_packets', 'call_disconnect_notify_packets', 'wan_error_notify_packets', 'set_link_info_packets', 'service_relay_requests', 'service_relay_replies', 'acknowledgement_packets'], name, value)
class Retransmit(Entity):
"""
Re transmit data
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: | |
key = 'weatherDewPoint'
value = header.value
elif header.key in ['fits:alt-obs']:
key = 'observerAlt'
value = header.value.split('/')[0].strip().strip("'").lower()
elif header.key in ['fits:pierside']:
key = 'pierSide'
value = header.value.split('/')[0].strip().strip("'").lower()
elif header.key in ['fits:flipstat']:
key = 'pierFlipState'
value = header.value
elif header.key in ['fits:object']:
key = 'object'
value = header.value.split('/')[0].strip().strip("'")
#TODO: Check if there is a declination component for hour angle or if it just uses regular declination.
#TODO: The documentation for 'fits:ha' says it is the 'telescope hour angle', need to confirm if this is the same as 'fits:objctha'.
elif header.key in ['fits:objctha', 'fits:ha']:
key = 'objectHA'
value = header.value.split('/')[0].strip().strip("'").lower()
elif header.key in ['fits:objctra', 'fits:ra']:
key = 'objectRA'
value = header.value.split('/')[0].strip().strip("'").lower()
if 'degree' in header.value.lower():
value = str(parseDMS(value))
else:
value = str(parseHMS(value))
elif header.key in ['fits:objctdec', 'fits:dec']:
key = 'objectDec'
value = header.value.split('/')[0].strip().strip("'").lower()
value = str(parseDMS(value))
elif header.key in ['fits:equinox', 'fits:epoch']:
key = 'equinox'
value = header.value.split()[0]
elif header.key in ['fits:objctalt', 'fits:altitude', 'fits:alt-obj']:
key = 'objectAlt'
value = header.value.split('/')[0].strip().strip("'").lower()
value = str(parseDMS(value))
elif header.key in ['fits:objctaz', 'fits:azimuth', 'fits:az-obj']:
key = 'objectAz'
value = header.value.split('/')[0].strip().strip("'").lower()
value = str(parseDMS(value))
elif header.key in ['fits:airmass', 'fits:secz']:
key = 'airmass'
value = header.value.split()[0]
elif header.key in ['fits:notes']:
key = 'notes'
value = header.value
elif header.key in ['comment', 'fits:comment']:
key = 'comment'
value = header.value
elif header.key in ['fits:history']:
key = 'history'
value = header.value
elif header.key in ['fits:aperture', 'fits:aptdia']:
key = 'aperture'
value = header.value.split()[0].strip().strip("'").lower()
elif header.key in ['fits:aptarea']:
key = 'apertureArea'
value = header.value.split()[0].strip().strip("'").lower()
elif header.key in ['fits:focallen']:
key = 'focalLength'
value = header.value.split()[0].strip().strip("'").lower()
elif header.key == 'fits:filter':
key = 'filter'
value = header.value.split()[0].strip().strip("'").lower()
elif header.key == 'fits:clrband':
key = 'colorBand'
value = header.value.split('/')[0].strip().strip("'").lower()
elif header.key == 'fits:colorspc':
key = 'colorSpace'
value = header.value.split('/')[0].strip().strip("'").lower()
elif header.key == 'fits:iso':
key = 'iso'
value = str(abs(int(header.value.split()[0].strip())))
else:
if header.key not in settings.NON_PROPERTY_KEYS:
errorText += 'Warning: Unhandled header key: ' + header.key + '\n'
continue
# Many of these are stripped already, but strip them once more just to be sure no extra whitespace got included.
key = key.strip()
value = value.strip()
if key != "" and value != "":
#TODO: Consider setting up a function to do bulk_create of image properties.
image.addImageProperty(key, value, False, header)
#TODO: Also need to read all the image properties like flatCorrected, etc, and set imageIsCalibrated accordingly.
for result in image.getImageProperty('history', asList=True):
if result.value.lower() == 'calibrated':
image.addImageProperty('imageIsCalibrated', 'true', True, result.header)
# Handle data split across multiple header fields like dateObs and timeObs.
dateObsResult = models.ImageProperty.objects.filter(image=image, key='dateObs').first()
timeObsResult = models.ImageProperty.objects.filter(image=image, key='timeObs').first()
if dateObsResult != None and timeObsResult != None:
try:
#TODO: Need to check that dateObs does not already include the time value, some do, some don't.
image.dateTime = dateparser.parse(dateObsResult.value + ' ' + timeObsResult.value)
image.save()
except ValueError:
errorText += "ERROR: Could not parse dateObs: " + value + "\n"
# If this image was stacked from multiple images we need to set/modify some ImageProperties.
numCombinedImages = models.ImageProperty.objects.filter(image=image, key='numCombinedImages').first()
if numCombinedImages is not None:
numCombinedImages = int(numCombinedImages.value)
if numCombinedImages > 1:
image.addImageProperty('imageIsStacked', 'yes', False, None)
stackedTypeDict = {
'light': 'stackedLight',
'dark': 'masterDark',
'bias': 'masterBias',
'flat': 'masterFlat',
}
imageType = image.getImageProperty('imageType')
try:
newImageType = stackedTypeDict[imageType]
except KeyError:
errorText += 'Unknown stacked image type: ' + str(imageType)
newImageType = imageType
if newImageType is not None:
image.addImageProperty('imageType', newImageType, True)
image.frameType = newImageType
image.save()
# If both objectRA and objectDec are 0 then remove them since they are likely just null values from the
# software that wrote the fits file.
objectRA = image.getImageProperty('objectRA')
objectDec = image.getImageProperty('objectDec')
if objectRA is not None and objectDec is not None:
if abs(float(objectRA) - 0) < 1e-9 and abs(float(objectDec) - 0) < 1e-9:
image.removeImageProperty('objectRA')
image.removeImageProperty('objectDec')
image.addImageProperty('objectRADecRemoved', 'true', True)
# If this image has one or more 'object' tags we should examine them to see what we can determine.
for obj in image.getImageProperty('object', asList=True):
imageTypeObjectDict = {
'master bias frame': 'masterBias',
'master dark frame': 'masterDark',
'master flat frame': 'masterFlat'
}
if obj.value.lower() in imageTypeObjectDict:
newImageType = imageTypeObjectDict[obj.value.lower()]
image.addImageProperty('imageType', newImageType, True)
image.frameType = newImageType
image.save()
else:
#TODO: Try to look up the object in the various catalogs we have in the database.
pass
# Set "known unknown" tags on fields that should be set for all images, but
# haven't been read in from the header in the file.
knownUnknownKeys = [ 'imageType', 'filter', 'exposureTime', 'flatCorrected',
'darkCorrected', 'biasCorrected', 'width', 'height', 'binningX', 'binningY',
'imageIsStacked' ]
for key in knownUnknownKeys:
imageProperty = image.getImageProperty(key)
if imageProperty is None:
image.addImageProperty(key, 'unknown')
# Examine the filename of the original file and see if there are parts of the file
# name that make sense now because of the headers we have parsed in.
filenameMask = [''] * len(image.fileRecord.originalFileName)
for c, i in zip(image.fileRecord.originalFileName, range(len(image.fileRecord.originalFileName))):
if c in [' ', '_', '-']:
filenameMask[i] = c
return constructProcessOutput(outputText, errorText, time.time() - taskStartTime)
@shared_task
def flagSources(imageIdString, processInputId):
outputText = ""
errorText = ""
taskStartTime = time.time()
outputText += "Flagging image sources for image '{}'\n".format(imageIdString)
imageId = int(imageIdString)
image = models.Image.objects.get(pk=imageId)
# If this is a calibration image we do not need to run this task.
shouldReturn, retText = checkIfCalibrationImage(image, 'astrometryNet', 'skippedCalibration')
outputText += retText
if shouldReturn:
return constructProcessOutput(outputText, errorText, time.time() - taskStartTime)
hotPixels = models.UserSubmittedHotPixel.objects.filter(image_id=imageId)
numHotPixels = hotPixels.count()
if numHotPixels > 0:
outputText += "Image has {} user submitted hot pixels in it:\n".format(numHotPixels)
tablesToSearch = [models.SextractorResult, models.Image2xyResult, models.DaofindResult,
models.StarfindResult, models.UserSubmittedResult, models.SourceFindMatch]
fwhmMedian = parseFloat(image.getImageProperty('fwhmMedian', 3.0))
for table in tablesToSearch:
sources = table.objects.filter(image_id=imageId)
for source in sources:
# Flagging as near edge if it is within 3 fwhm of the edge.
edgeDist = 3.0 * fwhmMedian
if source.pixelX <= edgeDist or source.pixelY <= edgeDist or \
source.pixelX >= image.dimX - edgeDist or source.pixelY >= image.dimY - edgeDist:
source.flagEdge = True
else:
source.flagEdge = False
for hotPixel in hotPixels:
deltaX = source.pixelX - hotPixel.pixelX
deltaY = source.pixelY - hotPixel.pixelY
distSquared = deltaX*deltaX + deltaY*deltaY
hpThreshold = 3.0 * fwhmMedian
# Flagging as hot pixel if the source is within 3 fwhm of a hot pixel.
if math.sqrt(distSquared) < hpThreshold:
outputText += "source {} is within {} pixels of hot pixel {}.\n".format(source.pk, round(hpThreshold, 1), hotPixel.pk)
source.flagHotPixel = True
source.confidence = 0.1
# If the source is not flagged as being near a hot pixel, change its value from Null to False in the
# database to differentiate between 'has not been checked yet' and 'has been checked but is not flagged'.
if source.flagHotPixel is None:
source.flagHotPixel = False
source.save()
else:
outputText += "Image has no user submitted hot pixels in it.\n"
return constructProcessOutput(outputText, errorText, time.time() - taskStartTime)
@shared_task
def imageCombine(argList, processInputId):
outputText = ""
errorText = ""
taskStartTime = time.time()
argDict = {}
processInput = models.ProcessInput.objects.get(pk=processInputId)
#TODO: Change this.
desiredFilename = 'cosmic_combined.fit'
fss = FileSystemStorage()
outputFilename = fss.get_available_name(desiredFilename)
idList = []
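# Expected argList format (illustrative example): bare integers are primary keys of the
# images to combine, while 'name=type:value' entries become keyword arguments, e.g.
#   ['101', '102', '103', 'masterDarkId=int:42', 'masterFlatId=int:43']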
for arg in argList:
try:
pk = int(arg)
idList.append(pk)
except ValueError:
splits = arg.split('=', 1)
if len(splits) == 2:
argType, argVal = splits[1].split(':', 1)
if argType == 'str':
argDict[splits[0]] = argVal
elif argType == 'int':
argDict[splits[0]] = int(argVal)
else:
errorText += "argType '{}' not recognised, aborting.".format(argType)
return constructProcessOutput(outputText, errorText, time.time() - taskStartTime)
else:
errorText += "Could not parse '{}' as int or as 'arg=type:val', skipping argument.".format(arg)
return constructProcessOutput(outputText, errorText, time.time() - taskStartTime)
outputText += "argDict is:\n"
for key in argDict:
outputText += " " + key + " = " + str(argDict[key]) + "\n"
outputText += '\n\n'
if 'masterBiasId' in argDict:
masterBiasImage = models.Image.objects.filter(pk=argDict['masterBiasId']).first()
else:
masterBiasImage = None
if 'masterDarkId' in argDict:
masterDarkImage = models.Image.objects.filter(pk=argDict['masterDarkId']).first()
darkExposure = masterDarkImage.getExposureTime()
else:
masterDarkImage = None
if 'masterFlatId' in argDict:
masterFlatImage = models.Image.objects.filter(pk=argDict['masterFlatId']).first()
else:
masterFlatImage = None
images = models.Image.objects.filter(pk__in=idList)
dataArray = []
exposureSum = 0
exposureCount = 0
doReproject = True
for image in images:
if image.getBestPlateSolution() is None:
outputText += 'Image {} does not have a plate solution, not reprojecting.\n'.format(image.pk)
doReproject = False
if doReproject:
referenceWCS = images[0].getBestPlateSolution().wcs()
minX = None
minY = None
maxX = None
maxY = None
for image in images:
# Repository: olmoulin/deer
"""
CRAR Neural network using Keras
"""
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Layer, Dense, Flatten, Activation, Conv2D, MaxPooling2D, UpSampling2D, Reshape, Permute, Add, Subtract, Dot, Multiply, Average, Lambda, Concatenate, BatchNormalization, RepeatVector, AveragePooling2D
from tensorflow.keras import regularizers
#np.random.seed(111111)
class NN():
"""
Deep Q-learning network using Keras
Parameters
-----------
batch_size : int
Number of tuples taken into account for each iteration of gradient descent
input_dimensions :
n_actions :
random_state : numpy random number generator
high_int_dim : Boolean
Whether the abstract state should be high dimensional in the form of frames/vectors or whether it should
be low-dimensional
"""
def __init__(self, batch_size, input_dimensions, n_actions, random_state, **kwargs):
self._input_dimensions=input_dimensions
self._batch_size=batch_size
self._random_state=random_state
self._n_actions=n_actions
self._high_int_dim=kwargs["high_int_dim"]
if(self._high_int_dim==True):
self.n_channels_internal_dim=kwargs["internal_dim"] #dim[-3]
else:
self.internal_dim=kwargs["internal_dim"] #2 for laby
#3 for catcher
def encoder_model(self):
""" Instantiate a Keras model for the encoder of the CRAR learning algorithm.
The model takes the following as input
s : list of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
Parameters
-----------
Returns
-------
Keras model with output x (= encoding of s)
"""
outs_conv=[]
inputs=[]
for i, dim in enumerate(self._input_dimensions):
# - observation[i] is a FRAME
if len(dim) == 3 or len(dim) == 4:
if(len(dim) == 4):
input = Input(shape=(dim[-4],dim[-3],dim[-2],dim[-1]))
inputs.append(input)
input = Reshape((dim[-4]*dim[-3],dim[-2],dim[-1]), input_shape=(dim[-4],dim[-3],dim[-2],dim[-1]))(input)
x=Permute((2,3,1), input_shape=(dim[-4]*dim[-3],dim[-2],dim[-1]))(input) #data_format='channels_last'
else:
input = Input(shape=(dim[-3],dim[-2],dim[-1]))
inputs.append(input)
x=Permute((2,3,1), input_shape=(dim[-3],dim[-2],dim[-1]))(input) #data_format='channels_last'
if(dim[-2]>12 and dim[-1]>12):
self._pooling_encoder=6
x = Conv2D(8, (2, 2), padding='same', activation='tanh')(x)
x = Conv2D(16, (2, 2), padding='same', activation='tanh')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=None, padding='same')(x)
x = Conv2D(32, (3, 3), padding='same', activation='tanh')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=None, padding='same')(x)
else:
self._pooling_encoder=1
if(self._high_int_dim==True):
x = Conv2D(self.n_channels_internal_dim, (1, 1), padding='same')(x)
out = x
else:
out = Flatten()(x)
# - observation[i] is a VECTOR
elif len(dim) == 2:
if dim[-2] > 3:  # dim is (history, features) here, so dim[-2] is the history length
input = Input(shape=(dim[-2],dim[-1]))
inputs.append(input)
reshaped=Reshape((dim[-2],dim[-1],1), input_shape=(dim[-2],dim[-1]))(input) #data_format='channels_last'
x = Conv2D(16, (2, 1), activation='relu', padding='valid')(reshaped) #Conv on the history
x = Conv2D(16, (2, 2), activation='relu', padding='valid')(x) #Conv on the history & features
if(self._high_int_dim==True):
out = x
else:
out = Flatten()(x)
else:
input = Input(shape=(dim[-2],dim[-1]))
inputs.append(input)
out = Flatten()(input)
# - observation[i] is a SCALAR -
else:
if dim[-1] > 3:  # dim is (history,) here, so dim[-1] is the history length
# this returns a tensor
input = Input(shape=(dim[-1],))
inputs.append(input)
reshaped=Reshape((1,dim[-1],1), input_shape=(dim[-1],))(input) #data_format='channels_last'
x = Conv2D(8, (1,2), activation='relu', padding='valid')(reshaped) #Conv on the history
x = Conv2D(8, (1,2), activation='relu', padding='valid')(x) #Conv on the history
if(self._high_int_dim==True):
out = x
else:
out = Flatten()(x)
else:
input = Input(shape=(dim[-1],))
inputs.append(input)
out=input
outs_conv.append(out)
if(self._high_int_dim==True):
model = Model(inputs=inputs, outputs=outs_conv)
if(self._high_int_dim==False):
if len(outs_conv)>1:
x = Concatenate()(outs_conv)
else:
x = outs_conv[0]
# we stack a deep fully-connected network on top
x = Dense(200, activation='tanh')(x)
x = Dense(100, activation='tanh')(x)
x = Dense(50, activation='tanh')(x)
x = Dense(10, activation='tanh')(x)
x = Dense(self.internal_dim)(x)#, activity_regularizer=regularizers.l2(0.00001))(x) #, activation='relu'
model = Model(inputs=inputs, outputs=x)
return model
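# Usage sketch (illustrative values only; assumes a single frame-type observation and a
# low-dimensional abstract state, i.e. high_int_dim=False):
#   rng = np.random.RandomState(0)
#   nn = NN(batch_size=32, input_dimensions=[(1, 48, 48)], n_actions=4,
#           random_state=rng, high_int_dim=False, internal_dim=2)
#   encoder = nn.encoder_model()   # Keras Model mapping an observation to its abstract state x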
def encoder_diff_model(self,encoder_model):
""" Instantiate a Keras model that provides the difference between two encoded pseudo-states
The model takes the two following inputs:
s1 : list of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
s2 : list of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
Parameters
-----------
encoder_model: instantiation of a Keras model for the encoder
Returns
-------
model with output the difference between the encoding of s1 and the encoding of s2
"""
inputs=[]
for j in range(2):
for i, dim in enumerate(self._input_dimensions):
if(len(dim) == 4):
input = Input(shape=(dim[-4],dim[-3],dim[-2],dim[-1]))
inputs.append(input)
input = Reshape((dim[-4]*dim[-3],dim[-2],dim[-1]), input_shape=(dim[-4],dim[-3],dim[-2],dim[-1]))(input)
elif(len(dim) == 3):
input = Input(shape=(dim[-3],dim[-2],dim[-1]))
inputs.append(input)
elif len(dim) == 2:
input = Input(shape=(dim[-2],dim[-1]))
inputs.append(input)
else:
input = Input(shape=(dim[-1],))
inputs.append(input)
half = len(inputs)//2
x1 = encoder_model(inputs[:half])
x2 = encoder_model(inputs[half:])
if (self._high_int_dim==True):
x1=Flatten()(x1)
x2=Flatten()(x2)
x = Subtract()([x1,x2])
model = Model(inputs=inputs, outputs=x)
return model
def transition_model(self):
""" Instantiate a Keras model for the transition between two encoded pseudo-states.
The model takes as inputs:
x : internal state
a : int
the action considered
Parameters
-----------
Returns
-------
model that outputs the transition of (x,a)
"""
if(self._high_int_dim==True):
dim=self._input_dimensions[0] #FIXME
inputs = [ Input(shape=((dim[-2] // self._pooling_encoder),(dim[-1] // self._pooling_encoder),self.n_channels_internal_dim)), Input( shape=(self._n_actions,) ) ] # data_format='channels_last'
layers_action=inputs[1]
layers_action=RepeatVector((dim[-2] // self._pooling_encoder)*(dim[-1] // self._pooling_encoder))(layers_action)
layers_action=Reshape(((dim[-2] // self._pooling_encoder),(dim[-1] // self._pooling_encoder),self._n_actions))(layers_action)
x = Concatenate(axis=-1)([layers_action,inputs[0]])
x = Conv2D(16, (1, 1), padding='same', activation='tanh')(x)
x = Conv2D(32, (2, 2), padding='same', activation='tanh')(x)
x = Conv2D(64, (3, 3), padding='same', activation='tanh')(x)
x = Conv2D(32, (2, 2), padding='same', activation='tanh')(x)
x = Conv2D(16, (1, 1), padding='same', activation='tanh')(x)
x = Conv2D(self.n_channels_internal_dim, (1, 1), padding='same')(x)
x = Add()([inputs[0],x])
else:
inputs = [ Input( shape=(self.internal_dim,) ), Input( shape=(self._n_actions,) ) ] # x
x = Concatenate()(inputs)
x = Dense(10, activation='tanh')(x)
x = Dense(30, activation='tanh')(x)
x = Dense(30, activation='tanh')(x)
x = Dense(10, activation='tanh')(x)
x = Dense(self.internal_dim)(x)
x = Add()([inputs[0],x])
model = Model(inputs=inputs, outputs=x)
return model
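# Usage sketch (illustrative; continues the low-dimensional example above):
#   transition = nn.transition_model()
#   x = np.zeros((1, nn.internal_dim), dtype="float32")
#   a = np.zeros((1, nn._n_actions), dtype="float32"); a[0, 0] = 1.0   # one-hot action
#   x_next = transition.predict([x, a])   # predicted next abstract state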
def diff_Tx_x_(self,encoder_model,transition_model,plan_depth=0):
""" For plan_depth=0, instantiate a Keras model that provides the difference between T(E(s1),a) and E(s2).
Note that it gives 0 if the transition leading to s2 is terminal (we don't need to fit the transition if
it is terminal).
For plan_depth=0, the model takes the four following inputs:
s1 : list of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
s2 : list of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
a : list of ints with length (plan_depth+1)
the action(s) considered at s1
terminal : boolean
Whether the transition leading to s2 is terminal
Parameters
-----------
encoder_model: instantiation of a Keras model for the encoder (E)
transition_model: instantiation of a Keras model for the transition (T)
plan_depth: if>1, it provides the possibility to consider a sequence of transitions between s1 and s2
(input a is then a list of actions)
Returns
-------
model with output Tx (= model estimate of x')
"""
inputs=[]
for j in range(2):
for i, dim in enumerate(self._input_dimensions):
if(len(dim) == 4):
input = Input(shape=(dim[-4],dim[-3],dim[-2],dim[-1]))
inputs.append(input)
input = Reshape((dim[-4]*dim[-3],dim[-2],dim[-1]), input_shape=(dim[-4],dim[-3],dim[-2],dim[-1]))(input)
elif(len(dim) == 3):
input = Input(shape=(dim[-3],dim[-2],dim[-1]))
inputs.append(input)
elif len(dim) == 2:
input = Input(shape=(dim[-2],dim[-1]))
inputs.append(input)
else:
input = Input(shape=(dim[-1],))
inputs.append(input)
half = len(inputs)//2
enc_x = encoder_model(inputs[:half]) #s --> x
enc_x_ = encoder_model(inputs[half:]) #s --> x
Tx= enc_x
for d in range(plan_depth+1):
inputs.append(Input(shape=(self._n_actions,)))
Tx= transition_model([Tx,inputs[-1]])
x = Subtract()([Tx,enc_x_])
input = Input(shape=(1,)) # 1-terminals (0 if transition is terminal)
inputs.append(input)
x = Multiply()([x,inputs[-1]])# set to 0 if terminal because we don't care about fitting that transition
model = Model(inputs=inputs, outputs=x )
return model
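# Training sketch (illustrative, not the exact agent code): the output of this model is
# driven towards zero so that T(E(s1), a) matches E(s2) on non-terminal transitions.
#   full = nn.diff_Tx_x_(encoder, transition)
#   full.compile(optimizer='rmsprop', loss='mse')
#   full.train_on_batch([s1, s2, onehot_actions, 1.0 - terminals],
#                       np.zeros((batch_size, nn.internal_dim)))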
def force_features(self,encoder_model,transition_model,plan_depth=0):
""" Instantiate a Keras model that provides the vector of the transition at E(s1). It is calculated as the different between E(s1) and E(T(s1)).
Used to force the directions of the transitions.
The model takes the four following inputs:
s1 : list of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
a : list of ints with length (plan_depth+1)
the action(s) considered at s1
Parameters
-----------
encoder_model: instantiation of a Keras model for the encoder (E)
transition_model: instantiation of a Keras model for the transition (T)
plan_depth: if>1, it provides the possibility to consider a sequence of transitions between s1 and s2
PV[i : j + 1] = False
# To determine if point before i is a spike, need n-1
# valid points after j:
k = min(y.size, j + n)
count = k - (j + 1) # n-1 if away from end
# shift good points backward in time to get rid of spikes:
# <---
# ......ssss+++++ ==> ......+++++
# i j
y[i : i + count] = y[j + 1 : k]
# update only sections that need it: from i-n to i
j = i
i = max(i - n, 0)
ave[i:j] = exclusive_sgfilter(y[i:k], n, exclude_point=xp)[: j - i]
y_delta[i:j] = abs(y[i:j] - ave[i:j])
avsq = exclusive_sgfilter(y[i:k] ** 2, n, exclude_point=xp)[: j - i]
var[i:j] = avsq - ave[i:j] ** 2
# use abs to care of negative numerical zeros:
std[i:j] = np.sqrt(abs(var[i:j]))
limit[i:j] = np.fmax(sigma * std[i:j], min_limit)
PV[i:j] = y_delta[i:j] > limit[i:j]
def _outs_last(y, n, sigma, min_limit, xp, ave, y_delta, var, std, limit):
PV = y_delta > limit
while True:
pv = PV.nonzero()[0]
if pv.size == 0:
yield None, ave + limit, ave - limit # we're done
# keep only first one ... later ones can change
pv = _sweep_out_nexts(y, pv[0], limit, ave)
yield pv, ave + limit, ave - limit
i, j = pv[0], pv[-1]
if j == y.size - 1:
yield None, ave + limit, ave - limit # we're done
PV[i : j + 1] = False
# To determine if point after j is a spike, need n-1
# valid points before i:
k = max(0, i - n + 1)
count = i - k # n-1 if away from start
# shift good points forward in time to get rid of spikes:
# --->
# ......ssss+++++ ==> ......+++++
# i j
y[j - count + 1 : j + 1] = y[k:i]
# update only sections that need it: from j to j+n
i = j
j = min(j + n, y.size)
m = i - j # -(j-i) ... keep last j-i points
ave[i:j] = exclusive_sgfilter(y[k:j], n, exclude_point=xp)[m:]
y_delta[i:j] = abs(y[i:j] - ave[i:j])
avsq = exclusive_sgfilter(y[k:j] ** 2, n, exclude_point=xp)[m:]
var[i:j] = avsq - ave[i:j] ** 2
# use abs to care of negative numerical zeros:
std[i:j] = np.sqrt(abs(var[i:j]))
limit[i:j] = np.fmax(sigma * std[i:j], min_limit)
PV[i:j] = y_delta[i:j] > limit[i:j]
def _outs_gen(y, n, sigma, min_limit, xp, ave, y_delta, limit):
PV = np.zeros(y.size, bool)
hi = ave + limit
lo = ave - limit
while True:
pv = y_delta > limit
if not pv.any():
yield None, hi, lo # we're done
PV[~PV] = pv
yield PV.nonzero()[0], hi, lo
y = y[~pv]
ave, y_delta, var, std, limit = _get_stats_full(y, n, sigma, min_limit, xp)
hi[~PV] = ave + limit
lo[~PV] = ave - limit
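# Note: _outs_first, _outs_last and _outs_gen are generators. Each iteration yields a tuple
# (outlier_indices, upper_limit, lower_limit); yielding None in place of the indices signals
# that no further outliers were found and the consumer should stop iterating.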
def _find_outlier_peaks(y, n, sigma, min_limit, xp):
ave, y_delta, var, std, limit = _get_stats_full(y, n, sigma, min_limit, xp)
if xp in ("first", 0):
y = y.copy()
yield from _outs_first(
y, n, sigma, min_limit, xp, ave, y_delta, var, std, limit
)
elif xp in ("last", n - 1):
y = y.copy()
yield from _outs_last(y, n, sigma, min_limit, xp, ave, y_delta, var, std, limit)
else:
yield from _outs_gen(y, n, sigma, min_limit, xp, ave, y_delta, limit)
def despike(
x,
n,
sigma=8.0,
maxiter=-1,
threshold_sigma=2.0,
threshold_value=None,
exclude_point="first",
**kwargs,
):
"""
Delete outlier data points from signal
Parameters
----------
x : 1d array_like
Signal to de-spike.
n : odd integer
Number of points for moving average; if even, it is reset to
``n+1``. If greater than the dimension of `x`, it is reset to
the dimension or 1 less.
sigma : real scalar; optional
Number of standard deviations beyond which a point is
considered an outlier. The default value is quite high; this
is possible because the point itself is excluded from the
calculations.
maxiter : integer; optional
Maximum number of iterations of outlier removal allowed. If
`exclude_point` is 'first', only the last spike is removed on
each iteration; if it is 'last', only the first spike is
removed on each iteration. It is done this way because
removing a spike can expose other points as spikes (points that did
not appear to be spikes before because the removed spike was present). If <= 0,
there is no set limit and the looping will stop when no more
outliers are detected. Routine will always run at least 1 loop
(setting `maxiter` to 0 is the same as setting it to 1).
threshold_sigma : scalar; optional
Number of standard deviations below which all data is kept.
This standard deviation is of the entire input signal minus
the moving average (using a window of `n` size). This value
exists to avoid deleting small deviations such as bit
toggles. Set to 0.0 to not use a threshold. `threshold_value`
overrides `threshold_sigma` if it is not None.
threshold_value : scalar or None; optional
Optional method for specifying a minimum threshold. If not
None, this scalar is used as an absolute minimum deviation
from the moving average for a value to be considered a spike.
Overrides `threshold_sigma`. Set to 0.0 to not use a
threshold.
exclude_point : string or int or None; optional
Defines where, within each window, the point that is being
considered as a potential outlier is. For example, 'first'
compares the first point in each window to the rest in that
window to test if it is an outlier. This option is passed
directly to :func:`exclusive_sgfilter`. If integer, it must be
in [0, n), specifying the point to exclude. If string, it must
be 'first', 'middle', or 'last' (which is the same as ``0``,
``n // 2``, and ``n-1``, respectively). If None, the point
will be in the middle of the window and will not be excluded
from the statistics (this is not recommended).
**kwargs : other args are ignored
This is here to accommodate :func:`fixtime`.
Returns
-------
A SimpleNamespace with the members:
x : 1d ndarray
Despiked version of input `x`. Will be shorter than input `x`
if any spikes were deleted; otherwise, it will equal input
`x`.
pv : bool 1d ndarray; same size as input `x`
Has True where an outlier was detected
hilim : 1d ndarray; same size as input `x`
This is the upper limit: ``mean + sigma*std``
lolim : 1d ndarray; same size as input `x`
This is the lower limit: ``mean - sigma*std``
niter : integer
Number of iterations executed
Notes
-----
Uses :func:`exclusive_sgfilter` to exclude the point being tested
from the moving average and the moving standard deviation
calculations. Each point is tested. The points near the ends of
the signal may not be at the requested position in the window (see
:func:`exclusive_sgfilter` for more information on this).
To not use a threshold, set `threshold_sigma` to 0.0 (or set
`threshold_value` to 0.0).
.. note::
If you plan to use both :func:`fixtime` and :func:`despike`,
it is recommended that you let :func:`fixtime` call
:func:`despike` (via the `delspikes` option) instead of
calling it directly. This is preferable because the ideal time
to run :func:`despike` is in the middle of :func:`fixtime`:
after drop-outs have been deleted but before gaps are filled.
Examples
--------
Compare `exclude_point` 'first' and 'middle' options. An
explanation follows:
>>> import numpy as np
>>> from pyyeti import dsp
>>> x = [1, 1, 1, 1, 5, 5, 1, 1, 1, 1]
>>> s = dsp.despike(x, n=5, exclude_point='first')
>>> s.x
array([1, 1, 1, 1, 1, 1, 1, 1])
>>> s = dsp.despike(x, n=5, exclude_point='middle')
>>> s.x
array([1, 1, 1, 1, 5, 5, 1, 1, 1, 1])
The two 5 points get deleted when using 'first' but not when using
'middle'. This is logical because, when using 'first', the second
5 is compared to the following four 1 values (the window is
``[5, 1, 1, 1, 1]``). The second loop then catches the other 5. But
when 'middle' is used, the window around each 5 still contains the other
5, which inflates the local mean and standard deviation enough that
neither point exceeds the outlier limit.
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
import array
RESULT_PERSISTENCE_TYPE_SOTI = 1
RESULT_PERSISTENCE_TYPE_LAUNCHER = 2
RESULT_PERSISTENCE_TYPE_JUVI = 3
RESULT_CRYPTO_KEY_LENGTH = 3
class ResultInstance:
def __init__(self):
self.__dict__['id'] = 0
self.__dict__['versionMajor'] = 0
self.__dict__['versionMinor'] = 0
self.__dict__['versionFix'] = 0
self.__dict__['versionBuild'] = 0
def __getattr__(self, name):
if name == 'id':
return self.__dict__['id']
if name == 'versionMajor':
return self.__dict__['versionMajor']
if name == 'versionMinor':
return self.__dict__['versionMinor']
if name == 'versionFix':
return self.__dict__['versionFix']
if name == 'versionBuild':
return self.__dict__['versionBuild']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'id':
self.__dict__['id'] = value
elif name == 'versionMajor':
self.__dict__['versionMajor'] = value
elif name == 'versionMinor':
self.__dict__['versionMinor'] = value
elif name == 'versionFix':
self.__dict__['versionFix'] = value
elif name == 'versionBuild':
self.__dict__['versionBuild'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_INSTANCE_ID, self.__dict__['id'])
submsg.AddU32(MSG_KEY_RESULT_INSTANCE_VERSION_MAJOR, self.__dict__['versionMajor'])
submsg.AddU32(MSG_KEY_RESULT_INSTANCE_VERSION_MINOR, self.__dict__['versionMinor'])
submsg.AddU32(MSG_KEY_RESULT_INSTANCE_VERSION_FIX, self.__dict__['versionFix'])
submsg.AddU32(MSG_KEY_RESULT_INSTANCE_VERSION_BUILD, self.__dict__['versionBuild'])
mmsg.AddMessage(MSG_KEY_RESULT_INSTANCE, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_INSTANCE, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['id'] = submsg.FindU32(MSG_KEY_RESULT_INSTANCE_ID)
self.__dict__['versionMajor'] = submsg.FindU32(MSG_KEY_RESULT_INSTANCE_VERSION_MAJOR)
self.__dict__['versionMinor'] = submsg.FindU32(MSG_KEY_RESULT_INSTANCE_VERSION_MINOR)
self.__dict__['versionFix'] = submsg.FindU32(MSG_KEY_RESULT_INSTANCE_VERSION_FIX)
self.__dict__['versionBuild'] = submsg.FindU32(MSG_KEY_RESULT_INSTANCE_VERSION_BUILD)
class ResultConfigKey:
def __init__(self):
self.__dict__['path'] = ''
self.__dict__['value'] = ''
def __getattr__(self, name):
if name == 'path':
return self.__dict__['path']
if name == 'value':
return self.__dict__['value']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'path':
self.__dict__['path'] = value
elif name == 'value':
self.__dict__['value'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddStringUtf8(MSG_KEY_RESULT_CONFIG_KEY_PATH, self.__dict__['path'])
submsg.AddStringUtf8(MSG_KEY_RESULT_CONFIG_KEY_VALUE, self.__dict__['value'])
mmsg.AddMessage(MSG_KEY_RESULT_CONFIG_KEY, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_CONFIG_KEY, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['path'] = submsg.FindString(MSG_KEY_RESULT_CONFIG_KEY_PATH)
self.__dict__['value'] = submsg.FindString(MSG_KEY_RESULT_CONFIG_KEY_VALUE)
class ResultConfigBase:
def __init__(self):
self.__dict__['instance'] = ResultInstance()
self.__dict__['persistenceMethod'] = 0
self.__dict__['cryptoKey'] = array.array('L')
i = 0
while i < RESULT_CRYPTO_KEY_LENGTH:
self.__dict__['cryptoKey'].append(0)
i = i + 1
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'persistenceMethod':
return self.__dict__['persistenceMethod']
if name == 'cryptoKey':
return self.__dict__['cryptoKey']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'persistenceMethod':
self.__dict__['persistenceMethod'] = value
elif name == 'cryptoKey':
self.__dict__['cryptoKey'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg2 = MarshalMessage()
self.__dict__['instance'].Marshal(submsg2)
submsg.AddMessage(MSG_KEY_RESULT_CONFIG_BASE_CONFIG_INSTANCE, submsg2)
submsg.AddU8(MSG_KEY_RESULT_CONFIG_BASE_PERSISTENCE_METHOD, self.__dict__['persistenceMethod'])
submsg.AddData(MSG_KEY_RESULT_CONFIG_BASE_CRYPTO_KEY, self.__dict__['cryptoKey'])
mmsg.AddMessage(MSG_KEY_RESULT_CONFIG_BASE, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_CONFIG_BASE, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
submsg2 = submsg.FindMessage(MSG_KEY_RESULT_CONFIG_BASE_CONFIG_INSTANCE)
self.__dict__['instance'].Demarshal(submsg2)
self.__dict__['persistenceMethod'] = submsg.FindU8(MSG_KEY_RESULT_CONFIG_BASE_PERSISTENCE_METHOD)
self.__dict__['cryptoKey'] = submsg.FindData(MSG_KEY_RESULT_CONFIG_BASE_CRYPTO_KEY)
class ResultModule:
def __init__(self):
self.__dict__['size'] = 0
self.__dict__['order'] = 0
self.__dict__['flags'] = 0
self.__dict__['id'] = 0
self.__dict__['moduleName'] = ''
self.__dict__['processName'] = ''
self.__dict__['hash'] = array.array('B')
def __getattr__(self, name):
if name == 'size':
return self.__dict__['size']
if name == 'order':
return self.__dict__['order']
if name == 'flags':
return self.__dict__['flags']
if name == 'id':
return self.__dict__['id']
if name == 'moduleName':
return self.__dict__['moduleName']
if name == 'processName':
return self.__dict__['processName']
if name == 'hash':
return self.__dict__['hash']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'size':
self.__dict__['size'] = value
elif name == 'order':
self.__dict__['order'] = value
elif name == 'flags':
self.__dict__['flags'] = value
elif name == 'id':
self.__dict__['id'] = value
elif name == 'moduleName':
self.__dict__['moduleName'] = value
elif name == 'processName':
self.__dict__['processName'] = value
elif name == 'hash':
self.__dict__['hash'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_MODULE_SIZE, self.__dict__['size'])
submsg.AddU32(MSG_KEY_RESULT_MODULE_ORDER, self.__dict__['order'])
submsg.AddU32(MSG_KEY_RESULT_MODULE_FLAGS, self.__dict__['flags'])
submsg.AddU32(MSG_KEY_RESULT_MODULE_ID, self.__dict__['id'])
submsg.AddStringUtf8(MSG_KEY_RESULT_MODULE_MODULE_NAME, self.__dict__['moduleName'])
submsg.AddStringUtf8(MSG_KEY_RESULT_MODULE_PROCESS_NAME, self.__dict__['processName'])
submsg.AddData(MSG_KEY_RESULT_MODULE_HASH, self.__dict__['hash'])
mmsg.AddMessage(MSG_KEY_RESULT_MODULE, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_MODULE, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['size'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_SIZE)
self.__dict__['order'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_ORDER)
self.__dict__['flags'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_FLAGS)
self.__dict__['id'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_ID)
self.__dict__['moduleName'] = submsg.FindString(MSG_KEY_RESULT_MODULE_MODULE_NAME)
self.__dict__['processName'] = submsg.FindString(MSG_KEY_RESULT_MODULE_PROCESS_NAME)
try:
self.__dict__['hash'] = submsg.FindData(MSG_KEY_RESULT_MODULE_HASH)
except:
pass
class ResultModuleLoad:
def __init__(self):
self.__dict__['instance'] = 0
self.__dict__['id'] = 0
self.__dict__['moduleHandle'] = 0
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'id':
return self.__dict__['id']
if name == 'moduleHandle':
return self.__dict__['moduleHandle']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'id':
self.__dict__['id'] = value
elif name == 'moduleHandle':
self.__dict__['moduleHandle'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_MODULE_LOAD_INSTANCE, self.__dict__['instance'])
submsg.AddU32(MSG_KEY_RESULT_MODULE_LOAD_ID, self.__dict__['id'])
submsg.AddU64(MSG_KEY_RESULT_MODULE_LOAD_MODULE_HANDLE, self.__dict__['moduleHandle'])
mmsg.AddMessage(MSG_KEY_RESULT_MODULE_LOAD, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_MODULE_LOAD, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['instance'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_LOAD_INSTANCE)
self.__dict__['id'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_LOAD_ID)
self.__dict__['moduleHandle'] = submsg.FindU64(MSG_KEY_RESULT_MODULE_LOAD_MODULE_HANDLE)
class ResultModuleRead:
def __init__(self):
self.__dict__['instance'] = 0
self.__dict__['id'] = 0
self.__dict__['data'] = array.array('B')
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'id':
return self.__dict__['id']
if name == 'data':
return self.__dict__['data']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'id':
self.__dict__['id'] = value
elif name == 'data':
self.__dict__['data'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_MODULE_READ_INSTANCE, self.__dict__['instance'])
submsg.AddU32(MSG_KEY_RESULT_MODULE_READ_ID, self.__dict__['id'])
submsg.AddData(MSG_KEY_RESULT_MODULE_READ_DATA, self.__dict__['data'])
mmsg.AddMessage(MSG_KEY_RESULT_MODULE_READ, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_MODULE_READ, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['instance'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_READ_INSTANCE)
self.__dict__['id'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_READ_ID)
self.__dict__['data'] = submsg.FindData(MSG_KEY_RESULT_MODULE_READ_DATA)
class ResultModuleAdd:
def __init__(self):
self.__dict__['instance'] = 0
self.__dict__['id'] = 0
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'id':
return self.__dict__['id']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'id':
self.__dict__['id'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_MODULE_ADD_INSTANCE, self.__dict__['instance'])
submsg.AddU32(MSG_KEY_RESULT_MODULE_ADD_ID, self.__dict__['id'])
mmsg.AddMessage(MSG_KEY_RESULT_MODULE_ADD, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_MODULE_ADD, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['instance'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_ADD_INSTANCE)
self.__dict__['id'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_ADD_ID)
class ResultModuleDelete:
def __init__(self):
self.__dict__['instance'] = 0
self.__dict__['id'] = 0
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'id':
return self.__dict__['id']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'id':
self.__dict__['id'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_MODULE_DELETE_INSTANCE, self.__dict__['instance'])
submsg.AddU32(MSG_KEY_RESULT_MODULE_DELETE_ID, self.__dict__['id'])
mmsg.AddMessage(MSG_KEY_RESULT_MODULE_DELETE, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_MODULE_DELETE, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['instance'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_DELETE_INSTANCE)
self.__dict__['id'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_DELETE_ID)
class ResultModuleFree:
def __init__(self):
self.__dict__['instance'] = 0
self.__dict__['moduleHandle'] = 0
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'moduleHandle':
return self.__dict__['moduleHandle']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'moduleHandle':
self.__dict__['moduleHandle'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_MODULE_FREE_INSTANCE, self.__dict__['instance'])
submsg.AddU64(MSG_KEY_RESULT_MODULE_FREE_HANDLE, self.__dict__['moduleHandle'])
mmsg.AddMessage(MSG_KEY_RESULT_MODULE_FREE, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_MODULE_FREE, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['instance'] = submsg.FindU32(MSG_KEY_RESULT_MODULE_FREE_INSTANCE)
self.__dict__['moduleHandle'] = submsg.FindU64(MSG_KEY_RESULT_MODULE_FREE_HANDLE)
class ResultDriverLoad:
def __init__(self):
self.__dict__['instance'] = 0
self.__dict__['id'] = 0
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'id':
return self.__dict__['id']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'id':
self.__dict__['id'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_DRIVER_LOAD_INSTANCE, self.__dict__['instance'])
submsg.AddU32(MSG_KEY_RESULT_DRIVER_LOAD_ID, self.__dict__['id'])
mmsg.AddMessage(MSG_KEY_RESULT_DRIVER_LOAD, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_DRIVER_LOAD, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['instance'] = submsg.FindU32(MSG_KEY_RESULT_DRIVER_LOAD_INSTANCE)
self.__dict__['id'] = submsg.FindU32(MSG_KEY_RESULT_DRIVER_LOAD_ID)
class ResultDriverUnload:
def __init__(self):
self.__dict__['instance'] = 0
self.__dict__['id'] = 0
def __getattr__(self, name):
if name == 'instance':
return self.__dict__['instance']
if name == 'id':
return self.__dict__['id']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'instance':
self.__dict__['instance'] = value
elif name == 'id':
self.__dict__['id'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
| |
# File: image_preprocessing.py
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import print_function
import math
import os
import random
import re
import warnings
import cv2
import numpy as np
import scipy.ndimage as ndi
from keras import backend as K
from keras.applications.imagenet_utils import preprocess_input
from keras.preprocessing.image import DirectoryIterator, Iterator
from scipy import linalg
from six.moves import range
from torchvision import transforms
from bcolz_array_iterator import BcolzArrayIterator
from utils.image_utils import resize_and_pad
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
def randomCropFlips(size=224):
transform = transforms.Compose([
transforms.Lambda(lambda x: randomHorizontalFlip(x, u=0.5)),
transforms.Lambda(lambda x: randomCrop(x, size)),
transforms.Lambda(lambda x: preprocess_input(x, mode='tf')),
])
return transform
def centerCrop(size=224):
transform = transforms.Compose([
transforms.Lambda(lambda x: cropCenter(x, height=size, width=size)),
transforms.Lambda(lambda x: preprocess_input(x, mode='tf')),
])
return transform
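# Example (sketch): apply the training-time pipeline to a channels-last image whose sides are
# at least `size` pixels and whose values are in [0, 255], so preprocess_input(mode='tf')
# rescales them to [-1, 1].
#   img = (np.random.rand(256, 256, 3) * 255).astype(np.float32)
#   augmented = randomCropFlips(size=224)(img)   # -> array of shape (224, 224, 3)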
# http://enthusiaststudent.blogspot.jp/2015/01/horizontal-and-vertical-flip-using.html
# http://qiita.com/supersaiakujin/items/3a2ac4f2b05de584cb11
def randomVerticalFlip(img, u=0.5):
if random.random() < u:
img = cv2.flip(img, 0) # np.flipud(img) #cv2.flip(img,0) ##up-down
return img
def randomHorizontalFlip(img, u=0.5):
shape = img.shape
if random.random() < u:
img = cv2.flip(img, 1) # np.fliplr(img) #cv2.flip(img,1) ##left-right
return img
def randomFlip(img, u=0.5):
if random.random() < u:
img = cv2.flip(img, random.randint(-1, 1))
return img
def randomTranspose(img, u=0.5):
if random.random() < u:
img = img.transpose(1, 0, 2) # cv2.transpose(img)
return img
def cropCenter(img, height, width):
h, w, c = img.shape
dy = (h - height) // 2  # row offset
dx = (w - width) // 2  # column offset
y1 = dy
y2 = y1 + height
x1 = dx
x2 = x1 + width
img = img[y1:y2, x1:x2, :]
return img
def randomCrop(img, crop_size=224):
h, w, c = img.shape
dy = random.randint(0, h - crop_size)
dx = random.randint(0, w - crop_size)
img = img[dy:dy + crop_size, dx:dx + crop_size]
return img
# http://stackoverflow.com/questions/16265673/rotate-image-by-90-180-or-270-degrees
def randomRotate90(img, u=0.25):
if random.random() < u:
angle = random.randint(1, 3) * 90
if angle == 90:
img = img.transpose(1, 0, 2) # cv2.transpose(img)
img = cv2.flip(img, 1)
# return img.transpose((1,0, 2))[:,::-1,:]
elif angle == 180:
img = cv2.flip(img, -1)
# return img[::-1,::-1,:]
elif angle == 270:
img = img.transpose(1, 0, 2) # cv2.transpose(img)
img = cv2.flip(img, 0)
# return img.transpose((1,0, 2))[::-1,:,:]
return img
def randomRotate(img, u=0.25, limit=90):
if random.random() < u:
angle = random.uniform(-limit, limit) # degree
height, width = img.shape[0:2]
mat = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1.0)
img = cv2.warpAffine(img, mat, (height, width), flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
# img = cv2.warpAffine(img, mat, (height,width),flags=cv2.INTER_LINEAR,borderMode=cv2.BORDER_CONSTANT)
return img
def randomShift(img, u=0.25, limit=4):
if random.random() < u:
dx = round(random.uniform(-limit, limit)) # pixel
dy = round(random.uniform(-limit, limit)) # pixel
height, width, channel = img.shape
img1 = cv2.copyMakeBorder(img, limit + 1, limit + 1, limit + 1,
limit + 1, borderType=cv2.BORDER_REFLECT_101)
y1 = limit + 1 + dy
y2 = y1 + height
x1 = limit + 1 + dx
x2 = x1 + width
img = img1[y1:y2, x1:x2, :]
return img
def randomShiftScale(img, u=0.25, limit=4):
if random.random() < u:
height, width, channel = img.shape
assert (width == height)
size0 = width
size1 = width + 2 * limit
img1 = cv2.copyMakeBorder(img, limit, limit, limit, limit,
borderType=cv2.BORDER_REFLECT_101)
size = round(random.uniform(size0, size1))
dx = round(random.uniform(0, size1 - size)) # pixel
dy = round(random.uniform(0, size1 - size))
y1 = dy
y2 = y1 + size
x1 = dx
x2 = x1 + size
if size == size0:
img = img1[y1:y2, x1:x2, :]
else:
img = cv2.resize(img1[y1:y2, x1:x2, :], (size0, size0),
interpolation=cv2.INTER_LINEAR)
return img
def randomScale(img, u=0.25, scale_factor=0.150):
if random.random() < u:
height, width, channel = img.shape
new_min_width = width * (1 - scale_factor)
new_width = round(random.uniform(new_min_width, width))
dx = round(random.uniform(0, width - new_width))
dy = round(random.uniform(0, width - new_width))
y1 = dy
y2 = y1 + new_width
x1 = dx
x2 = x1 + new_width
img = cv2.resize(img[y1:y2, x1:x2, :], (width, width),
interpolation=cv2.INTER_LINEAR)
return img
def makeRandomFlips():
transform = transforms.Compose([
transforms.Lambda(lambda x: randomFlip(x, u=0.5)),
transforms.Lambda(lambda x: randomTranspose(x, u=0.5)),
transforms.Lambda(lambda x: randomRotate90(x, u=0.5)),
])
return transform
def randomShiftScaleRotate(img, u=0.5, shift_limit=4, scale_limit=4,
rotate_limit=45):
if random.random() < u:
height, width, channel = img.shape
assert (width == height)
size0 = width
size1 = width + 2 * scale_limit
angle = random.uniform(-rotate_limit, rotate_limit) # degree
size = round(random.uniform(size0, size1))
dx = round(random.uniform(0, size1 - size)) # pixel
dy = round(random.uniform(0, size1 - size))
cc = math.cos(angle / 180 * math.pi) * (size / size0)
ss = math.sin(angle / 180 * math.pi) * (size / size0)
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [size0, 0], [size0, size0], [0, size0], ])
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array(
[width / 2 + dx, height / 2 + dy])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
img = cv2.warpPerspective(img, mat, (height, width),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
"""
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
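# Example (sketch): rotate a channels-first image tensor by up to +/- 20 degrees. This relies
# on the transform_matrix_offset_center/apply_transform helpers referenced above being
# available in this module.
#   x = np.random.rand(3, 64, 64)
#   x_rotated = random_rotation(x, rg=20.)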
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = ['PartnerRegistrationArgs', 'PartnerRegistration']
@pulumi.input_type
class PartnerRegistrationArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
customer_service_uri: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
long_description: Optional[pulumi.Input[str]] = None,
partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
partner_customer_service_number: Optional[pulumi.Input[str]] = None,
partner_name: Optional[pulumi.Input[str]] = None,
partner_registration_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_description: Optional[pulumi.Input[str]] = None,
partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_name: Optional[pulumi.Input[str]] = None,
setup_uri: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None):
"""
The set of arguments for constructing a PartnerRegistration resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration.
:param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher.
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[str] logo_uri: URI of the logo.
:param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters.
:param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
:param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
:param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso".
:param pulumi.Input[str] partner_registration_name: Name of the partner registration.
:param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters.
:param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type.
:param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type.
:param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource.
:param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration.
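Example (a minimal sketch; every value below is a placeholder):
    args = PartnerRegistrationArgs(
        resource_group_name="example-rg",
        partner_name="Contoso",
        location="global")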
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if authorized_azure_subscription_ids is not None:
pulumi.set(__self__, "authorized_azure_subscription_ids", authorized_azure_subscription_ids)
if customer_service_uri is not None:
pulumi.set(__self__, "customer_service_uri", customer_service_uri)
if location is not None:
pulumi.set(__self__, "location", location)
if logo_uri is not None:
pulumi.set(__self__, "logo_uri", logo_uri)
if long_description is not None:
pulumi.set(__self__, "long_description", long_description)
if partner_customer_service_extension is not None:
pulumi.set(__self__, "partner_customer_service_extension", partner_customer_service_extension)
if partner_customer_service_number is not None:
pulumi.set(__self__, "partner_customer_service_number", partner_customer_service_number)
if partner_name is not None:
pulumi.set(__self__, "partner_name", partner_name)
if partner_registration_name is not None:
pulumi.set(__self__, "partner_registration_name", partner_registration_name)
if partner_resource_type_description is not None:
pulumi.set(__self__, "partner_resource_type_description", partner_resource_type_description)
if partner_resource_type_display_name is not None:
pulumi.set(__self__, "partner_resource_type_display_name", partner_resource_type_display_name)
if partner_resource_type_name is not None:
pulumi.set(__self__, "partner_resource_type_name", partner_resource_type_name)
if setup_uri is not None:
pulumi.set(__self__, "setup_uri", setup_uri)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if visibility_state is not None:
pulumi.set(__self__, "visibility_state", visibility_state)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the user's subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="authorizedAzureSubscriptionIds")
def authorized_azure_subscription_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration.
"""
return pulumi.get(self, "authorized_azure_subscription_ids")
@authorized_azure_subscription_ids.setter
def authorized_azure_subscription_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "authorized_azure_subscription_ids", value)
@property
@pulumi.getter(name="customerServiceUri")
def customer_service_uri(self) -> Optional[pulumi.Input[str]]:
"""
The extension of the customer service URI of the publisher.
"""
return pulumi.get(self, "customer_service_uri")
@customer_service_uri.setter
def customer_service_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_service_uri", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="logoUri")
def logo_uri(self) -> Optional[pulumi.Input[str]]:
"""
URI of the logo.
"""
return pulumi.get(self, "logo_uri")
@logo_uri.setter
def logo_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logo_uri", value)
@property
@pulumi.getter(name="longDescription")
def long_description(self) -> Optional[pulumi.Input[str]]:
"""
Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters.
"""
return pulumi.get(self, "long_description")
@long_description.setter
def long_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "long_description", value)
@property
@pulumi.getter(name="partnerCustomerServiceExtension")
def partner_customer_service_extension(self) -> Optional[pulumi.Input[str]]:
"""
The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
"""
return pulumi.get(self, "partner_customer_service_extension")
@partner_customer_service_extension.setter
def partner_customer_service_extension(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_customer_service_extension", value)
@property
@pulumi.getter(name="partnerCustomerServiceNumber")
def partner_customer_service_number(self) -> Optional[pulumi.Input[str]]:
"""
The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
"""
return pulumi.get(self, "partner_customer_service_number")
@partner_customer_service_number.setter
def partner_customer_service_number(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_customer_service_number", value)
@property
@pulumi.getter(name="partnerName")
def partner_name(self) -> Optional[pulumi.Input[str]]:
"""
Official name of the partner. For example: "Contoso".
"""
return pulumi.get(self, "partner_name")
@partner_name.setter
def partner_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_name", value)
@property
@pulumi.getter(name="partnerRegistrationName")
def partner_registration_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the partner registration.
"""
return pulumi.get(self, "partner_registration_name")
@partner_registration_name.setter
def partner_registration_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_registration_name", value)
@property
@pulumi.getter(name="partnerResourceTypeDescription")
def partner_resource_type_description(self) -> Optional[pulumi.Input[str]]:
"""
Short description of the partner resource type. The length of this description should not exceed 256 characters.
"""
return pulumi.get(self, "partner_resource_type_description")
@partner_resource_type_description.setter
def partner_resource_type_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_resource_type_description", value)
@property
@pulumi.getter(name="partnerResourceTypeDisplayName")
def partner_resource_type_display_name(self) -> Optional[pulumi.Input[str]]:
"""
Display name of the partner resource type.
"""
return pulumi.get(self, "partner_resource_type_display_name")
@partner_resource_type_display_name.setter
def partner_resource_type_display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_resource_type_display_name", value)
@property
@pulumi.getter(name="partnerResourceTypeName")
def partner_resource_type_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the partner resource type.
"""
return pulumi.get(self, "partner_resource_type_name")
@partner_resource_type_name.setter
def partner_resource_type_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_resource_type_name", value)
@property
@pulumi.getter(name="setupUri")
def setup_uri(self) -> Optional[pulumi.Input[str]]:
"""
URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source.
"""
return pulumi.get(self, "setup_uri")
@setup_uri.setter
def setup_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "setup_uri", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Tags of the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="visibilityState")
def visibility_state(self) -> Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]:
"""
Visibility state of the partner registration.
"""
return pulumi.get(self, "visibility_state")
@visibility_state.setter
def visibility_state(self, value: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]):
pulumi.set(self, "visibility_state", value)
class PartnerRegistration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
customer_service_uri: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
long_description: Optional[pulumi.Input[str]] = None,
partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
partner_customer_service_number: Optional[pulumi.Input[str]] = None,
partner_name: Optional[pulumi.Input[str]] = None,
partner_registration_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_description: Optional[pulumi.Input[str]] = None,
partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
setup_uri: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
__props__=None):
"""
Information about a partner registration.
API Version: 2020-04-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
# grsnp/hypergeom4.py
#!/usr/bin/env python2
from __future__ import division
import argparse
import collections
import math
import sys
import logging
from logging import FileHandler,StreamHandler
#from bx.intervals.intersection import IntervalTree
from scipy.stats import hypergeom
import numpy as np
import scipy
import pdb
import os
import json
import rpy2.robjects as robjects
import gzip
import tarfile
import traceback
import StringIO
import dbcreator_ucsc as bedfilecreator
import textwrap
import subprocess
import sys
import commands
import mako
import simplejson
import zipfile
import inspect
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
import grsnp.dbcreator_util as grsnp_util
import random
import string
import collections
# Logging configuration
logger = logging.getLogger()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
logger.propagate = 0
# This line outputs logging info to the console
console_output = False
print_progress = False
def get_overlap_statistics(gf,fois):
"""Returns a dictionary with indicating how many hits exist for each foi against the gf
gf: filepath for GF
fois: list of FOI filepaths
"""
results = []
out = ""
# use temporary files instead of piping out to console because large amounts of output to console can cause deadlock
# this creates unique random file names
tmp_path = get_tmp_file('grsnptmp')
tmp_error_path = get_tmp_file('grsnperrortmp')
tmp_file = open(tmp_path,'wb')
tmp_error_file = open(tmp_error_path,'wb')
try:
# Runs overlapStatistics with preprocessed background stats if they exist
out = subprocess.Popen(["overlapStatistics"] + [gf] + fois,stdout=tmp_file,stderr=tmp_error_file)
out.wait()
tmp_file.close()
tmp_error_file.close()
tmp = open(tmp_path).read()
tmp_er = open(tmp_error_path).read()
if tmp_er != "": logger.error(tmp_er)
if tmp[:6] == "ERROR:":
logger.error(tmp[7:])
raise Exception(tmp)
for x in tmp.split("\n")[1:]:
if x != "":
tmp = x.split("\t")
foi_name,n,hit_count = os.path.split(tmp[0])[-1],tmp[2],tmp[3]
results.append({"queryfile": foi_name,"queryregions": int(n),"intersectregions": int(hit_count),"indexregions": int(tmp[1])})
# remove the temporary output files
if os.path.exists(tmp_path): os.remove(tmp_path)
if os.path.exists(tmp_error_path): os.remove(tmp_error_path)
except Exception, e:
if not tmp_file.closed: tmp_file.close()
if not tmp_error_file.closed: tmp_error_file.close()
# remove the temporary output files
if os.path.exists(tmp_path): os.remove(tmp_path)
if os.path.exists(tmp_error_path): os.remove(tmp_error_path)
logger.error(traceback.format_exc())
raise e
return results
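# Illustrative sketch of the structure returned by get_overlap_statistics (derived from the
# parsing code above, not from real program output; the file name and counts are placeholders):
#
#   [{"queryfile": "snps1.bed", "queryregions": 1200,
#     "intersectregions": 85, "indexregions": 45000}]
#
# "queryregions" is the number of regions in the FOI, "intersectregions" the number of FOI
# regions overlapping the GF, and "indexregions" the number of regions in the GF.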
def get_tmp_file(prefix):
tmp_path = prefix + "_" + ''.join(random.choice(string.lowercase+string.digits) for _ in range(32))+'.tmp'
while (os.path.exists(tmp_path)):
tmp_path = prefix + "_" + ''.join(random.choice(string.lowercase+string.digits) for _ in range(32))+'.tmp'
return tmp_path
def get_bgobs(bg,gf,root_data_dir,organism,progress = None):
''' Check if pre-calculated GF and background overlap data exist.
If they do not, it manually calculates them.
'''
# get the grsnp_db_[filt] folder
filt_grsnp_db = gf.replace(root_data_dir,"").lstrip("/").split("/")[0]
bkg_overlap_path = os.path.join(root_data_dir,filt_grsnp_db,organism,'bkg_overlaps.gr')
# See if pre-calculated values exist
if os.path.exists(bkg_overlap_path):
data = open(bkg_overlap_path).read().split("\n")
data = [x.split("\t") for x in data if x != ""]
d_gf = [x[1] for x in data if os.path.join(root_data_dir,x[0]) == gf and x[1] != ""]
if len(d_gf) != 0:
bg_obs = [x.split(":")[1] for x in d_gf[0].split(",") if x.split(":")[0] == os.path.basename(bg)]
if len(bg_obs) != 0:
logger.info("Pre-calculated values found for background and {} ".format(base_name(gf)))
return bg_obs[0]
# manually get overlap values
logger.info("Calculating overlap stats on background and {}".format(base_name(gf)))
if progress:
_write_progress("Calculating overlap stats on background and {}".format(base_name(gf)),progress)
result = get_overlap_statistics(gf,[bg])
try:
result = int(result[0]["intersectregions"])
except Exception, e:
result = None
logger.error(traceback.format_exc())
return result
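# Sketch of the bkg_overlaps.gr file format as implied by the parsing above (tab-separated,
# one GF per line; the paths and counts are hypothetical examples):
#
#   grsnp_db/hg19/genes/refseq.bed.gz<TAB>bg_hg19.bed:10523,bg_euro.bed:8712
#
# i.e. column 1 is the GF path relative to root_data_dir and column 2 is a comma-separated
# list of background_name:overlap_count pairs.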
def output_p_value(foi_obs,n_fois,bg_obs,n_bgs,foi_path,gf_path,background_path,detailed_outpath,run_randomization_test=False, stat_test=None,progress = None):
"""Return the shrunken odds-ratio and signed p-value of all FOIs against the GF so they can be written to
matrix files. Outputs stats to the detailed results file.
"""
foi_name = base_name(foi_path)
gf_name = base_name(gf_path)
run_files_dir = os.path.split(detailed_outpath)[0]  # directory holding run files (was previously undefined here)
sign,pval,odds_ratio,shrunken_or,ci_lower,ci_upper = calculate_p_value_odds_ratio(foi_obs,n_fois,bg_obs,n_bgs,foi_name,gf_path,stat_test=stat_test,background_path = background_path, run_files_dir = run_files_dir,progress = progress)
if sign == 1 or str(odds_ratio) == "inf":
direction = "overrepresented"
else: direction = "underrepresented"
# # calculate the p_rand
# prnd = 1 # default prnd for non-significant results
# if pval > 0.05:
# direction = "nonsignificant"
# else:
# if run_randomization_test:
# _write_progress("Running randomization test on {}".format(foi_name),progress=progress)
# prnd = p_rand(foi_path,n_fois,background_path,bg_obs,n_bgs,gf_path, progress = progress, run_files_dir = os.path.split(detailed_outpath)[0])
# pval_unmod = pval
# pval = np.power(10,-(np.log10(prnd)- np.log10(pval))) # adjust p_value using randomization test
# write out to the detailed results file
prnd = 1  # default p_rand; the randomization test above is currently commented out
strpval,strprnd = "",""
if run_randomization_test:
strpval = "%.2e" % pval if type(pval) != type("") else pval
strprnd = "%.2e" % prnd if type(prnd) != type("") else prnd
write_output("\t".join(map(str, [foi_name.rpartition('/')[-1], foi_obs, n_fois, bg_obs, n_bgs,
_format_type(odds_ratio),
_format_type(ci_lower),
_format_type(ci_upper),
_format_type(shrunken_or),
"%.2e" % pval if type(pval) != type("") else pval,
strprnd,strpval])) + "\n",detailed_outpath)
if pval < 1E-307:
# set to value obtained from sys.float_info.min_10_exp
pval = 1E-306
return [sign * pval,shrunken_or]
def _format_type(num):
''' Sets format to be either scientific or float depending on num value
'''
if type(num) != type(""):
if num > 100 or num < 0.01:
return "%.2e" % num
else:
return "%.2f" % num
else:
return num
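# Quick examples of _format_type behaviour:
#   _format_type(123456)  -> "1.23e+05"   (value > 100, scientific notation)
#   _format_type(0.5)     -> "0.50"       (0.01 <= value <= 100, fixed point)
#   _format_type("NA")    -> "NA"         (strings are passed through unchanged)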
def p_rand(foi_path,n_fois,background_path,bg_obs,n_bgs,gf_path, progress = None, run_files_dir = None):
''' Calculated by generating 'num' random feature files and running them against gf_path.
Calculates the mean of the p_values for the overrepresented and underrepresented random features separately.
'''
num = 10
rnds_paths = generate_randomsnps(foi_path,background_path,n_fois,num)
rnd_stats = get_overlap_statistics(gf_path,rnds_paths)
p_rand = [1]
for r in rnd_stats:
sign,pval,odds_ratio,_,_,_ = calculate_p_value_odds_ratio(r["intersectregions"],r["queryregions"],bg_obs,n_bgs,base_name(foi_path),gf_path,stat_test='chisquare', progress = None,run_files_dir = None)
p_rand.append(pval)
return np.min(p_rand)
def calculate_p_value_odds_ratio(foi_obs,n_fois,bg_obs,n_bgs,foi_name,gf_path,stat_test=None,background_path=None, progress = None,run_files_dir = None):
"""Calculates the p-value,confidence intervals and the shrunken odds ratio.
Returns [sign,pval,odds_ratio,shrunken_or,ci_lower,ci_upper]
"""
_write_progress("Testing {}".format(foi_name), progress)
## Perform the chisquare test regardless of what stat_test is selected, we need the odds ratio
bg_obs,n_bgs = int(bg_obs),int(n_bgs)
ctable = [[foi_obs, n_fois-foi_obs],
[bg_obs-foi_obs,n_bgs-n_fois-(bg_obs-foi_obs)]]
# Ensure there are no negative values in the ctable
do_chi_square = True
for i in ctable:
for k in i:
if k < 0:
logger.warning("Cannot calculate p-value for {} and {}. Is the background too small? foi_obs {}, n_fois {}, bg_obs {}, n_bgs {}".format(base_name(gf_path),foi_name,foi_obs,n_fois,bg_obs,n_bgs))
return [1,1,1,1,1,1]
# # ??? if sample too small, then perform fisher exact test
# if k < 5:
# do_chi_square = False
# check for zeros and add 0.5 if one of the cells is 0
if ctable[0][0] == 0 or ctable[0][1] == 0 or ctable[1][0] == 0 or ctable[1][1] == 0:
ctable[0][0] += 0.5
ctable[0][1] += 0.5
ctable[1][0] += 0.5
ctable[1][1] += 0.5
if do_chi_square:
chi_result = scipy.stats.chi2_contingency(ctable)
pval = chi_result[1]
odds_ratio = float(ctable[0][0]*ctable[1][1])/(ctable[0][1]*ctable[1][0])
else:
odds_ratio, pval = scipy.stats.fisher_exact(ctable)
# Adjustments of outliers
if odds_ratio == 0.0:
odds_ratio = sys.float_info.min
if np.isinf(odds_ratio):
odds_ratio = sys.float_info.max
# # If p-value is insignificant, so is odds ratio
# if pval == 1.0:
# odds_ratio = 1
# calculate the shrunken odds ratio
log_or = scipy.log(odds_ratio)
conf_coe = 1.96 # the confidence coefficient of a standard norm dist
# calculate the standard error
se = math.sqrt(1.0/ctable[0][0] + 1.0/ctable[1][0] + 1.0/ctable[0][1] + 1.0/ctable[1][1])
# calculate the upper and lower confidence interval
ci_upper = scipy.exp(log_or + conf_coe * se)
ci_lower = scipy.exp(log_or - conf_coe * se)
# Precaution against CI overflow
if np.isinf(ci_upper):
ci_upper = sys.float_info.max
if ci_lower == 0.0:
ci_lower = sys.float_info.min
# shrunken_or is the ci (either upper or lower) that is closest to 1
if odds_ratio < 1:
ci_array = [odds_ratio, ci_upper if ci_upper < 1 else odds_ratio]
ci_index = scipy.array(ci_array).argmax()
shrunken_or = ci_array[ci_index]
elif odds_ratio > 1:
ci_array = [ci_lower if ci_lower > 1 else odds_ratio, odds_ratio]
ci_index = scipy.array(ci_array).argmin()
shrunken_or = ci_array[ci_index]
# if ci_lower<1 and ci_upper>1:
# shrunken_or,odds_ratio = 1,1
# else:
# find which value is closer to 1
# ci_index = scipy.array([[abs(math.log(ci_lower)),abs(math.log(ci_upper))]]).argmin()
# shrunken_or = [ci_lower,ci_upper][ci_index]
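# Worked numeric example of the calculation above (illustrative numbers, not real data):
# with foi_obs=30, n_fois=100, bg_obs=400, n_bgs=2000 the table is
#   ctable = [[30, 70], [370, 1530]]
# odds_ratio = (30*1530)/(70*370) ~= 1.77
# se        = sqrt(1/30 + 1/70 + 1/370 + 1/1530) ~= 0.226
# ci_lower  ~= exp(ln(1.77) - 1.96*0.226) ~= 1.14
# ci_upper  ~= exp(ln(1.77) + 1.96*0.226) ~= 2.76
# and since odds_ratio > 1 the shrunken odds ratio is the CI bound closest to 1, i.e. ~1.14.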
## If a different stat_test is selected, perform that test now, and replace the p-value
## note we will still use the odds ratio calculated by the chi-square test
if stat_test == "binomial":
pval = scipy.stats.binom_test(foi_obs, n_fois, float(bg_obs)/n_bgs)
# monte carlo is passed as 'montecarlo_[number_of_simulations]'
elif stat_test.startswith("montecarlo"):
num_mc = int(stat_test.split("_")[1])
rndfoipath = os.path.join(run_files_dir,'mc.bed')
# pow_mc states what starting power of 10 to check pvalue
chunk_size, pow_mc, not_significant = 100, 2, False
num_rnd_obs = [] # stores the number of rnd_snps that overlap for each mc
# run the rnd_fois in groups against the GF (allows us to handle case of >10,000 MC simulations)
for i_chunk in xrange(1, num_mc, chunk_size):
if not_significant == True: break
# only create the number of rnd_snps files needed (i.e for 14 mc with chunk of 10 we only want to create 4 files for last chunk)
rnd_count = chunk_size if i_chunk + chunk_size < num_mc else num_mc - i_chunk + 1
# Generate the random fois
rnd_fois_paths = generate_randomsnps(rndfoipath,background_path,n_fois,rnd_count)
# _write_progress("Performing Monte Carlo {} of {}".format(i_chunk,num_mc), progress)
# get overlap stats for random_features against the GF
overlapstats = get_overlap_statistics(gf_path, rnd_fois_paths)
for i_res,res in enumerate(overlapstats):
for treating
a larger number of rays.
"""
indices = self.indices[:, start_index, end_index]
num_points_sets = self.fermat_path.num_points_sets
x = np.zeros(num_points_sets, s.FLOAT)
y = np.zeros(num_points_sets, s.FLOAT)
z = np.zeros(num_points_sets, s.FLOAT)
for (i, (points, j)) in enumerate(zip(self.fermat_path.points, indices)):
x[i] = points.x[j]
y[i] = points.y[j]
z[i] = points.z[j]
return g.Points.from_xyz(x, y, z, "Ray")
def gone_through_extreme_points(self):
"""
Returns the rays which are going through at least one extreme point in the interfaces.
These rays can be non-physical; it is then safer to be conservative and remove them all.
Extreme points are the first/last points (in indices) in the interfaces, except the first and
last interfaces (respectively the points1 and the grid).
Returns
-------
out : ndarray of bool
``rays[i, j]`` is True if the rays starting from the i-th point of the first interface
and going to the j-th point of the last interface is going through at least one extreme point
through the middle interfaces.
Order: same as attribute ``indices``.
"""
order = "F" if self.indices.flags.f_contiguous else "C"
shape = self.indices.shape[1:]
out = np.zeros(shape, order=order, dtype=bool)
interior_indices = self.interior_indices
middle_points = tuple(self.fermat_path.points)[1:-1]
for (d, points) in enumerate(middle_points):
np.logical_or(out, interior_indices[d, ...] == 0, out=out)
np.logical_or(out, interior_indices[d, ...] == (len(points) - 1), out=out)
return out
def to_fortran_order(self):
"""
Returns a Rays object whose arrays are in Fortran order.
TFM objects expect lookup times indexed by (grid_idx, probe_idx), whereas
the arrays in this object are indexed by (probe_idx, grid_idx). By converting
them to Fortran arrays, their transposes are C-contiguous and indexed as expected
by TFM objects.
Returns
-------
Rays
"""
return self.__class__(
np.asfortranarray(self.times),
np.asfortranarray(self.interior_indices),
self.fermat_path,
"F",
)
@staticmethod
def expand_rays(interior_indices, indices_new_interface):
"""
Expand the rays by one interface knowing the beginning of the rays and the
points the rays must go through at the last interface.
A0, A1, ..., A(d+1) are (d+2) interfaces.
n: number of points of interface A0
m: number of points of interface Ad
p: number of points of interface A(d+1)
For more information on ``interior_indices``, see the documentation of ``Rays``.
Parameters
----------
interior_indices: *interior* indices of rays going from A(0) to A(d).
Shape: (d, n, m)
indices_new_interface: indices of the points of interface A(d) that the rays
starting from A(0) cross to go to A(d+1).
Shape: (n, p)
Returns
-------
expanded_indices
Shape (d+1, n, p)
"""
d, n, m = interior_indices.shape
n_, p = indices_new_interface.shape
if n != n_:
raise ValueError("Inconsistent shapes")
if d == 0:
new_shape = (1, *indices_new_interface.shape)
return indices_new_interface.reshape(new_shape)
else:
expanded_indices = np.empty((d + 1, n, p), dtype=interior_indices.dtype)
_expand_rays(interior_indices, indices_new_interface, expanded_indices)
return expanded_indices
def reverse(self, order="f"):
"""
Returns a new Rays object which corresponds to the reversed path.
Parameters
----------
order : str
Order of the arrays 'times' and 'indices'. Default: 'f'
Returns
-------
reversed_rays : Rays
"""
reversed_times = np.asarray(self.times.T, order=order)
# Input x of shape (d, n, m)
# Output y of shape(d, m, n) such as ``x[k, i, j] == y[d - k, j, i]``
reversed_indices = np.swapaxes(self.interior_indices, 1, 2)
reversed_indices = reversed_indices[::-1, ...]
reversed_indices = np.asarray(reversed_indices, order=order)
reversed_path = self.fermat_path.reverse()
return self.__class__(reversed_times, reversed_indices, reversed_path)
class FermatPath(tuple):
"""
FermatPath(points_and_speeds)
This object contains the interface points through which the ray passes during propagation, and the speeds
between the consecutive interfaces.
This object should be used only for the internal plumbing of FermatSolver. This object can be obtained from a
(smarter) :class:`Path` object via the class method :meth:`FermatPath.from_path`.
A FermatPath must start and end with Points objects. Speeds (stored as float) and Points must alternate.
Ex: FermatPath((points_1, speed_1_2, points_2, speed_2_3, points_3))
"""
def __new__(cls, sequence):
if len(sequence) % 2 == 0 or len(sequence) < 3:
raise ValueError(
"{} expects a sequence of length odd and >= 5)".format(cls.__name__)
)
assert all(np.isfinite(sequence[1::2])), "nonfinite velocity"
return super().__new__(cls, sequence)
@classmethod
def from_path(cls, path):
"""
Create a FermatPath object from a (smarter) Path object.
"""
path_pieces = []
for interface, material, mode in zip(
path.interfaces, path.materials, path.modes
):
velocity = material.velocity(mode)
path_pieces.append(interface.points)
path_pieces.append(velocity)
path_pieces.append(path.interfaces[-1].points)
return cls(path_pieces)
def __repr__(self):
return "{}({})".format(
self.__class__.__name__, ", ".join([str(x) for x in self])
)
def __add__(self, tail):
if self[-1] != tail[0]:
raise ValueError("Cannot join two subpaths with different extremities.")
return self.__class__((*self, *tail[1:]))
def reverse(self):
return self.__class__(tuple(reversed(self)))
def split_head(self):
"""
Split a Path in two at the first interface:
``(points_1, speed_1_2, points_2)`` and ``(points_2, speed_2_3, ..., points_n)``.
"""
if len(self) < 5:
raise ValueError("Not enough elements to split (min: 5)")
head = self.__class__(self[:3])
tail = self.__class__(self[2:])
return head, tail
def split_queue(self):
"""
Split a Path in two at the last interface:
``(points_1, speed_1_2, ... points_n1)`` and ``(points_n1, speed, points_n)``.
"""
if len(self) < 5:
raise ValueError("Not enough elements to split (min: 5)")
head = self.__class__(self[:-2])
tail = self.__class__(self[-3:])
return head, tail
@property
def points(self):
"""
Returns all the Points objects in Path as a tuple.
"""
return tuple(self[0::2])
@property
def velocities(self):
return tuple(self[1::2])
@property
def num_points_sets(self):
return len(self) // 2 + 1
@property
def len_largest_interface(self):
"""
Excludes the first and last point sets.
"""
all_points = tuple(self.points)
interfaces = all_points[1:-1]
if not interfaces:
return 0
else:
return max([len(x) for x in interfaces])
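# Minimal usage sketch of FermatPath (hypothetical values; points_1, points_2 and points_3
# stand for g.Points instances such as those returned by g.Points.from_xyz above, and the
# floats are velocities in consistent units):
#
#   path = FermatPath((points_1, 6300.0, points_2, 1480.0, points_3))
#   path.points           # -> (points_1, points_2, points_3)
#   path.velocities       # -> (6300.0, 1480.0)
#   path.num_points_sets  # -> 3
#   head, tail = path.split_head()
#   # head == FermatPath((points_1, 6300.0, points_2))
#   # tail == FermatPath((points_2, 1480.0, points_3))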
class FermatSolver:
"""
Solver: take as input the interfaces, give as output the ray paths.
General usage: instantiate object, then call method ``solve`` (or ``solve_no_clean``
to keep intermediary results). Results are stored in attributes ``res``.
Parameters
----------
paths : set of FermatPath
Paths which will be solved. Solving several paths at a time allows an efficient caching.
dtype : numpy.dtype
Datatype for times and distances. Optional, default: settings.FLOAT
dtype_indices : numpy.dtype
Datatype for indices. Optional, default: use the smallest unsigned
integers that fits.
Attributes
----------
res : dictionary
Rays stored as ``Rays`` objects, indexed by the ``paths``.
paths
Cf. above.
dtype
Cf. above.
dtype_indices
Cf. above.
cached_distance : dict
Keys: tuple of Points (points1, points2). Values: euclidean
distance between all points of 'points1' and all points of 'points2'.
cached_result : dict
Keys: Path. Values: _FermatSolverResult
"""
def __init__(self, fermat_paths_set, dtype=None, dtype_indices=None):
if dtype is None:
dtype = s.FLOAT
if dtype_indices is None:
dtype_indices = s.INT
for path in fermat_paths_set:
try:
hash(path)
except TypeError as e:
raise TypeError("Path must be hashable.") from e
self.dtype = dtype
self.dtype_indices = dtype_indices
self.clear_cache()
self.res = {}
self.paths = fermat_paths_set
self.num_minimization = 0
self.num_euc_distance = 0
@classmethod
def from_views(cls, views_list, dtype=None, dtype_indices=None):
"""
Create a FermatSolver from a list of views (alternative constructor).
Parameters
----------
views : list of Views
dtype : numpy.dtype or None
dtype_indices : numpy.dtype or None
Returns
-------
"""
paths = set(
(
path
for v in views_list
for path in (v.tx_path.to_fermat_path(), v.rx_path.to_fermat_path())
)
)
return cls(paths, dtype=dtype, dtype_indices=dtype_indices)
def solve(self):
"""
Compute the rays for all paths and store them in ``self.res``.
"""
self.solve_no_clean()
self.clear_cache()
return self.res
def solve_no_clean(self):
"""
Compute the rays for all paths and store them in ``self.res``.
"""
tic = time.perf_counter()
for path in self.paths:
self.res[path] = self._solve(path)
toc = time.perf_counter()
logger.info("Ray tracing: solved all in {:.3g}s".format(toc - tic))
return self.res
def _solve(self, path):
"""
Returns the rays starting from the first interface and last interface of ``path``.
This function is recursive. Intermediate results are stored
in self.cached_result and self.cached_distance.
Warning: it is not safe to call this with a Path not passed to __init__
because of possible overflows.
Returns
-------
res : Rays
"""
if path in self.cached_result:
# Cache hits, hooray
return self.cached_result[path]
# Special case if we have only two (consecutive) boundaries:
if len(path) == 3:
return self.consecutive_times(path)
# General case: compute by calling _solve() recursively:
head, tail = path.split_queue()
res_head = self._solve(head)
res_tail = self._solve(tail)
assert isinstance(res_head, Rays)
assert isinstance(res_tail, Rays)
self.num_minimization += 1
logger.debug(
"Ray tracing: solve for subpaths {} and {}".format(str(head), str(tail))
)
times, indices_at_interface = find_minimum_times(
res_head.times,
res_tail.times,
dtype=self.dtype,
dtype_indices=self.dtype_indices,
)
assert res_tail.fermat_path.num_points_sets == 2
indices = Rays.expand_rays(res_head.interior_indices, indices_at_interface)
del indices_at_interface # no more useful
res = Rays(times, indices, path)
self.cached_result[path] = res
return res
def clear_cache(self):
self.cached_distance = Cache()
self.cached_result = Cache()
# unionml/model.py
"""Model class for defining training, evaluation, and prediction."""
import inspect
import os
from collections import OrderedDict
from dataclasses import asdict, dataclass, field, is_dataclass, make_dataclass
from functools import partial
from inspect import Parameter, signature
from typing import IO, Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Type, Union
import joblib
import sklearn
from dataclasses_json import dataclass_json
from fastapi import FastAPI
from flytekit import Workflow
from flytekit.configuration import Config
from flytekit.core.tracker import TrackedInstance
from flytekit.remote import FlyteRemote
from flytekit.remote.executions import FlyteWorkflowExecution
from unionml.dataset import Dataset
from unionml.utils import inner_task, is_keras_model, is_pytorch_model
@dataclass
class BaseHyperparameters:
"""Hyperparameter base class
This class is used to auto-generate the hyperparameter type based on the ``hyperparameter_config`` argument
or ``init`` callable signature in the :py:class:`unionml.model.Model`.
"""
pass
class ModelArtifact(NamedTuple):
"""Model artifact, containing a specific model object and optional metrics associated with it."""
#: model object
model_object: Any
#: hyperparameters associated with the model object
hyperparameters: Optional[Union[BaseHyperparameters, dict]] = None
#: metrics associated with the model object
metrics: Optional[Dict[str, float]] = None
class Model(TrackedInstance):
def __init__(
self,
name: str = "model",
init: Union[Type, Callable] = None,
*,
dataset: Dataset,
hyperparameter_config: Optional[Dict[str, Type]] = None,
):
"""Initialize a UnionML Model.
The term *UnionML Model* refers to the specification of a model, which the user defines through
the functional entrypoints, e.g. :meth:`unionml.model.Model.trainer`. The term *model object* is used to refer
to some instance of model from a machine learning framework such as the subclasses of the ``BaseEstimator``
class in sklearn, ``Module`` in pytorch, etc.
:param name: name of the model
:param init: a class or callable that produces a model object (e.g. an sklearn estimator) when invoked.
:param dataset: a UnionML Dataset object to bind to the model.
:param hyperparameter_config: A dictionary mapping hyperparameter names to types. This is used to
determine the hyperparameter names and types associated with the model object produced by
the ``init`` argument. For example:
>>> {
... "hyperparameter1": int,
... "hyperparameter2": str,
... "hyperparameter3": float,
... }
"""
super().__init__()
self.name = name
self._init_callable = init
self._hyperparameter_config = hyperparameter_config
self._dataset = dataset
self._artifact: Optional[ModelArtifact] = None
# default component functions
self._init = self._default_init
self._saver = self._default_saver
self._loader = self._default_loader
# properties needed for deployment
self._image_name: Optional[str] = None
self._config_file: Optional[str] = None
self._registry: Optional[str] = None
self._dockerfile: Optional[str] = None
self._project: Optional[str] = None
self._domain: Optional[str] = None
self.__remote__: Optional[FlyteRemote] = None
if self._dataset.name is None:
self._dataset.name = f"{self.name}.dataset"
# unionml-compiled tasks
self._train_task = None
self._predict_task = None
self._predict_from_features_task = None
# user-provided task kwargs
self._train_task_kwargs = None
self._predict_task_kwargs = None
# dynamically defined types
self._hyperparameter_type: Optional[Type] = None
@property
def artifact(self) -> Optional[ModelArtifact]:
"""Model artifact associated with the ``unionml.Model`` ."""
return self._artifact
@artifact.setter
def artifact(self, new_value: ModelArtifact):
self._artifact = new_value
@property
def hyperparameter_type(self) -> Type:
"""Hyperparameter type of the model object based on the ``init`` function signature."""
if self._hyperparameter_type is not None:
return self._hyperparameter_type
hyperparameter_fields: List[Any] = []
if self._hyperparameter_config is None:
# extract types from the init callable that instantiates a new model
model_obj_sig = signature(self._init_callable) # type: ignore
# if any of the arguments are not type-annotated, default to using an untyped dictionary
if any(p.annotation is inspect._empty for p in model_obj_sig.parameters.values()):
return dict
for hparam_name, hparam in model_obj_sig.parameters.items():
hyperparameter_fields.append((hparam_name, hparam.annotation, field(default=hparam.default)))
else:
# extract types from hyperparameters Model init argument
for hparam_name, hparam_type in self._hyperparameter_config.items():
hyperparameter_fields.append((hparam_name, hparam_type))
self._hyperparameter_type = dataclass_json(
make_dataclass("Hyperparameters", hyperparameter_fields, bases=(BaseHyperparameters,))
)
return self._hyperparameter_type
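# Illustrative example of the dataclass generated above (assumption for this sketch: the
# model is declared with an init callable whose signature is fully type-annotated, and
# "some_dataset" is a placeholder Dataset instance):
#
#   def make_model(alpha: float = 0.1, fit_intercept: bool = True): ...
#   model = Model(name="m", init=make_model, dataset=some_dataset)
#   model.hyperparameter_type
#   # -> a dataclass "Hyperparameters" with fields
#   #    alpha: float = 0.1 and fit_intercept: bool = True,
#   #    subclassing BaseHyperparameters and decorated with dataclass_json.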
@property
def config_file(self) -> Optional[str]:
"""Path to the config file associated with the Flyte backend."""
return self._config_file
@property
def registry(self) -> Optional[str]:
"""Docker registry used to push UnionML app."""
return self._registry
@property
def dockerfile(self) -> Optional[str]:
"""Path to Docker file used to package the UnionML app."""
return self._dockerfile
@property
def train_workflow_name(self):
"""Name of the training workflow."""
return f"{self.name}.train"
@property
def predict_workflow_name(self):
"""Name of the prediction workflow used to generate predictions from the ``dataset.reader`` ."""
return f"{self.name}.predict"
@property
def predict_from_features_workflow_name(self):
"""Name of the prediction workflow used to generate predictions from raw features."""
return f"{self.name}.predict_from_features"
def init(self, fn):
"""Register a function for initializing a model object."""
self._init = fn
return self._init
def trainer(self, fn=None, **train_task_kwargs):
"""Register a function for training a model object."""
if fn is None:
return partial(self.trainer, **train_task_kwargs)
self._trainer = fn
self._train_task_kwargs = train_task_kwargs
return self._trainer
def predictor(self, fn=None, **predict_task_kwargs):
"""Register a function that generates predictions from a model object."""
if fn is None:
return partial(self.predictor, **predict_task_kwargs)
self._predictor = fn
self._predict_task_kwargs = predict_task_kwargs
return self._predictor
def evaluator(self, fn):
"""Register a function for producing metrics for given model object."""
self._evaluator = fn
return self._evaluator
def saver(self, fn):
"""Register a function for serializing a model object to disk."""
self._saver = fn
return self._saver
def loader(self, fn):
"""Register a function for deserializing a model object to disk."""
self._loader = fn
return self._loader
@property
def trainer_params(self) -> Dict[str, Parameter]:
"""Parameters used to create a Flyte workflow for model object training."""
return {
name: param
for name, param in signature(self._trainer).parameters.items()
if param.kind == Parameter.KEYWORD_ONLY
}
def train_workflow(self):
"""Create a Flyte training workflow for model object training."""
dataset_task = self._dataset.dataset_task()
train_task = self.train_task()
[
hyperparam_arg,
hyperparam_type,
], *_ = train_task.python_interface.inputs.items()
wf = Workflow(name=self.train_workflow_name)
# add hyperparameter argument
wf.add_workflow_input(hyperparam_arg, hyperparam_type)
# add dataset.reader arguments
for arg, type in dataset_task.python_interface.inputs.items():
wf.add_workflow_input(arg, type)
# add training keyword-only arguments
trainer_param_types = {k: v.annotation for k, v in self.trainer_params.items()}
for arg, type in trainer_param_types.items():
wf.add_workflow_input(arg, type)
dataset_node = wf.add_entity(
dataset_task,
**{k: wf.inputs[k] for k in dataset_task.python_interface.inputs},
)
train_node = wf.add_entity(
train_task,
**{
hyperparam_arg: wf.inputs[hyperparam_arg],
**dataset_node.outputs,
**{arg: wf.inputs[arg] for arg in trainer_param_types},
},
)
wf.add_workflow_output("model_object", train_node.outputs["model_object"])
wf.add_workflow_output("hyperparameters", train_node.outputs["hyperparameters"])
wf.add_workflow_output("metrics", train_node.outputs["metrics"])
return wf
def predict_workflow(self):
"""Create a Flyte prediction workflow using features from the ``dataset.reader`` as the data source."""
dataset_task = self._dataset.dataset_task()
predict_task = self.predict_task()
wf = Workflow(name=self.predict_workflow_name)
model_arg_name, *_ = predict_task.python_interface.inputs.keys()
wf.add_workflow_input("model_object", predict_task.python_interface.inputs[model_arg_name])
for arg, type in dataset_task.python_interface.inputs.items():
wf.add_workflow_input(arg, type)
dataset_node = wf.add_entity(
dataset_task,
**{k: wf.inputs[k] for k in dataset_task.python_interface.inputs},
)
predict_node = wf.add_entity(
predict_task, **{"model_object": wf.inputs["model_object"], **dataset_node.outputs}
)
for output_name, promise in predict_node.outputs.items():
wf.add_workflow_output(output_name, promise)
return wf
def predict_from_features_workflow(self):
"""Create a Flyte prediction workflow using raw features."""
predict_task = self.predict_from_features_task()
wf = Workflow(name=self.predict_from_features_workflow_name)
for i, (arg, type) in enumerate(predict_task.python_interface.inputs.items()):
# assume that the first argument is the model object
wf.add_workflow_input("model_object" if i == 0 else arg, type)
predict_node = wf.add_entity(predict_task, **{k: wf.inputs[k] for k in wf.inputs})
for output_name, promise in predict_node.outputs.items():
wf.add_workflow_output(output_name, promise)
return wf
def train_task(self):
"""Create a Flyte task for training a model object.
This is used in the Flyte workflow produced by ``train_workflow``.
"""
if self._train_task:
return self._train_task
# make sure hyperparameter type signature is correct
*_, hyperparameters_param = signature(self._init).parameters.values()
hyperparameters_param = hyperparameters_param.replace(annotation=self.hyperparameter_type)
# assume that reader_return_type is a dict with only a single entry
[(data_arg_name, data_arg_type)] = self._dataset.reader_return_type.items()
# get keyword-only training args
@inner_task(
unionml_obj=self,
input_parameters=OrderedDict(
[
(p.name, p)
for p in [
hyperparameters_param,
Parameter(
data_arg_name,
kind=Parameter.KEYWORD_ONLY,
annotation=data_arg_type,
),
*self.trainer_params.values(),
]
]
),
return_annotation=NamedTuple(
"ModelArtifact",
model_object=signature(self._trainer).return_annotation,
hyperparameters=self.hyperparameter_type,
metrics=Dict[str, signature(self._evaluator).return_annotation],
),
**({} if self._train_task_kwargs is None else self._train_task_kwargs),
)
def train_task(**kwargs):
hyperparameters = kwargs["hyperparameters"]
raw_data = kwargs[data_arg_name]
trainer_kwargs = {p: kwargs[p] for p in self.trainer_params}
hyperparameters_dict = asdict(hyperparameters) if is_dataclass(hyperparameters) else hyperparameters
training_data = self._dataset.get_data(raw_data)
model_object = self._trainer(
self._init(hyperparameters=hyperparameters_dict),
*training_data["train"],
**trainer_kwargs,
)
metrics = {
split_key: self._evaluator(model_object, *training_data[split_key]) for split_key in training_data
}
return model_object, hyperparameters, metrics
self._train_task = train_task
return train_task
def predict_task(self):
"""Create a Flyte task for generating predictions from a model object.
This is used in the Flyte workflow produced by ``predict_workflow``.
"""
if self._predict_task:
return self._predict_task
predictor_sig = signature(self._predictor)
model_param, *_ = predictor_sig.parameters.values()
model_param = model_param.replace(name="model_object")
# assume that reader_return_type is a dict with only a single entry
[(data_arg_name, data_arg_type)] = self._dataset.reader_return_type.items()
data_param = Parameter(data_arg_name, kind=Parameter.KEYWORD_ONLY, annotation=data_arg_type)
# TODO: make sure return type is not None
@inner_task(
unionml_obj=self,
input_parameters=OrderedDict([(p.name, p) for p in [model_param, data_param]]),
return_annotation=predictor_sig.return_annotation,
**self._predict_task_kwargs,
)
def predict_task(model_object, **kwargs):
parsed_data = self._dataset._parser(kwargs[data_arg_name], **self._dataset.parser_kwargs)
features = parsed_data[self._dataset._parser_feature_key]
return self._predictor(model_object, features)
self._predict_task = predict_task
return predict_task
def predict_from_features_task(self):
"""Create a Flyte task for generating predictions from a model object.
This is used in the Flyte workflow produced by ``predict_from_features_workflow``.
"""
if self._predict_from_features_task:
return self._predict_from_features_task
predictor_sig = signature(self._predictor)
"""CSC148 Assignment 2
=== CSC148 Winter 2020 ===
Department of Computer Science,
University of Toronto
This code is provided solely for the personal and private use of
students taking the CSC148 course at the University of Toronto.
Copying for purposes other than this use is expressly prohibited.
All forms of distribution of this code, whether as given or with
any changes, are expressly prohibited.
Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>
All of the files in this directory and all subdirectories are:
Copyright (c) <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, and <NAME>
=== Module Description ===
This file contains the hierarchy of Goal classes.
"""
from __future__ import annotations
import random
from typing import List, Tuple, Union, Any
from block import Block
from settings import COLOUR_LIST
def generate_goals(num_goals: int) -> List[Goal]:
"""Return a randomly generated list of goals with length num_goals.
All elements of the list must be the same type of goal, but each goal
must have a different randomly generated colour from COLOUR_LIST. No two
goals can have the same colour.
Precondition:
- num_goals <= len(COLOUR_LIST)
"""
type_gen = random.randint(0, 1) # Perimeter or Blob
return_list = []
copy_colour = []
for color in COLOUR_LIST:
copy_colour.append(color)
if type_gen == 0: # Perimeter
i = 0
while i < num_goals:
color_gen = random.randint(0, len(copy_colour) - 1)
return_list.append(PerimeterGoal(copy_colour[color_gen]))
copy_colour.remove(copy_colour[color_gen])
i += 1
return return_list
for i in range(num_goals):
color_gen = random.randint(0, len(copy_colour) - 1)
return_list.append(BlobGoal(copy_colour[color_gen]))
copy_colour.remove(copy_colour[color_gen])
return return_list
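# Usage sketch (the output is random, so only the invariants are shown):
#
#   goals = generate_goals(2)
#   # len(goals) == 2
#   # both goals are the same class (either PerimeterGoal or BlobGoal)
#   # goals[0].colour != goals[1].colour, and both colours come from COLOUR_LIST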
def _flatten(block: Block) -> List[List[Tuple[int, int, int]]]:
"""Return a two-dimensional list representing <block> as rows and columns of
unit cells.
Return a list of lists L, where,
for 0 <= i, j < 2^{max_depth - self.level}
- L[i] represents column i and
- L[i][j] represents the unit cell at column i and row j.
Each unit cell is represented by a tuple of 3 ints, which is the colour
of the block at the cell location[i][j]
L[0][0] represents the unit cell in the upper left corner of the Block.
"""
return_list = []
unit = 2 ** (block.max_depth - block.level)
unit_size = block.size / unit
for i in range(unit):
temp_list = []
for j in range(unit):
temp_list.append(_get_colour(block, (i, j), unit_size))
return_list.append(temp_list)
return return_list
def _get_colour(block: Block, position: Tuple[int, int], unit_size: int) \
-> Tuple[int, int, int]:
"""
Return the color of a block located at 'position' in 'block',
represented by RGB
"""
if len(block.children) == 0:
return block.colour
x_pos = position[0] * unit_size + (unit_size / 100)
y_pos = position[1] * unit_size + (unit_size / 100)
for child in block.children:
child_left = child.position[0]
child_top = child.position[1]
child_right = child_left + child.size
child_bott = child_top + child.size
if child_left <= x_pos < child_right and \
child_top <= y_pos < child_bott:
return _get_colour(child, position, unit_size)
return None
def _remove_nested_list(obj: Union[Tuple[int, int, int], List]) -> Any:
"""
Return a 1D list with all elements in the nested list 'obj'
"""
if isinstance(obj, Tuple):
return obj
else:
lst = []
for sublist in obj:
temp = _remove_nested_list(sublist)
if isinstance(temp, Tuple):
lst.append(temp)
else:
lst.extend(temp)
return lst
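# Concrete example of _remove_nested_list flattening a nested structure of RGB tuples:
#
#   _remove_nested_list([[(1, 2, 3)], [(4, 5, 6), (7, 8, 9)]])
#   # -> [(1, 2, 3), (4, 5, 6), (7, 8, 9)]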
class Goal:
"""A player goal in the game of Blocky.
This is an abstract class. Only child classes should be instantiated.
=== Attributes ===
colour:
The target colour for this goal, that is the colour to which
this goal applies.
"""
colour: Tuple[int, int, int]
def __init__(self, target_colour: Tuple[int, int, int]) -> None:
"""Initialize this goal to have the given target colour.
"""
self.colour = target_colour
def score(self, board: Block) -> int:
"""Return the current score for this goal on the given board.
The score is always greater than or equal to 0.
"""
raise NotImplementedError
def description(self) -> str:
"""Return a description of this goal.
"""
raise NotImplementedError
class PerimeterGoal(Goal):
"""
The player must aim to put the most possible units of a given colour c
on the outer perimeter of the board. The player’s score is the total
number of unit cells of colour c that are on the perimeter.
There is a premium on corner cells: they count twice towards the score.
"""
def score(self, board: Block) -> int:
"""
Return the score this player can get from this 'board'
if the player has perimeter goal.
"""
target_colour = self.colour
score = 0
flat_list = _flatten(board)
for j in range(len(flat_list)):
if flat_list[0][j] == target_colour:
if j in [0, len(flat_list) - 1]:
score += 2
else:
score += 1
for j in range(len(flat_list)):
if flat_list[-1][j] == target_colour:
if j in [0, len(flat_list) - 1]:
score += 2
else:
score += 1
for j in range(2): # first and last row (index -0 is row 0, -1 is the last row)
for i in range(1, len(flat_list) - 1): # skip the corners, already counted twice above
if flat_list[i][-j] == target_colour:
score += 1
return score
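# Worked example: on a fully flattened 2x2 board in which every unit cell has the target
# colour, every cell is a corner and counts twice, so score(board) returns 8.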
def description(self) -> str:
"""
Return a string describing perimeter goal.
"""
descrip = 'The player must aim to put the most possible units of a ' \
'given colour c on the outer perimeter of ' \
'the board. The ' \
'player’s score is the total number of unit cells ' \
'of colour ' \
'c that are on the perimeter. There is a ' \
'premium on corner ' \
'cells: they count twice towards the score. '
return descrip
class BlobGoal(Goal):
"""
The player must aim for the largest “blob” of a given colour c.
A blob is a group of connected blocks with the same colour.
Two blocks are connected if their sides touch; touching
corners doesn’t count. The player’s score is the number
of unit cells in the largest blob of colour c.
"""
def score(self, board: Block) -> int:
"""
Return the score this player can get from this 'board'
if the player has blob goal.
"""
flat_board = _flatten(board)
board_size = len(flat_board)
# create parallel board
visited = []
for i in range(board_size):
temp_list = []
for j in range(board_size):
temp_list.append(-1)
visited.append(temp_list)
score_list = []
for i in range(board_size):
for j in range(board_size):
score_list.append \
(self._undiscovered_blob_size((i, j), flat_board, visited))
return max(score_list)
def _undiscovered_blob_size(self, pos: Tuple[int, int],
board: List[List[Tuple[int, int, int]]],
visited: List[List[int]]) -> int:
"""Return the size of the largest connected blob that (a) is of this
Goal's target colour, (b) includes the cell at <pos>, and (c) involves
only cells that have never been visited.
If <pos> is out of bounds for <board>, return 0.
<board> is the flattened board on which to search for the blob.
<visited> is a parallel structure that, in each cell, contains:
-1 if this cell has never been visited
0 if this cell has been visited and discovered
not to be of the target colour
1 if this cell has been visited and discovered
to be of the target colour
Update <visited> so that all cells that are visited are marked with
either 0 or 1.
"""
board_size = len(board)
if pos[0] < 0 or pos[0] >= board_size \
or pos[1] < 0 or pos[1] >= board_size:
return 0
column = pos[0]
row = pos[1]
if not board[column][row] == self.colour:
visited[column][row] = 0
return 0
score = 1
visited[column][row] = 1
# upper cell
if row - 1 >= 0:
if visited[column][row - 1] == -1:
score += self._undiscovered_blob_size((column, row - 1),
board, visited)
# lower cell
if row + 1 <= board_size - 1:
if visited[column][row + 1] == -1:
score += self._undiscovered_blob_size((column, row + 1),
board, visited)
# left cell
if column - 1 >= 0:
if visited[column - 1][row] == -1:
score += self._undiscovered_blob_size((column - 1, row),
board, visited)
if column + 1 <= board_size - 1:
if visited[column + 1][row] == -1:
score += self._undiscovered_blob_size((column + 1, row),
board, visited)
return score
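# Worked example (C = self.colour, X = any other colour), with board[i] giving column i:
#
#   board   = [[C, C], [X, C]]
#   visited = [[-1, -1], [-1, -1]]
#   self._undiscovered_blob_size((0, 0), board, visited)  # -> 3
#
# The blob found is (0,0) -> (0,1) -> (1,1); cell (1,0) is visited, marked 0 and not counted.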
def description(self) -> str:
"""
Return a string describing blob goal.
"""
descrip = 'The player must aim for the largest “blob” of a given ' \
'colour c. A blob is a group of connected blocks with the ' \
'same colour. Two blocks are connected if their sides ' \
'touch; touching corners doesn’t | |
hippocampus - note: currently reverts back to reptile
hippocampus 5. Human hippocampus - note: pending status, some simple causal features
hippocampus 6. Superintelligence level 1 - note: currently reverts back to lower human level
hippocampus 7. Superintelligence level 2 - note: currently reverts back to lower human level
#
--quick review of what hippocampus in biology does (since otherwise literature confusing for
non-bio background reader due to terminology):
-mammals have left and right hippocampi
-in mammals part of the 'allocortex'=='heterogenetic cortex' versus the 'neocortex'
(neocortex 6 layers vs. 3-4 cell layers in allocortex; types of allocortex: paleocortex,
archicortex and transitional (ie, to neocortex) periallocortex)
olfactory system also part of the allocortex
-considered part of 'limbic system' (=='paleomammalian cortex' midline structures
on L and R of thalamus, below temporal lobe; involved in emotion, motivation,
olfactory sense and memory; making memories affected by limibc system)
(basic limbic system is really amygdala, mammillary bodies, stria medull, nuc Gudden,
but also tightly connected to limbic thalamus, cingulate gyrus, hippocampus,
nucleus accumbens, anterior hypothalamus, ventral tegmental area, raphe nuc,
hebenular commissure, entorhinal cortex, olfactory bulbs)
-hippocampus involved in spatial memory; in this CCA1 simulation that is what the similarly
named method does
-hippocampus also involved in putting together portions of memory throughout whole brain
-given relation between learning and memory, not surprising to find that hippocampus involved
in learning; in more advanced hippocampal methods, ie, beyond LAMPREY level
these indeed are implemented in this method
-hippocampus needed for consolidation of (certain types) of short-term memories
into long-term memory, and for spatial navigation
-above note that in primates hippocampus is in bottom part of medial temporal lobe
-hippocampus = 'hippocampus proper'==Ammon's horn + dentate gyrus
-hippocampus in all mammals; animals with better spatial memories are found to have
larger hippocampal structures
-in other vertebrates (ie, fish to reptiles) don't have an allocortex but vertebrates
do have pallium which evolved to cortex in mammals
-even lamprey and hagfish (ancient jawless fish) have a pallium
-medial, lateral, dorsal pallium
-medial pallium is precursor of hippocampus and is homologous in other vertebrates but does
not look like hippocampus found in mammals
-evidence that hippocampal-like homologues used for spatial navigation in fish and
reptiles -- thus in the CCA1 we call all of these 'hippocampus' and provide different
levels of 'hippocampus' methods
-insect brain mushroom bodies may have function like hippocampal-like structures in
vertebrates, but homology uncertain, so we don't deal with in the CCA1
#
--documentation of behavior of hippocampus() method
CURRENTLY BEING TRANSITIONED INTO NANO CODE FROM PREVIOUS MBLS3 MODELS'''
# if no current hippocampal goal direction in memory, then there
# is not currently a local minimum, and so, reset the variable keeping
# track how many times we have tried this local minimum
if not d.h_mem_dirn_goal and d.local_minimum > 0:
d.local_minimum = 0
# even given the simple NESW hippocampal goal direction algo below (which may become more complex as code develops)
# it is possible the cca1 mistakes, eg, an edge for the lost hiker and thus this gets put in the
# the hippocampal goal direction memory d.h_mem_dirn_goal, and the cca1 endlessly tries to go in this direction
# thus, first we consider if a local minimum has been reached, and if so, we will simply try to
# get out of it by going in a random direction
if d.local_minimum > g.tries_before_declare_local_minimum:
d.h_mem_prev_dirn_goal = d.h_mem_dirn_goal
d.h_mem_dirn_goal = None
direction_to_proceed = hippo2_reasonable_random_direction(d, g, h)
print(
"debug: hippo2: local_min reset from {} to 0 and trying random dirn".format(
d.local_minimum
)
)
return direction_to_proceed
# update the hippocampal memory, ie, is there any or new goal direction?
# simply consider NESW surrounding current position and look for hiker
# (ie, goal) in 1 square distance
for i, j in ((0, "00"), (1, "01"), (2, "10"), (3, "11")):
if d.max_fused_index[i] == "0101000011110000": # lost hiker
print("debug: direction {} is lost hiker".format(j))
d.h_mem_prev_dirn_goal = d.h_mem_dirn_goal
d.h_mem_dirn_goal = str(j)
print(
"debug: hippo2: h_mem_dirn_goal was just set to {} and local_min is {}".format(
d.h_mem_dirn_goal, d.local_minimum
)
)
# is there a value in d.h_mem_dirn_goal, ie, is there a direction the
# hippocampus has stored as saying we should go in to get the goal?
if d.h_mem_dirn_goal:
# increment local minimum tracking variable after a number of tries
# we do not keep on going in this direction (eg, cca1 may think
# sensory info indicates hiker in square in this direction but in
# fact the sensory info is wrong and hiker is not there)
d.local_minimum += 1
print(
"debug: hippo2: d.h_mem_dirn_goal (ie, there is a goal) and local_minimum incremented",
d.h_mem_dirn_goal,
d.local_minimum,
)
direction = d.h_mem_dirn_goal
if direction in ["00", 0, "N", "north", "North"]:
direction = 0
elif direction in ["01", 1, "E", "east", "East"]:
direction = 1
elif direction in ["10", 2, "S", "south", "South"]:
direction = 2
elif direction in ["11", 1, "W", "west", "West"]:
direction = 3
else:
print("debug: invalid value for d.h_mem_dirn_goal being evaluated")
d.h_mem_dirn_goal = None
direction = 0 # arbitrary assign value for now
# now check to see if d.h_mem_dirn_goal direction is reasonable
# for example, new sensory inputs this evaluation cycle may indicate it is not
print(
"now evaluating if {} direction from d.h_mem_dirn_goal should be avoided".format(
d.h_mem_dirn_goal
)
)
# hippo_calc() looks at int_map and decides if should go in a queried direction
# returns True if ok to go in that direction, False if not ok
# False if edge, lake
if d.max_fused_value[direction] == "EDGE ": # EDGE
# g.gconscious(['....but this is thought to be an EDGE thus hippo_calc will be False', direction])
d.h_mem_dirn_goal = None
print("not returning d.h_mem_dirn_goal direction as it is an EDGE")
elif d.max_fused_value[direction] == "lake ": # lake
# g.gconscious(['....but this is thought to be a lake thus try again navigation for this move', direction])
d.h_mem_dirn_goal = None
print("not returning d.h_mem_dirn_goal direction as it is a lake")
else:
print(
"debug: d.h_mem_dirn_goal direction {} reasonable".format(
d.h_mem_dirn_goal
)
)
return d.h_mem_dirn_goal
# else d.h_mem_dirn_goal ==None
# no value in d.h_mem_dirn_goal, thus must decide direction
direction_to_proceed = hippo2_reasonable_random_direction(d, g, h)
print(
"debug: hippo2: trying reasonable random (actually look for hiker) dirn {}".format(
direction_to_proceed
)
)
return direction_to_proceed
def int_map_update(d, direction="00", geo_feature="forest"):
'''internal hippocampal spatial map constructed and used by the CCA1
note: in "nano" version the philosophy is to emulate modules which are replaced
by more authentic components in the finer grain simulations, thus very artificial
cartesian map constructed here
#global variable recap:
d.h_mem_dirn_goal = None
d.h_mem_prev_dirn_goal = None
d.cca1_position = (INITIATE_VALUE, INITIATE_VALUE)
hiker_position = (INITIATE_VALUE, INITIATE_VALUE)
forest_map = [['forest', 'forest', 'sh_rvr', 'forest'],
['lake ', 'forest', 'forest', 'forest'],
['forest', 'wtrfall', 'forest', 'forest'],
['forest', 'forest', 'forest', 'forest']]
nb. m rows x n columns coordinates, start 0,0 --
forest_map coords superimposed on int_map below
    int_map = [['', '',    '', '', '',    ''],
               ['', '0,0', '', '', '0,3', ''],
               ['', '1,0', '', '', '',    ''],
               ['', '2,0', '', '', '',    ''],
               ['', '3,0', '', '', '3,3', ''],
               ['', '',    '', '', '',    '']]
nb. start at 0,0 also, note includes EDGE squares which forest_map does not
#
see note about this being emulation level in "nano" version
nonetheless, the function is to provide CCA1 history of where this
direction, this location leads to
'''
    # convert d.cca1_position from forest map into int_map coords
    # (per the docstring diagram above, int_map wraps forest_map with a
    #  one-square EDGE border, hence the +1 offset on each axis)
    m, n = (d.cca1_position[0] + 1, d.cca1_position[1] + 1)
# flag that square as being one where CCA1 was already if no geo_feature
if d.int_map[m][n] == "":
d.int_map[m][n] = "explored"
# now flag square to direction specified with geo_feature given
# nb in this "nano" version just overwrite whatever is there, but in more
# authentic finer grain simulations to consider validity of data better
# (geo_feature 'uncertain' used for this purpose as well as expanded set of
# geo_features)
if direction in | |
<reponame>hhu-stups/python_jvm
# -*- coding: utf-8 -*-
import new, sys, os, types, native, thread
import classfile, classloader  # needed below for AbstractClassLoader and Code_attribute
#import javaclasses
from classloader import JClass, descriptor # eg. for putfield
from helper import make_String, throw_NullPointerException, throw_ArithmeticException, throw_ArrayIndexOutOfBoundsException, throw_ClassCastException
from hooks import HOOKS, vmobject_getClass_helper
from objectmodel import TypedMap, Objectref, Classref, Arrayref, Stack, JObject, JException, JArrayClass
from native import current_classloader, env_ptr # for jni
from ctypes import py_object, c_int, c_float, c_long, c_double, c_char
from rpython.rlib.rarithmetic import r_singlefloat, r_longlong
from rpython.rlib.objectmodel import instantiate
# for threading:
interp_lock = thread.allocate_lock()
opcodes_count = 0
OPCODES_MAX = 2**8
# JVMS: 4.4.4
# cast int (n) to IEEE 754 float (num)
# TODO: NaN, infinity
def float_parse(n):
if(n>>31)==0:
s = 1
else:
s = -1
e = n>>23 & 0xff
if e==0:
        m = (n & 0x7fffff) << 1  # parenthesised: << binds tighter than &
else:
m = (n & 0x7fffff) | 0x800000
num = s*m*2**(e-150)
return r_singlefloat(num)
# JVMS: 4.4.5
def parse_double(n):
if(n>>63)==0:
s = 1
else:
s = -1
e = n>>52 & 0x7ff
if e==0:
        m = (n & 0xfffffffffffff) << 1  # parenthesised: << binds tighter than &
else:
m = (n & 0xfffffffffffff) | 0x10000000000000
num = s*m*2**(e-1075)
return num
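# Worked examples for the two decoders above (values checked by hand against
# IEEE 754; kept as comments so that importing this module stays side-effect
# free -- r_singlefloat supports float() for the comparison):
#   float(float_parse(0x3F800000))      -> 1.0
#   float(float_parse(0xC0000000))      -> -2.0
#   parse_double(0x3FF0000000000000)    -> 1.0
#   parse_double(0x4000000000000000)    -> 2.0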
def intmask(n): # mask to a 32-bit int
n = n & 0xFFFFFFFF
if n >= 0x80000000:
n -= 0x100000000
return int(n)
def shortmask(n):
n = n & 0xFFFF
if n >= 0x8000:
n -= 0x10000
return int(n)
def signedbytemask(n):
n = n & 0xFF
if n >= 0x80:
n -= 0x100
return int(n)
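# The three masks above emulate Java's fixed-width two's-complement types on
# top of Python's unbounded integers, e.g.:
#   intmask(0xFFFFFFFF)   == -1       shortmask(0x8000)     == -32768
#   signedbytemask(0x80)  == -128     signedbytemask(0x7F)  == 127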
def cast_char(c):
if isinstance(c, int):
return unichr(c)
return c
# adds to the AbstractClassLoader just one method
# which is able to run Frames
class ClassLoader(classloader.AbstractClassLoader):
def __init__(self, path):
classloader.AbstractClassLoader.__init__(self, path)
        # XXX: do not use Classref here; may be a problem even though it is only used internally
self.called_classes = Arrayref([],None, self.getclass("[Ljava.lang.Class;")) # needed for e.g stackwalker hook
self.extern_libs = {}
# This method is called by the invoke-virtual,
# -special, -staticmethods (or native.py/JNI) and at the jvm-start
# It executes the bytecode by using a Frame
# TODO: refactor this method
def invoke_method(self, cls, method, descr, args):
# remember caller
native_args = args.clone()
classname, supercls = self.parse_parameter(cls)
jcls = JClass(classname, supercls, cls)
self.called_classes.arrayref.insert(0,Classref(self.getclass("java/lang/Class"), True, jcls, self))
const = cls.constant_pool
classNameIndex = const[cls.this_class].name_index
self.currentclsName = clsName = const[classNameIndex]
self.currentmethodName = methodName = const[method.name_index]
argcount = len(descr) - 1 # one elem. is the return value
int_locals = {}
ref_locals = {}
float_locals = {}
double_locals = {}
long_locals = {}
i = 0
# safe "this" reference (invisible in descr)
if not (method.access_flags & method.ACC_STATIC):
arg = args.pop()
ref_locals[i] = arg
i = i + 1
#assert len(args) == argcount
#char_locals = {}
        # index: position in the descr list, used to get the declared type
        # i: index into the locals maps, used to handle 2-slot values (long/double)
#args.print_stack()
for index in range(argcount):
arg = args.pop()
if descr[index] == "int" or descr[index]=="boolean" or descr[index] == "short" or descr[index]=="byte":
int_locals[i] = arg
assert isinstance(arg,int)
elif descr[index] == "char":
#print arg
#char_locals[i] = arg
#if isinstance(arg, str):
arg = ord(arg)
int_locals[i] = arg
elif descr[index] == "float":
float_locals[i] = r_singlefloat(arg)
elif descr[index] == "long":
long_locals[i] = r_longlong(arg)
i = i+1 # long needs two stack places
elif descr[index] == "double":
double_locals[i] = arg
i = i+1 # doubles reserve two places
assert isinstance(arg,float) #python float ==c double
else:
ref_locals[i] = arg
assert not isinstance(arg,r_singlefloat)
assert not isinstance(arg,int)
assert not isinstance(arg,float)
assert not isinstance(arg,r_longlong)
i = i + 1
locals = TypedMap()
#locals.set_char_map(char_locals)
locals.set_int_map(int_locals)
locals.set_ref_map(ref_locals)
locals.set_float_map(float_locals)
locals.set_double_map(double_locals)
locals.set_long_map(long_locals)
if method.access_flags & method.ACC_SYNCHRONIZED:
if not (method.access_flags & method.ACC_STATIC):
monitor = ref_locals[0]
from java_threading import monitorenter
monitorenter(self, monitor)
#else:
# raise NotImplemented("STATIC SYNCHRONIZED METHODS")
# hook test
#print "invoking methodname:",methodName,str(descr)
# print "invoking class:",clsName
#if methodName == "<init>" and clsName == "java/lang/String":
# print locals.print_map()
# print locals.get(0,"ref").jcls.__name__
# raise Exception("A")
#print
#if methodName == "getProperty":
# arrayr=locals.get(1,"ref").fields.get(unicode("value"),"array")
# print arrayr.arrayref
# print "locals:",locals.print_map()
# if hook:run it
if (clsName, methodName) in HOOKS:
# use gnucp native File-impl. if posix
if not (os.name == "posix" and clsName == "java/io/VMFile"):
hook_method = HOOKS[(clsName,methodName)]
return hook_method(locals, self, cls, method)
# if native, call native method
if (method.access_flags & method.ACC_NATIVE):
return self.call_native(clsName, methodName, descr, native_args, jcls, method)
# else run bytecode
self.called_classes.arrayref.insert(0,Classref(self.getclass("java/lang/Class"), True, jcls, self))
# create Frame
frame = Frame(self, cls, method)
frame.set_locals(locals)
#try:
re = frame.run()
if method.access_flags & method.ACC_SYNCHRONIZED:
if not (method.access_flags & method.ACC_STATIC):
monitor = ref_locals[0]
from java_threading import monitorexit
monitorexit(monitor)
#else:
# raise NotImplemented("STATIC SYNCHRONIZED METHODS")
#print "leaving methodname:",methodName
#print "leaving class:",clsName
self.called_classes.arrayref.pop()
return re
#except JException, je:
#print "exception in methodname:",methodName
#print "exception in class:",clsName
# self.called_classes.arrayref.pop()
# return je
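    # Note on the shape of `descr` as used by invoke_method/call_native above:
    # it holds one entry per declared parameter followed by the return type
    # (hence `argcount = len(descr) - 1` and `descr.pop()` for the restype).
    # Hypothetical example: a Java method "int max(int, int)" would arrive as
    # ["int", "int", "int"]; the actual parsing is done by
    # classloader.descriptor.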
def init_static(self, pycls, method):
frame = Frame(self, pycls, method)
frame.run()
def arg_to_ctype(self, arg):
if arg == None:
return None
elif arg == "int" or arg == "byte" or arg == "short" or arg =="boolean":
return c_int
elif arg == "char":
return c_char
elif arg == "float":
return c_float
elif arg == "long":
return c_long
elif arg == "double":
return c_double
else:
return py_object
def call_native(self, clsName, methodName, descr, args, jcls, method):
native.current_classloader = self # set global var of native.py
real_method_name = "Java_"+ clsName.replace("_","_1").replace(";","_2").replace("[","_3").replace("/","_")+"_"+methodName.replace("_","_1").replace(";","_2").replace("[","_3")
cfunction = None
for lib in self.extern_libs.values():
try:
cfunction = eval("lib."+real_method_name)
break
except AttributeError:
continue
assert not cfunction == None
# last arg is return value
cfunction.restype = self.arg_to_ctype(descr.pop())
if not (method.access_flags & method.ACC_STATIC):
objref = args.pop()
else:
objref = Classref(self.getclass("java/lang/Class"), True, jcls, self) # TODO: this is a classref
string = "cfunction(env_ptr, py_object(objref)"
for i in range(len(descr)):
ctype = self.arg_to_ctype(descr[i])
string += ", "+str(ctype.__name__)+"(args.pop())"
string += ")"
#print real_method_name
#print self.extern_libs
#print cfunction
#print "desc",descr
#print "loc", args
#print objref
#print string
# set our "exception-memory" to none
native.exception_after_native_call = None
result = eval(string) # execute native call
#print result
# the native call has created an Exception
if native.exception_after_native_call:
# this exception will be handled by the caller
jexception = native.exception_after_native_call
raise jexception
#print "JNI RES:",result
return result
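# Example of the JNI name mangling built in call_native above: for
# clsName "java/io/VMFile" and methodName "isFile" the lookup name becomes
# "Java_java_io_VMFile_isFile".  "_" inside a name is escaped as "_1",
# ";" as "_2" and "[" as "_3", following the JNI native-method naming scheme.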
# FIXME use r_singlefloat and r_long_long
# javaclasses > print has problems with this types
DESCR_CAST = {'byte': signedbytemask,
'char': cast_char,
'double': float,
'float': float,#r_singlefloat,
'int': intmask,
'long': long,#r_longlong,
'short': shortmask,
'boolean': bool,
}
# FIXME: Frame and interpreter are mixed :(
# After every methodcall, a Stackframe is created
# It executes every Java-Bytecode until a return opcode occurs
class Frame(object):
intmask = staticmethod(intmask)
instantiate = staticmethod(instantiate) ## XXX for javaclasses.py
DESCR_CAST = DESCR_CAST
DESCR_UNCAST = {'char': ord}
def __init__(self, loader, cls, method):
self.loader = loader
self.cls = cls
self.const = cls.constant_pool
self.method = method
self.co = cls.getattr(method, 'Code', classfile.Code_attribute) # XXX maybe not Rpython
self.stack = Stack()
self.locals = TypedMap()
def run(self):
global opcodes_count
global OPCODES_MAX
global interp_lock
m_nam = unicode(self.const[self.method.name_index])
cls_inf = self.const[self.cls.this_class]
#print "\t", self.const[cls_inf.name_index],":",m_nam
self.next_instr = 0
try:
while True:
opcodes_count = opcodes_count +1
if opcodes_count==OPCODES_MAX:
opcodes_count = 0 # reset for next thread in que
import java_threading
if not java_threading.currentVMThread == "main_no_init":
java_threading.currentVMThread.STATE =make_String("WAITING", self.loader)
temp = java_threading.currentVMThread
#print "inperp:", java_threading.currentVMThread
interp_lock.release()
# if there is an other thread it will get the
# exc. control here!
interp_lock.acquire()
#TODO: do something if currentVMThread.isInterrupted = True
java_threading.currentVMThread = temp
if not java_threading.currentVMThread == "main_no_init":
java_threading.currentVMThread.STATE =make_String("RUNNABLE", self.loader)
last_instr = self.next_instr
num = self.nextbyte()
opimpl = getattr(self, 'opcode_0x%02x' % (num,)) # XXX not Rpython
import java_threading
#print '\t', java_threading.currentVMThread, ":", self.next_instr-1,": ",opimpl.__doc__
#self.stack.print_stack()
#print "\t", self.const[cls_inf.name_index],":",m_nam
try:
if opimpl():
                        # this block is only visited after a jump opcode
                        # (opimpl returned True to request a branch)
self.next_instr = last_instr + 1
offset = self.nextsignedword()
if num in WIDE_TARGET:
offset = (offset << 16) | self.nextword()
self.next_instr = last_instr + offset
except JException, je:
#print je.objref.jcls.__name__
#print "\t", self.const[cls_inf.name_index],":",m_nam
self.handle_exception(je.objref)
except Return, r:
return r.retval
def set_locals(self, locals):
self.locals = locals
def nextbyte(self):
index = self.next_instr
self.next_instr = index + 1
return ord(self.co.code[index])
def nextsignedbyte(self):
return signedbytemask(self.nextbyte())
def nextword(self):
index = self.next_instr
self.next_instr = index + 2
return (ord(self.co.code[index]) << 8) | ord(self.co.code[index + 1])
def nextdoubleword(self):
index = self.next_instr
self.next_instr = index + 4
return (ord(self.co.code[index]) << 24) | (ord(self.co.code[index + 1]) << 16) | (ord(self.co.code[index + 2]) << 8) | |
= node.diskUpdate(disks=update_disks)
disks = get_update_disks(new_node.disks)
assert disks[list(new_node.disks)[0]].tags == tags["disk"]
new_node = set_node_tags(client, node, tags["node"])
assert new_node.tags == tags["node"]
tag_mappings[node.id] = tags
yield tag_mappings
client = get_longhorn_api_client() # NOQA
nodes = client.list_node()
for node in nodes:
update_disks = get_update_disks(node.disks)
update_disks[list(update_disks)[0]].tags = []
new_node = node.diskUpdate(disks=update_disks)
disks = get_update_disks(new_node.disks)
assert disks[list(new_node.disks)[0]].tags is None
new_node = set_node_tags(client, node)
assert new_node.tags is None
@pytest.fixture
def random_labels():
labels = {}
i = 0
while i < 3:
key = "label/" + "".join(random.choice(string.ascii_lowercase +
string.digits)
for _ in range(6))
if not labels.get(key):
labels["key"] = generate_random_data(VOLUME_RWTEST_SIZE)
i += 1
return labels
@pytest.fixture
def client(request):
"""
Return an individual Longhorn API client for testing.
"""
k8sconfig.load_incluster_config()
# Make sure nodes and managers are all online.
ips = get_mgr_ips()
# check if longhorn manager port is open before calling get_client
for ip in ips:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mgr_port_open = sock.connect_ex((ip, 9500))
if mgr_port_open == 0:
client = get_client(ip + PORT)
break
hosts = client.list_node()
assert len(hosts) == len(ips)
request.addfinalizer(lambda: cleanup_client())
cleanup_client()
return client
@pytest.fixture
def clients(request):
k8sconfig.load_incluster_config()
ips = get_mgr_ips()
client = get_client(ips[0] + PORT)
hosts = client.list_node()
assert len(hosts) == len(ips)
clis = get_clients(hosts)
def finalizer():
cleanup_client()
request.addfinalizer(finalizer)
cleanup_client()
return clis
def cleanup_client():
client = get_longhorn_api_client()
# cleanup test disks
cleanup_test_disks(client)
volumes = client.list_volume()
for v in volumes:
# ignore the error when clean up
try:
client.delete(v)
except Exception as e:
print("Exception when cleanup volume ", v, e)
pass
images = client.list_engine_image()
for img in images:
if not img.default:
# ignore the error when clean up
try:
client.delete(img)
except Exception as e:
print("Exception when cleanup image", img, e)
pass
# enable nodes scheduling
reset_node(client)
reset_settings(client)
reset_disks_for_all_nodes(client)
reset_engine_image(client)
# check replica subdirectory of default disk path
if not os.path.exists(DEFAULT_REPLICA_DIRECTORY):
subprocess.check_call(
["mkdir", "-p", DEFAULT_REPLICA_DIRECTORY])
def get_client(address):
url = 'http://' + address + '/v1/schemas'
c = longhorn.from_env(url=url)
return c
def get_mgr_ips():
ret = k8sclient.CoreV1Api().list_pod_for_all_namespaces(
label_selector="app=longhorn-manager",
watch=False)
mgr_ips = []
for i in ret.items:
mgr_ips.append(i.status.pod_ip)
return mgr_ips
def get_self_host_id():
envs = os.environ
return envs["NODE_NAME"]
def get_backupstore_url():
backupstore = os.environ['LONGHORN_BACKUPSTORES']
backupstore = backupstore.replace(" ", "")
backupstores = backupstore.split(",")
assert len(backupstores) != 0
return backupstores
def get_clients(hosts):
clients = {}
for host in hosts:
assert host.name is not None
assert host.address is not None
clients[host.name] = get_client(host.address + PORT)
return clients
def wait_scheduling_failure(client, volume_name):
"""
Wait and make sure no new replicas are running on the specified
    volume. Trigger a failed assertion if one is detected.
:param client: The Longhorn client to use in the request.
:param volume_name: The name of the volume.
"""
scheduling_failure = False
for i in range(RETRY_COUNTS):
v = client.by_id_volume(volume_name)
if v.conditions.scheduled.status == "False" and \
v.conditions.scheduled.reason == \
"ReplicaSchedulingFailure":
scheduling_failure = True
if scheduling_failure:
break
time.sleep(RETRY_INTERVAL)
assert scheduling_failure
def wait_for_device_login(dest_path, name):
dev = ""
for i in range(RETRY_COUNTS):
for j in range(RETRY_COMMAND_COUNT):
files = []
try:
files = os.listdir(dest_path)
break
except Exception:
time.sleep(1)
assert files
if name in files:
dev = name
break
time.sleep(RETRY_INTERVAL)
assert dev == name
return dev
def wait_for_replica_directory():
found = False
for i in range(RETRY_COUNTS):
if os.path.exists(DEFAULT_REPLICA_DIRECTORY):
found = True
break
time.sleep(RETRY_INTERVAL)
assert found
def wait_for_volume_creation(client, name):
for i in range(RETRY_COUNTS):
volumes = client.list_volume()
found = False
for volume in volumes:
if volume.name == name:
found = True
break
        if found:
            break
        time.sleep(RETRY_INTERVAL)
assert found
def wait_for_volume_endpoint(client, name):
for i in range(RETRY_COUNTS):
v = client.by_id_volume(name)
engine = get_volume_engine(v)
if engine.endpoint != "":
break
time.sleep(RETRY_INTERVAL)
check_volume_endpoint(v)
return v
def wait_for_volume_detached(client, name):
return wait_for_volume_status(client, name,
VOLUME_FIELD_STATE,
VOLUME_STATE_DETACHED)
def wait_for_volume_detached_unknown(client, name):
wait_for_volume_status(client, name,
VOLUME_FIELD_ROBUSTNESS,
VOLUME_ROBUSTNESS_UNKNOWN)
return wait_for_volume_detached(client, name)
def wait_for_volume_healthy(client, name):
wait_for_volume_status(client, name,
VOLUME_FIELD_STATE,
VOLUME_STATE_ATTACHED)
wait_for_volume_status(client, name,
VOLUME_FIELD_ROBUSTNESS,
VOLUME_ROBUSTNESS_HEALTHY)
return wait_for_volume_endpoint(client, name)
def wait_for_volume_healthy_no_frontend(client, name):
wait_for_volume_status(client, name,
VOLUME_FIELD_STATE,
VOLUME_STATE_ATTACHED)
return wait_for_volume_status(client, name,
VOLUME_FIELD_ROBUSTNESS,
VOLUME_ROBUSTNESS_HEALTHY)
def wait_for_volume_degraded(client, name):
wait_for_volume_status(client, name,
VOLUME_FIELD_STATE,
VOLUME_STATE_ATTACHED)
return wait_for_volume_status(client, name,
VOLUME_FIELD_ROBUSTNESS,
VOLUME_ROBUSTNESS_DEGRADED)
def wait_for_volume_faulted(client, name):
wait_for_volume_status(client, name,
VOLUME_FIELD_STATE,
VOLUME_STATE_DETACHED)
return wait_for_volume_status(client, name,
VOLUME_FIELD_ROBUSTNESS,
VOLUME_ROBUSTNESS_FAULTED)
def wait_for_volume_status(client, name, key, value):
wait_for_volume_creation(client, name)
for i in range(RETRY_COUNTS):
volume = client.by_id_volume(name)
if volume[key] == value:
break
time.sleep(RETRY_INTERVAL)
assert volume[key] == value
return volume
def wait_for_volume_delete(client, name):
for i in range(RETRY_COUNTS):
volumes = client.list_volume()
found = False
for volume in volumes:
if volume.name == name:
found = True
break
if not found:
break
time.sleep(RETRY_INTERVAL)
assert not found
def wait_for_backup_volume_delete(client, name):
for i in range(RETRY_COUNTS):
bvs = client.list_backupVolume()
found = False
for bv in bvs:
if bv.name == name:
found = True
break
if not found:
break
time.sleep(RETRY_INTERVAL)
assert not found
def wait_for_volume_current_image(client, name, image):
wait_for_volume_creation(client, name)
for i in range(RETRY_COUNTS):
volume = client.by_id_volume(name)
if volume.currentImage == image:
break
time.sleep(RETRY_INTERVAL)
assert volume.currentImage == image
return volume
def wait_for_volume_replica_count(client, name, count):
wait_for_volume_creation(client, name)
for i in range(RETRY_COUNTS):
volume = client.by_id_volume(name)
if len(volume.replicas) == count:
break
time.sleep(RETRY_INTERVAL)
assert len(volume.replicas) == count
return volume
def wait_for_volume_replicas_mode(client, volname, mode, replicas_name=None):
verified = False
for i in range(RETRY_COUNTS):
volume = client.by_id_volume(volname)
count = 0
replicas = []
if replicas_name is None:
replicas = volume.replicas
else:
for r_name in replicas_name:
found = False
for r in volume.replicas:
if r.name == r_name:
replicas.append(r)
found = True
assert found
for r in replicas:
if r.mode == mode:
count += 1
if count == len(replicas):
verified = True
break
time.sleep(RETRY_INTERVAL)
assert verified
return volume
def wait_for_snapshot_purge(client, volume_name, *snaps):
completed = 0
last_purge_progress = {}
purge_status = {}
for i in range(RETRY_COUNTS):
completed = 0
v = client.by_id_volume(volume_name)
purge_status = v.purgeStatus
for status in purge_status:
assert status.error == ""
progress = status.progress
assert progress <= 100
replica = status.replica
last = last_purge_progress.get(replica)
assert last is None or last <= status.progress
last_purge_progress["replica"] = progress
if status.state == "complete":
assert progress == 100
completed += 1
if completed == len(purge_status):
break
time.sleep(RETRY_INTERVAL)
assert completed == len(purge_status)
    # Now that the purge has been reported as completed, the snapshots
    # should be removed, or "marked as removed" in the case of
    # the latest snapshot.
found = False
snapshots = v.snapshotList(volume=volume_name)
for snap in snaps:
for vs in snapshots.data:
if snap == vs["name"]:
if vs["removed"] is False:
found = True
break
if "volume-head" not in vs["children"]:
found = True
break
assert not found
return v
def wait_for_engine_image_creation(client, image_name):
for i in range(RETRY_COUNTS):
images = client.list_engine_image()
found = False
for img in images:
if img.name == image_name:
found = True
break
        if found:
            break
        time.sleep(RETRY_INTERVAL)
assert found
def wait_for_engine_image_state(client, image_name, state):
wait_for_engine_image_creation(client, image_name)
for i in range(RETRY_COUNTS):
image = client.by_id_engine_image(image_name)
if image.state == state:
break
time.sleep(RETRY_INTERVAL)
assert image.state == state
return image
def wait_for_engine_image_ref_count(client, image_name, count):
wait_for_engine_image_creation(client, image_name)
for i in range(RETRY_COUNTS):
image = client.by_id_engine_image(image_name)
if image.refCount == count:
break
time.sleep(RETRY_INTERVAL)
assert image.refCount == count
if count == 0:
assert image.noRefSince != ""
return image
def json_string_go_to_python(str):
return str.replace("u\'", "\"").replace("\'", "\""). \
replace("True", "true").replace("False", "false")
def delete_replica_processes(client, api, volname):
replica_map = {}
volume = client.by_id_volume(volname)
for r in volume.replicas:
replica_map[r.instanceManagerName] = r.name
for rm_name, r_name in replica_map.items():
delete_command = 'longhorn-instance-manager process delete ' + \
'--name ' + r_name
exec_instance_manager(api, rm_name, delete_command)
def crash_replica_processes(client, api, volname, replicas=None,
wait_to_fail=True):
if replicas is None:
volume = client.by_id_volume(volname)
replicas = volume.replicas
for r in replicas:
assert r.instanceManagerName != ""
kill_command = "kill `ps aux | grep '" + r['dataPath'] +\
"' | grep -v grep | awk '{print $2}'`"
exec_instance_manager(api, r.instanceManagerName, kill_command)
if wait_to_fail is True:
for r in replicas:
wait_for_replica_failed(client, volname, r['name'])
def exec_instance_manager(api, im_name, cmd):
exec_cmd = ['/bin/sh', '-c', cmd]
with timeout(seconds=STREAM_EXEC_TIMEOUT,
error_message='Timeout on executing stream read'):
stream(api.connect_get_namespaced_pod_exec,
im_name,
LONGHORN_NAMESPACE, command=exec_cmd,
stderr=True, stdin=False, stdout=True, tty=False)
def wait_for_replica_failed(client, volname, replica_name):
failed = True
for i in range(RETRY_COUNTS):
time.sleep(RETRY_INTERVAL)
failed = True
volume = client.by_id_volume(volname)
for r in volume.replicas:
if r['name'] != replica_name:
continue
if r['running'] or r['failedAt'] == "":
failed = False
break
if r['instanceManagerName'] != "":
im = client.by_id_instance_manager(
r['instanceManagerName'])
if r['name'] in im['instances']:
failed = False
break
if failed:
break
assert failed
def wait_for_replica_running(client, volname, replica_name):
is_running = False
for i in range(RETRY_COUNTS):
volume = client.by_id_volume(volname)
for r in volume.replicas:
if r['name'] != replica_name:
continue
if r['running'] and r['instanceManagerName'] != "":
im = client.by_id_instance_manager(
r['instanceManagerName'])
if r['name'] in im['instances']:
is_running = True
break
if is_running:
break
time.sleep(RETRY_INTERVAL)
assert is_running
@pytest.fixture
def volume_name(request):
return generate_volume_name()
@pytest.fixture
def pvc_name(request):
return generate_volume_name()
@pytest.fixture
def csi_pvc_name(request):
return generate_volume_name()
def generate_volume_name():
return VOLUME_NAME | |
<filename>azure-cis.py
#!/usr/bin/python3
# @Author <NAME>
# Version 1.1
import subprocess
import sys
import datetime
import os
from include import check1,check2,check3,check4,check5,check6,check7,check8,subscription
from html.parser import HTMLParser
################ CSV EXPORT HELPERS ###################
# Use HTML Parser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs= True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
# Strip HTML tags
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
# Process a CSV column: wrap in double-quotes, replace embedded double-quotes with single-quotes, and replace <br> line breaks with newlines
def c(text):
#return '"'+ str(text).strip().replace("<br>", "\n").replace("<b>","'").replace("</b>","'").replace('"',"'") + '",'
return '"'+ str(text).strip().replace("<br>", "\n").replace('"',"'") + '",'
# Process a CSV Row: Add Subscription name column, add newline
subname=""
def r(text):
return '"' + subname + '",' + text + "\n"
################ HTML HEADER ###################
def generate_report(subid,name,cloudname):
global subname
subname=name
html_start = """
<html>
<head>
<title>Azure CIS Benchmark</title>
</head>
<body>
<h1><center>Azure CIS Benchmark for """+name+"""</center></h1>
<font size="-1">
"""
html_end = """
</body>
</html>
"""
start_list="<li><b>\n"
end_list="</h3></li></b>\n"
################ HTML 1.x ##############
content1 = """
<h2>CIS Azure 1.x</h2>
<ul>
"""
content11="1.1 Ensure that multi-factor authentication is enabled for all privileged users"
content12="1.2 Ensure that multi-factor authentication is enabled for all nonprivileged users"
content13="1.3 Ensure that there are no guest users"
content14="1.4 Ensure that 'Allow users to remember multi-factor authentication on devices they trust' is 'Disabled'"
content15="1.5 Ensure that 'Number of methods required to reset' is set to '2'"
content16="1.6 Ensure that 'Number of days before users are asked to re-confirm their authentication information' is not set to '0"
content17="1.7 Ensure that 'Notify users on password resets?' is set to 'Yes'"
content18="1.8 Ensure that 'Notify all admins when other admins reset their password?' is set to 'Yes'"
content19="1.9 Ensure that 'Users can consent to apps accessing company data on their behalf' is set to 'No'"
content110="1.10 Ensure that 'Users can add gallery apps to their Access Panel' is set to 'No'"
content111="1.11 Ensure that 'Users can register applications' is set to 'No'"
content112="1.12 Ensure that 'Guest users permissions are limited' is set to 'Yes'"
content113="1.13 Ensure that 'Members can invite' is set to 'No'"
content114="1.14 Ensure that 'Guests can invite' is set to 'No'"
content115="1.15 Ensure that 'Restrict access to Azure AD administration portal' is set to 'Yes'"
content116="1.16 Ensure that 'Self-service group management enabled' is set to 'No'"
content117="1.17 Ensure that 'Users can create security groups' is set to 'No'"
content118="1.18 Ensure that 'Users who can manage security groups' is set to 'None'"
content119="1.19 Ensure that 'Users can create Office 365 groups' is set to 'No'"
content120="1.20 Ensure that 'Users who can manage Office 365 groups' is set to 'None'"
content121="1.21 Ensure that 'Enable 'All Users' group' is set to 'Yes'"
content122="1.22 Ensure that 'Require Multi-Factor Auth to join devices' is set to 'Yes'"
content123="1.23 Ensure that no custom subscription owner roles are created"
result11=check1.check11(subid)
#sys.exit(0)
result12=check1.check12()
result13=check1.check13()
result14=check1.check14()
result15=check1.check15()
result16=check1.check16()
result17=check1.check17()
result18=check1.check18()
result19=check1.check19()
result110=check1.check110()
result111=check1.check111()
result112=check1.check112()
result113=check1.check113()
result114=check1.check114()
result115=check1.check115()
result116=check1.check116()
result117=check1.check117()
result118=check1.check118()
result119=check1.check119()
result120=check1.check120()
result121=check1.check121()
result122=check1.check122()
result123=check1.check123()
content1_1 = '<h3 id="content11">'+start_list+content11+end_list+result11[0]+'<h3 id="content12">'+start_list+content12+end_list+result12[0]+'<h3 id="content13">'+start_list+content13+end_list+result13[0]
content1_2 = '<h3 id="content11">'+start_list+content14+end_list+result14[0]+'<h3 id="content15">'+start_list+content15+end_list+result15[0]+'<h3 id="content16">'+start_list+content16+end_list+result16[0]
content1_3 = '<h3 id="content17">'+start_list+content17+end_list+result17[0]+'<h3 id="content18">'+start_list+content18+end_list+result18[0]+'<h3 id="content19">'+start_list+content19+end_list+result19[0]
content1_4 = '<h3 id="content110">'+start_list+content110+end_list+result110[0]+'<h3 id="content111">'+start_list+content111+end_list+result111[0]+'<h3 id="content112">'+start_list+content112+end_list+result112[0]
content1_5 = '<h3 id="content113">'+start_list+content113+end_list+result113[0]+'<h3 id="content114">'+start_list+content114+end_list+result114[0]+'<h3 id="content115">'+start_list+content115+end_list+result115[0]
content1_6 = '<h3 id="content116">'+start_list+content116+end_list+result116[0]+'<h3 id="content117">'+start_list+content117+end_list+result117[0]+'<h3 id="content118">'+start_list+content118+end_list+result118[0]
content1_7 = '<h3 id="content119">'+start_list+content119+end_list+result119[0]+'<h3 id="content120">'+start_list+content120+end_list+result120[0]+'<h3 id="content121">'+start_list+content121+end_list+result121[0]
content1_8 = '<h3 id="content122">'+start_list+content122+end_list+result122[0]+'<h3 id="content123">'+start_list+content123+end_list+result123[0]
content1=content1+content1_1+content1_2+content1_3+content1_4+content1_5+content1_6+content1_7+content1_8
################ HTML 2.x ##############
content2 = """
<h2>CIS Azure 2.x</h2>
<ul>
"""
content21="2.1 Ensure that standard pricing tier is selected"
content22="2.2 Ensure that 'Automatic provisioning of monitoring agent' is set to 'On'"
content23="2.3 Ensure that 'System updates' is set to 'On'"
content24="2.4 Ensure that 'Security Configurations' is set to 'On'"
content25="2.5 Ensure that 'Endpoint protection' is set to 'On'"
content26="2.6 Ensure that 'Disk encryption' is set to 'On'"
content27="2.7 Ensure that 'Network security groups' is set to 'On'"
content28="2.8 Ensure that 'Web application firewall' is set to 'On'"
content29="2.9 Ensure that 'Next generation firewall' is set to 'On'"
content210="2.10 Ensure that 'Vulnerability assessment' is set to 'On'"
content211="2.11 Ensure that 'Storage Encryption' is set to 'On'"
content212="2.12 Ensure that 'JIT Network Access' is set to 'On'"
content213="2.13 Ensure that 'Adaptive Application Controls' is set to 'On'"
content214="2.14 Ensure that 'SQL auditing & Threat detection' is set to 'On'"
content215="2.15 Ensure that 'SQL Encryption' is set to 'On'"
content216="2.16 Ensure that 'Security contact emails' is set"
content217="2.17 Ensure that security contact 'Phone number' is set"
content218="2.18 Ensure that 'Send me emails about alerts' is set to 'On'"
content219="2.19 Ensure that 'Send email also to subscription owners' is set to 'On'"
result21=check2.check21()
result22=check2.check22(subid)
content2_1 = '<h3 id="content21">'+start_list+content21+end_list+result21+'<h3 id="content22">'+start_list+content22+end_list+result22[0]+'<h3 id="content23">'+start_list+content23+end_list+result22[1]
content2_2 = '<h3 id="content24">'+start_list+content24+end_list+result22[2]+'<h3 id="content25">'+start_list+content25+end_list+result22[3]+'<h3 id="content26">'+start_list+content26+end_list+result22[4]+'<h3 id="content27">'+start_list+content27+end_list+result22[5]+'<h3 id="content28">'+start_list+content28+end_list+result22[6]
content2_3 = '<h3 id="content29">'+start_list+content29+end_list+result22[7]+'<h3 id="content210">'+start_list+content210+end_list+result22[8]+'<h3 id="content211">'+start_list+content211+end_list+result22[9]
content2_4 = '<h3 id="content212">'+start_list+content212+end_list+result22[10]+'<h3 id="content213">'+start_list+content213+end_list+result22[11]+'<h3 id="content214">'+start_list+content214+end_list+result22[12]
content2_5 = '<h3 id="content215">'+start_list+content215+end_list+result22[13]+'<h3 id="content216">'+start_list+content216+end_list+result22[14]+'<h3 id="content217">'+start_list+content217+end_list+result22[15]
content2_6 = '<h3 id="content218">'+start_list+content218+end_list+result22[16]+'<h3 id="content219">'+start_list+content219+end_list+result22[17]
content2 = content2 + content2_1 + content2_2 + content2_3 + content2_4 + content2_5+ content2_6
############ HTML 3.x ##############
content3 = """
<h2>CIS Azure 3.x</h2>
<ul>
"""
result31=check3.check31()
result32=check3.check32()
result33=check3.check33()
result34=check3.check34()
result35=check3.check35()
result36=check3.check36()
result37=check3.check37()
content31="3.1 Ensure that 'Secure transfer required' is set to 'Enabled'"
content32="3.2 Ensure that 'Storage service encryption' is set to Enabled for Blob Service"
content33="3.3 Ensure that storage account access keys are periodically regenerated"
content34="3.4 Ensure that shared access signature tokens expire within an hour"
content35="3.5 Ensure that shared access signature tokens are allowed only over https"
content36="3.6 Ensure that 'Storage service encryption' is set to Enabled for File Service"
content37="3.7 Ensure that 'Public access level' is set to Private for blob containers"
content3_1 = '<h3 id="content31">'+start_list+content31+end_list+result31[0]
content3_2 = '<h3 id="content32">'+start_list+content32+end_list+result32[0]
content3_3 = '<h3 id="content33">'+start_list+content33+end_list+result33[0]
content3_4 = '<h3 id="content34">'+start_list+content34+end_list+result34+'<h3 id="content35">'+start_list+content35+end_list+result35+'<h3 id="content36">'+start_list+content36+end_list+result36[0]+'<h3 id="content37">'+start_list+content37+end_list+result37[0]
content3 = content3+content3_1+content3_2+content3_3+content3_4
################ HTML 4.x ##############
content4 = """
<h2>CIS Azure 4.x</h2>
<ul>
"""
content411="4.1.1 Ensure that 'Auditing' is set to 'On'"
content412="4.1.2 Ensure that 'Threat Detection' is set to 'On'"
content413="4.1.3 Ensure that 'Threat Detection types' is set to 'All'"
content414="4.1.4 Ensure that 'Send alerts to' is set"
content415="4.1.5 Ensure that 'Email service and co-administrators' is 'Enabled'"
content416="4.1.6 Ensure that 'Auditing' Retention is 'greater than 90 days'"
content417="4.1.7 Ensure that 'Threat Detection' Retention is 'greater than 90 days'"
content418="4.1.8 Ensure that Azure Active Directory Admin is configured"
result41 = check4.check41(subid)
content41_1 = start_list+content411+end_list#+check41()+start_list+content412+end_list+check41()+start_list+content413+end_list+check41()+end_list
content41_2 = start_list+content414+end_list#+check41()+start_list+content415+end_list+check41()+start_list+content416+end_list+check41()+start_list+content417+end_list+check41()+start_list+content418+end_list+check41()
content41 = content41_1+content41_2
content421="4.2.1 Ensure that 'Auditing' is set to 'On'"
content422="4.2.2 Ensure that 'Threat Detection' is set to 'On'"
content423="4.2.3 Ensure that 'Threat Detection types' is set to 'All'"
content424="4.2.4 Ensure that 'Send alerts to' is set"
content425="4.2.5 Ensure that 'Email service and co-administrators' is 'Enabled'"
content426="4.2.6 Ensure that 'Data encryption' is set to 'On'"
content427="4.2.7 Ensure that 'Auditing' Retention is 'greater than 90 days'"
content428="4.2.8 Ensure that 'Threat' Retention is 'greater than 90 days'"
result42 = check4.check42(subid)
content42_1 = '<h3 id="content421">'+start_list+content421+end_list+result42[0][0]+'<h3 id="content422">'+start_list+content422+end_list+result42[1][0]+'<h3 id="content423">'+start_list+content423+end_list+result42[2][0]
content42_2 = '<h3 id="content424">'+start_list+content424+end_list+result42[3][0]+'<h3 id="content425">'+start_list+content425+end_list+result42[4][0]+'<h3 id="content426">'+start_list+content426+end_list+result42[5][0]
content42_3 = '<h3 id="content427">'+start_list+content427+end_list+result42[6][0]+'<h3 id="content428">'+start_list+content428+end_list+result42[7][0]
content42 = content42_1+content42_2+content42_3
content4 = content4 + content41 + content42
################ HTML 5.x ##############
content5 = """
<h2>CIS Azure 5.x</h2>
<ul>
"""
content51="5.1 Ensure that a Log Profile exists"
content52="5.2 Ensure that Activity Log Retention is set 365 days or greater"
content53="5.3 Ensure that Activity Log Alert exists for Create Policy Assignment"
content54="5.4 Ensure that Activity Log Alert exists for Create or Update Network Security Group"
content55="5.5 Ensure that Activity Log Alert exists for Delete Network Security Group"
content56="5.6 Ensure that Activity Log Alert exists for Create or Update Network Security Group Rule"
content57="5.7 Ensure that Activity Log Alert exists for Delete Network Security Group Rule"
content58="5.8 Ensure that Activity Log Alert exists for Create or Update Security Solution"
content59="5.9 Ensure that Activity Log Alert exists for Delete Security Solution"
content510="5.10 Ensure that Activity Log Alert exists for Create or Update SQL Server Firewall Rule"
content511="5.11 Ensure that Activity Log Alert exists for Delete SQL Server Firewall Rule"
content512="5.12 Ensure that Activity Log Alert exists for Update Security Policy"
content513="5.13 Ensure that logging for Azure KeyVault is 'Enabled'"
result5=check5.check50(subid)
content5_1 = '<h3 id="content51">'+start_list+content51+end_list+result5[0]+'<h3 id="content52">'+start_list+content52+end_list+result5[1]+'<h3 id="content53">'+start_list+content53+end_list+result5[2]
content5_2 = '<h3 id="content54">'+start_list+content54+end_list+result5[3]+'<h3 id="content55">'+start_list+content55+end_list+result5[4]+'<h3 id="content56">'+start_list+content56+end_list+result5[5]+'<h3 id="content57">'+start_list+content57+end_list+result5[6]+'<h3 id="content58">'+start_list+content58+end_list+result5[7]
content5_3 = '<h3 id="content59">'+start_list+content59+end_list+result5[8]+'<h3 id="content510">'+start_list+content510+end_list+result5[9]+'<h3 id="content511">'+start_list+content511+end_list+result5[10]
content5_4 = '<h3 id="content512">'+start_list+content512+end_list+result5[11]+'<h3 id="content513">'+start_list+content513+end_list+result5[12]
content50 = content5_1 + content5_2 + content5_3 + content5_4
content5 = content5 + content50
################ HTML 6.x ##############
content6 = """
<h2>CIS Azure | |
%i",
"pixel_samples %i %i",
"auto_light %s",
"auto_render_pass %s %s",
"shadows %i",
"mblur %i"
]
#--------------------------------------------------------------------------
# enums
#--------------------------------------------------------------------------
class kRenderer:
PRMan = 1
Air = 2
ThreeDelight = 3
Velocity = 4
MentalRay = 5
VRay = 6
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(RenderOption, self).__init__()
# remove the block header
header, block = block.partition('\n')[::2]
self._parseHeader(header)
# remove indent from block
block = self._removeIndent(block)
# parse the attributes
rest = self.parseAttributes(block, RenderOption._sBlockFormatting)
# save remaining attributes
self._raw = rest
#--------------------------------------------------------------------------
def __str__(self):
header = ("render %s *" if self.selected else "render %s") % self.name
attributes = self.printAttributes(RenderOption._sBlockFormatting)
block = "%s%s" % (attributes, self._raw)
return "%s\n%s" % (header, self._addIndent(block))
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def _parseHeader(self, header):
"""Render options block header contains render name and selection status.
"""
normal = "render %s"
selected = "render %s *"
# check if render selected
try:
(self.name,) = sscanf(header, selected)
self.selected = True
except IncompleteCaptureError, e:
(self.name,) = sscanf(header, normal)
self.selected = False
#------------------------------------------------------------------------------
# class DynamicsBlock
#------------------------------------------------------------------------------
class DynamicsBlock(Block):
"""Represent the dynamics settings.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sBlockFormatting = [
"terrain_collisions %d",
"self_collisions %d",
"object_collisions %d",
"rotation_constraints %d",
"spring_forces %d",
"spring_collisions %d",
"drag_forces %d",
"quickstep %d",
"rbd_solver %s" # ode, glowworm
]
#--------------------------------------------------------------------------
# enums
#--------------------------------------------------------------------------
class kSolver:
Ode = 'ode'
GlowWorm = 'glowworm'
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(DynamicsBlock, self).__init__()
self.parseAttributes(block, DynamicsBlock._sBlockFormatting)
#--------------------------------------------------------------------------
def __str__(self):
block = self.printAttributes(DynamicsBlock._sBlockFormatting)
return "Dynamics\n%sEnd dynamics" % self._addIndent(block)
#------------------------------------------------------------------------------
# class FlowBlock
#------------------------------------------------------------------------------
class FlowBlock(Block):
"""Scene flow fields.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sBlockFormatting = [
"indicators %d x %d",
"gap [%g %g %g] %g %g"
]
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(FlowBlock, self).__init__()
# initialize fields
self.splines = []
self.gaps = []
# setup special parse list
special_list = { "spline" : self._parseSpline }
# parse the attributes
self.parseAttributes(block, FlowBlock._sBlockFormatting, special_list)
#--------------------------------------------------------------------------
def __str__(self):
attributes = self.printAttributes(FlowBlock._sBlockFormatting[:1])
splines = "\n".join(map(str, self.splines))
gaps = self._printGaps() if self.gap else ""
block = "%s%s\n%s" % (attributes, splines, gaps)
return "Flow\n%sEnd flow" % self._addIndent(block)
#--------------------------------------------------------------------------
# helper methods
#--------------------------------------------------------------------------
def _parseSpline(self, block):
"""Collate all of the flow field splines.
"""
self.splines.append(FlowSpline(block))
#--------------------------------------------------------------------------
def _printGaps(self):
formatting = FlowBlock._sBlockFormatting[1]
gaps = [self.gap] if not isinstance(self.gap, list) else self.gap
return "\n".join([formatting % gap for gap in gaps]) + "\n"
#------------------------------------------------------------------------------
# class FlowSpline
#------------------------------------------------------------------------------
class FlowSpline(Block):
"""Spline used to represent a flow field.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sSplineFormatting = "spline %g %g %g %g %d"
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(FlowSpline, self).__init__()
# remove the block header
header, block = block.partition('\n')[::2]
self._parseHeader(header)
# remove indent from block
block = self._removeIndent(block)
# parse the points
self._parsePoints(block)
#--------------------------------------------------------------------------
def __str__(self):
header = FlowSpline._sSplineFormatting % self.spline
block = self._printPoints()
return "%s\n%s" % (header, self._addIndent(block))
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def _parseHeader(self, header):
"""Spline block header contains unknown data and number of points.
"""
formatting = FlowSpline._sSplineFormatting.replace('%g', '%f')
self.spline = sscanf(header, formatting)
#--------------------------------------------------------------------------
def _parsePoints(self, block):
"""Points representing spline.
"""
self.points = []
for line in block.strip('\n').split('\n'):
point = sscanf(line, "[%f %f %f %f %f %f %f %f %f %f]")
self.points.append(point)
#--------------------------------------------------------------------------
def _printPoints(self):
formatting = "[%g %g %g %g %g %g %g %g %g %g]"
points = "\n".join([formatting % point for point in self.points])
return points
#------------------------------------------------------------------------------
# class LaneBlock
#------------------------------------------------------------------------------
class LaneBlock(Block):
"""Scene lanes.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sBlockFormatting = [
"spline %d %g %g"
]
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(LaneBlock, self).__init__()
self.splines = []
special_list = { "spline" : self._parseSpline }
self.parseAttributes(block, [], special_list)
#--------------------------------------------------------------------------
def __str__(self):
block = "".join(map(str, self.splines))
return "Lane\n%sEnd lane" % self._addIndent(block)
#--------------------------------------------------------------------------
# helper methods
#--------------------------------------------------------------------------
def _parseSpline(self, block):
"""Collate all of the flow field splines.
"""
self.splines.append(LaneSpline(block))
#------------------------------------------------------------------------------
# class LaneSpline
#------------------------------------------------------------------------------
class LaneSpline(Block):
"""Spline used to represent a lane.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sSplineFormatting = "spline %d %g %g"
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(LaneSpline, self).__init__()
# initialize fields
self.points = []
self.tangents = None
# remove the block header
header, block = block.partition('\n')[::2]
self._parseHeader(header)
# remove indent from block
block = self._removeIndent(block)
# parse the points
rest = self._parsePoints(block, self.count)
        # parse tangents (optional; self.tangents already defaults to None above)
        if rest.startswith("tangents"):
self._parseTangents(rest)
#--------------------------------------------------------------------------
def __str__(self):
header = LaneSpline._sSplineFormatting % (self.count, self.hue, self.width)
points = self._printPoints()
tangents = self._printTangents() if self.tangents else ""
block = "%s\n%s" % (points, tangents)
return "%s\n%s" % (header, self._addIndent(block))
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def _parseHeader(self, header):
"""Spline block header contains point count, hue, and width.
"""
formatting = LaneSpline._sSplineFormatting.replace('%g', '%f')
(self.count, self.hue, self.width) = sscanf(header, formatting)
#--------------------------------------------------------------------------
def _parsePoints(self, block, count):
"""Points representing spline.
"""
lines = block.strip('\n').split('\n')
for line in lines[:count]:
point = sscanf(line, "[%f %f %f %f]")
self.points.append(point)
return "\n".join(lines[count:])
#--------------------------------------------------------------------------
def _printPoints(self):
formatting = "[%g %g %g %g]"
points = "\n".join([formatting % point for point in self.points])
return points
#--------------------------------------------------------------------------
def _parseTangents(self, block):
"""Tangents representing spline.
"""
self.tangents = []
for line in block.strip('\n').split('\n')[1:]:
tangent = sscanf(line, "[%f %f %f][%f %f %f]")
self.tangents.append(tangent)
#--------------------------------------------------------------------------
def _printTangents(self):
formatting = "[%g %g %g][%g %g %g]"
tangents = "\n".join([formatting % tangent for tangent in self.tangents])
return "tangents\n%s\n" % tangents
#------------------------------------------------------------------------------
# class SimsBlock
#------------------------------------------------------------------------------
class SimsBlock(Block):
"""Simulation options.
"""
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with raw block scene data.
"""
super(SimsBlock, self).__init__()
self.sims = []
special_list = { "sim" : self._parseSim }
self.parseAttributes(block, [], special_list)
#--------------------------------------------------------------------------
def __str__(self):
block = "".join(map(str, self.sims))
return "Sims\n%sEnd sims" % self._addIndent(block)
#--------------------------------------------------------------------------
# helper methods
#--------------------------------------------------------------------------
def _parseSim(self, block):
"""Collate all of the sim enties for the scene.
"""
self.sims.append(SimOption(block))
#------------------------------------------------------------------------------
# class SimOption
#------------------------------------------------------------------------------
class SimOption(Block):
"""Simulation option settings.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sBlockFormatting = [
"frames %d %d %d"
]
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(SimOption, self).__init__()
# initialize fields
self.process = None
self.input = None
self.output = None
# remove the block header
header, block = block.partition('\n')[::2]
self._parseHeader(header)
# remove indent from block
block = self._removeIndent(block)
# setup special parse list
special_list = {
"process" : self._parseProcess,
"input" : self._parseInput,
"output" : self._parseOutput
}
# setup skip list
skip_list = ['end']
# parse the attributes
self.parseAttributes(
block, SimOption._sBlockFormatting, special_list, skip_list)
#--------------------------------------------------------------------------
def __str__(self):
header = ("sim %s *" if self.selected else "sim %s") % self.name
attributes = self.printAttributes(SimOption._sBlockFormatting)
process = str(self.process) if self.process else ""
input = str(self.input) if self.input else ""
output = str(self.output) if self.output else ""
block = "%s%s%s%s" % (attributes, process, input, output)
return "%s\n%send sim\n" % (header, self._addIndent(block))
#--------------------------------------------------------------------------
# helper methods
#--------------------------------------------------------------------------
def _parseHeader(self, header):
"""Sim block header contains sim name and selection status.
"""
normal = "sim %s"
selected = "sim %s *"
# check if camera selected
try:
(self.name,) = sscanf(header, selected)
self.selected = True
except IncompleteCaptureError, e:
(self.name,) = sscanf(header, normal)
self.selected = False
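    # Example headers accepted above: "sim crowd_run" and "sim crowd_run *"
    # (selected).  sscanf raises IncompleteCaptureError when the trailing "*"
    # is absent, which is what the try/except relies on; RenderOption parses
    # its header the same way.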
#--------------------------------------------------------------------------
def _parseProcess(self, block):
"""Parse sim process options.
"""
self.process = SimOptionProcess(block)
#--------------------------------------------------------------------------
def _parseInput(self, block):
"""Parse sim input options.
"""
self.input = SimOptionInput(block)
#--------------------------------------------------------------------------
def _parseOutput(self, block):
"""Parse sim output options.
"""
self.output = SimOptionOutput(block)
#------------------------------------------------------------------------------
# class SimOptionProcess
#------------------------------------------------------------------------------
class SimOptionProcess(Block):
"""Simulation process options.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sBlockFormatting = [
"brain",
"cloth",
"hair",
"schedule"
]
#--------------------------------------------------------------------------
# enums
#--------------------------------------------------------------------------
class kProcess:
Brain = 'brain'
Cloth = 'cloth'
Hair = 'hair'
Schedule = 'schedule'
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(SimOptionProcess, self).__init__()
block = self._removeIndent(block.partition('\n')[2])
self.parseAttributes(block, SimOptionProcess._sBlockFormatting)
#--------------------------------------------------------------------------
def __str__(self):
block = self.printAttributes(SimOptionProcess._sBlockFormatting)
return "process\n" + self._addIndent(block)
#------------------------------------------------------------------------------
# class SimOptionInput
#------------------------------------------------------------------------------
class SimOptionInput(Block):
"""Simulation input options.
"""
#--------------------------------------------------------------------------
# statics
#--------------------------------------------------------------------------
_sBlockFormatting = [
"sims %s %s", # amc, amc_gz, apf, apf_gz, maya
"cloth %s %s", # mgeo, obj
"hair %s",
"camera %s"
]
#--------------------------------------------------------------------------
# methods
#--------------------------------------------------------------------------
def __init__(self, block):
"""Initialize self with scene data.
"""
super(SimOptionInput, self).__init__()
block = self._removeIndent(block.partition('\n')[2])
self.parseAttributes(block, SimOptionInput._sBlockFormatting)
#--------------------------------------------------------------------------
def __str__(self):
block = self.printAttributes(SimOptionInput._sBlockFormatting)
return "input\n" + self._addIndent(block)
#------------------------------------------------------------------------------
# import rekall
from esper.rekall import *
from rekall.video_interval_collection import VideoIntervalCollection
from rekall.interval_list import Interval, IntervalList
from rekall.temporal_predicates import *
from rekall.spatial_predicates import *
from rekall.parsers import in_array, bbox_payload_parser
from rekall.merge_ops import payload_plus
from rekall.payload_predicates import payload_satisfies
from rekall.list_predicates import length_exactly
# import caption search
# from esper.captions import *
# import query sets
from query.models import Video, Face, FaceIdentity, FaceGender
from django.db.models import F, Q
# import esper utils and widgets for selection
from esper.prelude import *
from esper.stdlib import *
from IPython.display import display, clear_output
import ipywidgets as widgets
import math
from functools import reduce
import numpy as np
import random
import os
import pickle
import tempfile
from tqdm import tqdm
import multiprocessing
from pydub import AudioSegment
import pysrt
import re
import cv2
import shutil
import multiprocessing as mp
# ============== Basic help functions ==============
def par_for_process(function, param_list, num_workers=32):
num_jobs = len(param_list)
print("Total number of %d jobs" % num_jobs)
if num_jobs == 0:
return
if num_jobs <= num_workers:
num_workers = num_jobs
num_jobs_p = 1
else:
num_jobs_p = math.ceil(1. * num_jobs / num_workers)
print("{} workers and {} jobs per worker".format(num_workers, num_jobs_p))
process_list = []
for i in range(num_workers):
if i != num_workers - 1:
param_list_p = param_list[i*num_jobs_p : (i+1)*num_jobs_p]
else:
param_list_p = param_list[i*num_jobs_p : ]
p = mp.Process(target=function, args=(param_list_p,))
process_list.append(p)
for p in process_list:
p.start()
# for p in process_list:
# p.join()
def second2time(second, sep=','):
h, m, s, ms = int(second) // 3600, int(second % 3600) // 60, int(second) % 60, int((second - int(second)) * 1000)
return '{:02d}:{:02d}:{:02d}{:s}{:03d}'.format(h, m, s, sep, ms)
def time2second(time):
return time[0]*3600 + time[1]*60 + time[2] + time[3] / 1000.0
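# Worked example: second2time(3661.5) -> '01:01:01,500' (h=1, m=1, s=1, ms=500),
# and time2second((1, 1, 1, 500)) -> 3661.5, so the two helpers round-trip.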
def count_intervals(intrvlcol):
num_intrvl = 0
for intrvllist in intrvlcol.get_allintervals().values():
num_intrvl += intrvllist.size()
return num_intrvl
def intrvlcol2list(intrvlcol, with_duration=True):
interval_list = []
for video_id, intrvllist in intrvlcol.get_allintervals().items():
if with_duration:
video = Video.objects.filter(id=video_id)[0]
for i in intrvllist.get_intervals():
if with_duration:
interval_list.append((video_id, i.start, i.end, (i.end - i.start) / video.fps))
else:
interval_list.append((video_id, i.start, i.end))
print("Get {} intervals from interval collection".format(len(interval_list)))
return interval_list
def interval2result(intervals):
materialized_result = [
{'video': video_id,
# 'track': t.id,
'min_frame': sfid,
'max_frame': efid }
for video_id, sfid, efid, duration in intervals ]
count = len(intervals)
groups = [{'type': 'flat', 'label': '', 'elements': [r]} for r in materialized_result]
return {'result': groups, 'count': count, 'type': 'Video'}
# ============== Video audio operations ==============
def stitch_video_temporal(intervals, out_path, out_duration=None, dilation=None,
speed=None, width=640, height=480):
intervals = intervals.copy()
def download_video_clip(i):
video_id, sfid, efid = intervals[i][:3]
video = Video.objects.filter(id=video_id)[0]
start, end = 1. * sfid / video.fps, 1. * efid / video.fps
video_path = video.download(segment=(start, end))
if i == len(intervals) - 1 and not dilation is None:
video_path = mute_video(video_path)
return video_path
in_duration = sum([i[-1] for i in intervals])
if dilation is None or dilation < 0.1:
dilation = None
if not dilation is None:
video_id, sfid, efid = intervals[-1][:3]
video = Video.objects.filter(id=video_id)[0]
intervals.append((video_id, efid, efid + int(dilation*video.fps), dilation))
# download clips for each phrase
clip_paths = par_for(download_video_clip, [i for i in range(len(intervals))])
# concat phrase clips
tmp_path = tempfile.NamedTemporaryFile(suffix='.mp4').name
if dilation is None and len(intervals) > 1:
concat_videos(clip_paths, tmp_path, width=width, height=height)
elif not dilation is None and len(intervals) > 2:
concat_videos(clip_paths[:-1], tmp_path, width=width, height=height)
else:
tmp_path = clip_paths[0]
# global change sentence speed
if not out_duration is None:
speed = in_duration / out_duration
print(in_duration, out_duration, speed)
speed = max(0.5, speed)
speed = min(2.0, speed)
tmp_path2 = tempfile.NamedTemporaryFile(suffix='.mp4').name
cmd = 'ffmpeg -y -i {} -filter_complex "[0:v]setpts={}*PTS[v];[0:a]atempo={}[a]" -map "[v]" -map "[a]" {}' \
.format(tmp_path, 1 / speed, speed, tmp_path2)
os.system(cmd)
tmp_path = tmp_path2
# concat the dilation clip
if not dilation is None:
concat_videos([tmp_path, clip_paths[-1]], out_path, width=width, height=height)
else:
shutil.move(tmp_path, out_path)
def make_montage_t(args):
(videos, frames, kwargs) = args
return make_montage(videos, frames, **kwargs)
def stitch_video_spatial(intervals, out_path, align=False, **kwargs):
'''
Stitch video into live montage
@intervals: list of (video_id, start_frame_id, end_frame_id)
@out_path: output video path
@align: if true, adjust the speed of each video clip, so that each clip's duration equals to the median of them
@kwargs: params for calling make_montage()
'''
def gcd(a, b):
return gcd(b, a % b) if b else a
id2video = {i[0]: Video.objects.filter(id=i[0])[0] for i in intervals}
videos = [id2video[i[0]] for i in intervals]
fps = reduce(gcd, [int(math.ceil(v.fps)) for v in videos])
# print('gcd fps', fps)
nframes = []
for i in intervals:
video_id, sfid, efid = i[:3]
n = (efid - sfid) / math.ceil(id2video[video_id].fps) * fps
nframes.append(int(n))
if align:
nframe_mix = np.median(nframes).astype(int)
else:
nframe_mix = max(nframes)
kwargs_list = []
for i in range(nframe_mix):
frames = []
for idx, intrv in enumerate(intervals):
video_id, sfid, efid = intrv[:3]
fid_shift = int(math.ceil(id2video[video_id].fps) / fps) * i
if align:
fid_shift = int(round(1. * fid_shift / nframe_mix * nframes[idx]))
frames.append(fid_shift + sfid)
# print(frames)
kwargs_list.append((videos, frames, kwargs))
first = make_montage_t(kwargs_list[0])
vid = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'XVID'), fps,
(first.shape[1], first.shape[0]))
frames = par_for(
make_montage_t, kwargs_list,
workers=16,
process=True)
for frame in tqdm(frames):
vid.write(frame)
vid.release()
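# Minimal usage sketch for stitch_video_spatial (the video ids and frame ranges
# below are hypothetical placeholders; extra keyword arguments are passed through
# to make_montage unchanged):
#
#   clips = [(1, 100, 250), (2, 300, 450), (3, 500, 650)]
#   stitch_video_spatial(clips, '/tmp/montage.avi', align=True)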
def mix_audio(intervals, out_path, decrease_volume=3, align=False):
def download_audio_clip(i):
video_id, sfid, efid = intervals[i][:3]
video = Video.objects.filter(id=video_id)[0]
video_path = video.download(segment=(1.*sfid/video.fps, 1.*efid/video.fps))
if align:
speed = durations[i] / duration_mix
speed = max(0.5, speed)
speed = min(2.0, speed)
# print(speed)
tmp_path = tempfile.NamedTemporaryFile(suffix='.mp4').name
cmd = 'ffmpeg -i {} -filter:a "atempo={}" -vn {}'.format(video_path, speed, tmp_path)
# print(cmd)
os.system(cmd)
video_path = tmp_path
return AudioSegment.from_file(video_path, format="mp4")
durations = []
for i in intervals:
video_id, sfid, efid = i[:3]
video = Video.objects.filter(id=video_id)[0]
d = (efid - sfid) / video.fps
durations.append(d)
duration_mix = np.median(durations)
print("Audio clip duration: min=%.3fs max=%.3fs" % (min(durations), max(durations)))
audios = par_for(download_audio_clip, [i for i in range(len(intervals))])
audio_mix = AudioSegment.silent(duration=int(duration_mix*1000))
for audio in audios:
audio_mix = audio_mix.overlay(audio)
audio_mix = audio_mix - decrease_volume
audio_mix.export(out_path, format="wav")
def concat_video_audio(video_path, audio_path, out_path):
tmp_path = tempfile.NamedTemporaryFile(suffix='.avi').name
cmd_merge = 'ffmpeg -y -i {} -i {} -c:v copy -c:a aac -strict experimental {}' \
.format(video_path, audio_path, tmp_path)
cmd_avi2mp4 = 'ffmpeg -y -i {} -c:a aac -b:a 128k -c:v libx264 -crf 23 {}'.format(tmp_path, out_path)
os.system(cmd_merge)
os.system(cmd_avi2mp4)
def replace_audio(video_path, audio_path, out_path):
cmd = 'ffmpeg -y -i {} -i {} -c:v copy -map 0:v:0 -map 1:a:0 {}' \
.format(video_path, audio_path, out_path)
os.system(cmd)
def add_bgm(video_path, bgm_path, out_path, bgm_decrease=2):
audio_ori = AudioSegment.from_file(video_path, format='mp4')
audio_bgm = AudioSegment.from_file(bgm_path, format=bgm_path[-3:])
audio_mix = audio_ori.overlay(audio_bgm - bgm_decrease)
tmp_path = tempfile.NamedTemporaryFile(suffix='.wav').name
audio_mix.export(tmp_path, format='wav')
replace_audio(video_path, tmp_path, out_path)
def mute_video(video_path):
audio = AudioSegment.from_file(video_path, format='mp4')
silence = AudioSegment.silent(duration=len(audio))
silence_path = tempfile.NamedTemporaryFile(suffix='.wav').name
silence.export(silence_path, format='wav')
out_path = tempfile.NamedTemporaryFile(suffix='.mp4').name
replace_audio(video_path, silence_path, out_path)
return out_path
def create_silent_clip(person_intrvlcol, out_path, out_duration):
intervals = []
for video_id, intrvllist in person_intrvlcol.get_allintervals().items():
video = Video.objects.filter(id=video_id)[0]
for i in intrvllist.get_intervals():
duration = (i.end - i.start) / video.fps
if duration > out_duration:
intervals.append((video, i.start / video.fps, i.end / video.fps))
if len(intervals) > 10:
break
video, start, end = random.choice(intervals)
video_path = video.download(segment=(start, start + out_duration))
video_path = mute_video(video_path)
return video_path
# ============== Queries with rekall ==============
def get_person_intrvlcol(person_name, **kwargs):
# if video_ids is None:
# videos = Video.objects.filter(threeyears_dataset=True)
# video_ids = [video.id for video in videos]
# faceIDs = FaceIdentity.objects \
# .annotate(video_id=F("face__frame__video_id")) \
# .annotate(shot_boundary=F("face__frame__shot_boundary"))
# if not video_ids is None:
# faceIDs = faceIDs.filter(video_id__in=video_ids, shot_boundary=True) \
if kwargs['labeler'] == 'old': # old labeler model
faceIDs = FaceIdentity.objects \
.exclude(face__shot__isnull=True) \
.filter(Q(labeler__name='face-identity-converted:'+person_name.lower()) |
Q(labeler__name='face-identity:'+person_name.lower()) ) \
.filter(probability__gt=0.9) \
.annotate(height=F("face__bbox_y2") - F("face__bbox_y1"))
if 'large_face' in kwargs:
faceIDs = faceIDs.filter(height__gte=0.3)
person_intrvllists = qs_to_intrvllists(
faceIDs.annotate(video_id=F("face__shot__video_id"))
.annotate(shot_id=F("face__shot_id"))
.annotate(min_frame=F("face__shot__min_frame"))
.annotate(max_frame=F("face__shot__max_frame")),\
schema={
'start': 'min_frame',
'end': 'max_frame',
'payload': 'shot_id'
})
person_intrvlcol = VideoIntervalCollection(person_intrvllists)
else: # new labeler model
faceIDs = FaceIdentity.objects \
.filter(face__frame__shot_boundary=False) \
.filter(Q(labeler__name='face-identity-converted:'+person_name.lower()) |
Q(labeler__name='face-identity:'+person_name.lower()) ) \
.filter(probability__gt=0.9) \
.annotate(height=F("face__bbox_y2") - F("face__bbox_y1"))
if 'large_face' in kwargs:
faceIDs = faceIDs.filter(height__gte=0.3)
person_intrvllists_raw = qs_to_intrvllists(
faceIDs.annotate(video_id=F("face__frame__video_id"))
.annotate(frame_id=F("face__frame__number"))
.annotate(min_frame=F("face__frame__number"))
.annotate(max_frame=F("face__frame__number") + 1),\
schema={
'start': 'min_frame',
'end': 'max_frame',
'payload': 'frame_id'
})
# dilate and coalesce
person_intrvllists = {}
for video_id, intrvllist in person_intrvllists_raw.items():
video = Video.objects.filter(id=video_id)[0]
person_intrvllists[video_id] = intrvllist.dilate(int(video.fps*1.6)).coalesce()
person_intrvlcol = VideoIntervalCollection(person_intrvllists)
print("Get {} intervals for person {}".format(count_intervals(person_intrvlcol), person_name))
return person_intrvlcol
def get_caption_intrvlcol(phrase, video_ids=None):
results = phrase_search(phrase, video_ids)
if video_ids is None:
videos = {v.id: v for v in Video.objects.all()}
else:
videos = {v.id: v for v in Video.objects.filter(id__in=video_ids).all()}
def convert_time(k, t):
return int(t * videos[k].fps)
flattened = [
(doc.id, convert_time(doc.id, p.start), convert_time(doc.id, p.end))
for doc in results
for p in doc.postings
]
phrase_intrvllists = {}
for video_id, t1, t2 in flattened:
    if video_id not in phrase_intrvllists:
        phrase_intrvllists[video_id] = []
    phrase_intrvllists[video_id].append((t1, t2, 0))
return VideoIntervalCollection({video_id: IntervalList(intrvls)
                                for video_id, intrvls in phrase_intrvllists.items()})
url = '/api/notifications/expired/?listing=160' # ID 160 belongs to Stroke play
response = APITestHelper.request(self, url, 'GET', username='jones', status_code=200)
notification_list = self._format_notification_response(response)
expected = ['160-listing-asimplelistingtest']
self.assertEqual(expected, notification_list)
def test_all_expired_notifications_listing_filter_user_unauthorized(self):
url = '/api/notifications/expired/?listing=1'
APITestHelper.request(self, url, 'GET', username='jones', status_code=403)
# TODO: test_all_notifications_listing_filter (rivera 20160617)
def test_create_system_notification(self):
data = {'expires_date': '2016-09-01T15:45:55.322421Z',
'message': 'a simple test'}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)
self.assertEqual(response.data['message'], data['message'])
self.assertEqual(response.data['notification_type'], 'system')
def test_create_system_notification_unauthorized_user(self):
# test_create_system_notification_unauthorized_user
# test unauthorized user - only org stewards and above can create
data = {'expires_date': '2016-09-01T15:45:55.322421Z',
'message': 'a simple test'}
url = '/api/notification/'
APITestHelper.request(self, url, 'POST', data=data, username='jones', status_code=403)
def test_update_system_notification(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now)}
url = '/api/notification/1/'
APITestHelper.request(self, url, 'PUT', data=data, username='wsmith', status_code=200)
# TODO: Verify expires_date
def test_update_system_notification_unauthorized_user(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now)}
url = '/api/notification/1/'
APITestHelper.request(self, url, 'PUT', data=data, username='jones', status_code=403)
# TODO below test should work when permission gets refactored (rivera 20160620)
@skip("should work permissions gets refactored (rivera 20160620)")
def test_update_system_notification_unauthorized_org_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now)}
url = '/api/notification/1/'
APITestHelper.request(self, url, 'PUT', data=data, username='wsmith', status_code=403)
def test_create_listing_notification_app_mall_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'a simple listing test',
'listing': {
'id': 1
}}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)
self.assertEqual(response.data['message'], 'a simple listing test')
self.assertEqual(response.data['notification_type'], 'listing')
self.assertEqual(response.data['listing']['id'], 1)
self.assertEqual(response.data['agency'], None)
self.assertTrue('expires_date' in data)
def test_create_listing_notification_invalid_format_all_groups(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'a simple listing test',
'listing': {'invalid': 1}
}
url = '/api/notification/'
usernames = ['bigbrother', 'wsmith', 'jones']
for username in usernames:
response = APITestHelper.request(self, url, 'POST', data=data, username=username, status_code=400)
self.assertEqual(response.data, ExceptionUnitTestHelper.validation_error("{'non_field_errors': ['Valid Listing ID is required']}"))
def test_create_listing_notification_invalid_id_all_groups(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'a simple listing test',
'listing': {'id': -1}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=400)
usernames = ['bigbrother', 'wsmith', 'jones']
for username in usernames:
response = APITestHelper.request(self, url, 'POST', data=data, username=username, status_code=400)
self.assertEqual(response.data, ExceptionUnitTestHelper.validation_error("{'non_field_errors': ['Could not find listing']}"))
def test_create_listing_agency_notification_app_mall_steward_invalid(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'a simple listing test',
'listing': {'id': 1},
'agency': {'id': 1}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=400)
self.assertEqual(response.data, ExceptionUnitTestHelper.validation_error('{\'non_field_errors\': ["Notifications can only be one type. ''Input: [\'listing\', \'agency\']"]}'))
def test_create_listing_notification_org_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'a simple listing test',
'listing': {'id': 1}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='wsmith', status_code=201)
self.assertEqual(response.data['message'], 'a simple listing test')
self.assertEqual(response.data['notification_type'], 'listing')
self.assertEqual(response.data['listing']['id'], 1)
self.assertEqual(response.data['agency'], None)
self.assertTrue('expires_date' in data)
def test_create_agency_notification_app_mall_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'A Simple Agency Test',
'agency': {'id': 1}}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)
self.assertEqual(response.data['message'], 'A Simple Agency Test')
self.assertEqual(response.data['notification_type'], 'agency')
self.assertEqual(response.data['agency']['id'], 1)
self.assertEqual(response.data['listing'], None)
self.assertTrue('expires_date' in data)
def test_create_agency_notification_app_mall_steward_invalid_format(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'a simple agency test',
'agency': {'invalid': 1}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=400)
self.assertEqual(response.data, ExceptionUnitTestHelper.validation_error("{'non_field_errors': ['Valid Agency ID is required']}"))
def test_create_agency_notification_app_mall_steward_invalid_id(self):
now = datetime.datetime.now(pytz.utc)
data = {'expires_date': str(now),
'message': 'a simple agency test',
'agency': {'id': -1}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=400)
self.assertEqual(response.data, ExceptionUnitTestHelper.validation_error("{'non_field_errors': ['Could not find agency']}"))
# TODO: test_create_agency_notification_org_steward (rivera 20160617)
# TODO: test_create_agency_notification_org_steward_invalid (rivera 20160617)
# TODO: test_create_agency_notification_user_unauthorized (rivera 20160617)
def test_create_peer_notification_app_mall_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {"expires_date": str(now),
"message": "A Simple Peer to Peer Notification",
"peer": {
"user": {
"username": "jones"
}}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)
self.assertEqual(response.data['message'], 'A Simple Peer to Peer Notification')
self.assertEqual(response.data['notification_type'], 'peer')
self.assertEqual(response.data['agency'], None)
self.assertEqual(response.data['listing'], None)
self.assertEqual(response.data['peer'], {'user': {'username': 'jones'}})
self.assertTrue('expires_date' in data)
def test_create_review_request_notification_app_mall_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {
"expires_date": str(now),
"message": "Please review your agency's apps and make sure their information is up to date",
"notification_type": "StewardAppNotification"
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)
self.assertEqual(response.data['message'], "Please review your agency's apps and make sure their information is up to date")
self.assertEqual(response.data['notification_type'], 'system')
self.assertEqual(response.data['notification_subtype'], 'review_request')
self.assertEqual(response.data['agency'], None)
self.assertEqual(response.data['listing'], None)
self.assertEqual(response.data['peer'], None)
self.assertTrue('expires_date' in data)
@skip("should work when data script gets refactored so org stewards cant create system notifications (semesky 20171102)")
def test_create_review_request_notification_unauthorized_org_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {
"expires_date": str(now),
"message": "Please review your agency's apps and make sure their information is up to date",
"notification_type": "StewardAppNotification"
}
url = '/api/notification/'
APITestHelper.request(self, url, 'POST', data=data, username='wsmith', status_code=403)
def test_create_review_request_notification_unauthorized_user(self):
now = datetime.datetime.now(pytz.utc)
data = {
"expires_date": str(now),
"message": "Please review your agency's apps and make sure their information is up to date",
"notification_type": "StewardAppNotification"
}
url = '/api/notification/'
APITestHelper.request(self, url, 'POST', data=data, username='jones', status_code=403)
@skip("should work when data script gets refactored (rivera 20160620)")
def test_create_peer_bookmark_notification_app_mall_steward(self):
now = datetime.datetime.now(pytz.utc)
data = {
"expires_date": str(now),
"message": "A Simple Peer to Peer Notification",
"peer": {
"user": {
"username": "jones"
},
"folder_name": "folder"
}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)
self.assertEqual(response.data['message'], 'A Simple Peer to Peer Notification')
self.assertEqual(response.data['notification_type'], 'peer_bookmark')
self.assertEqual(response.data['agency'], None)
self.assertEqual(response.data['listing'], None)
self.assertTrue('expires_date' in data)
def test_create_review_notification(self):
# Check notifications for new review notification
url = '/api/self/notification/'
response = APITestHelper.request(self, url, 'GET', username='bigbrother', status_code=200)
notification_count = len(response.data)
# Review for Lamprey listing owned by bigbrother
review_data = {
"listing": 91,
"rate": 5,
"text": "This is a review for the listing"
}
url = '/api/listing/91/review/'
response = APITestHelper.request(self, url, 'POST', data=review_data, username='jones', status_code=201)
# Check notifications for new review notification
url = '/api/self/notification/'
response = APITestHelper.request(self, url, 'GET', username='bigbrother', status_code=200)
self.assertEqual(len(response.data), notification_count + 1)
self.assertEqual(response.data[0]['entity_id'], review_data['listing'])
self.assertEqual(response.data[0]['author']['user']['username'], 'jones')
self.assertEqual(response.data[0]['listing']['title'], 'Lamprey')
self.assertEqual(response.data[0]['message'], 'A user has rated listing <b>Lamprey</b> 5 stars')
def test_create_review_response(self):
# Review for Lamprey listing owned by bigbrother
review_data = {
"listing": 91,
"rate": 5,
"text": "This is a review for the listing"
}
url = '/api/listing/91/review/'
response = APITestHelper.request(self, url, 'POST', data=review_data, username='jones', status_code=201)
# Create response to the review created
review_response_data = {
"listing": 91,
"rate": 1,
"text": "This is a response to a review",
"review_parent": response.data['id']
}
url = '/api/listing/91/review/'
APITestHelper.request(self, url, 'POST', data=review_response_data, username='jones', status_code=201)
# Verify no notifications have been created for creating a review response
url = '/api/self/notification/'
response = APITestHelper.request(self, url, 'GET', username='bigbrother', status_code=200)
self.assertEqual(response.data[0]['entity_id'], review_data['listing'])
self.assertEqual(response.data[0]['author']['user']['username'], 'jones')
self.assertEqual(response.data[0]['listing']['title'], 'Lamprey')
self.assertEqual(response.data[0]['message'], 'A user has rated listing <b>Lamprey</b> 5 stars')
# TODO test_create_peer_notification_invalid (rivera 20160617)
# TODO test_create_peer_bookmark_notification (rivera 20160617)
def _compare_library(self, usernames_list):
usernames_list_actual = {}
for username, ids_list in usernames_list.items():
url = '/api/self/library/'
response = APITestHelper.request(self, url, 'GET', username=username, status_code=200)
before_notification_ids = ['{}-{}'.format(entry['listing']['title'], entry['folder']) for entry in response.data]
usernames_list_actual[username] = before_notification_ids
for username, ids_list in usernames_list.items():
before_notification_ids = usernames_list_actual[username]
self.assertEqual(sorted(ids_list), sorted(before_notification_ids), 'Checking for {}'.format(username))
def _compare_user_notification(self, notification_user_list):
usernames_list = notification_user_list
for username, ids_list in usernames_list.items():
url = '/api/self/notification/'
response = APITestHelper.request(self, url, 'GET', username=username, status_code=200)
before_notification_ids = ['{}-{}'.format(entry.get('notification_type'), ''.join(entry.get('message').split())) for entry in response.data]
self.assertEqual(ids_list, before_notification_ids, 'Comparing Notifications for {}'.format(username))
def test_create_restore_bookmark_notification_integration(self):
"""
test_create_restore_bookmark_notification_integration
Setup initial bookmark / folders for bigbrother
Create notification that folder Instruments has been deleted
Delete Instruments folder
Restore Instruments folder
"""
# Library for user
user_library = {}
user_library['bigbrother'] = self.user_library_bigbrother
# Compare Notifications for user
user_notifications_list = {}
user_notifications_list['bigbrother'] = self.self_notifications_bigbrother
usernames_list_main = user_notifications_list
usernames_list_actual = {}
for username, ids_list in user_notifications_list.items():
url = '/api/self/notification/'
response = APITestHelper.request(self, url, 'GET', username=username, status_code=200)
before_notification_ids = ['{}-{}'.format(entry.get('notification_type'), ''.join(entry.get('message').split())) for entry in response.data]
usernames_list_actual[username] = before_notification_ids
for username, ids_list in user_notifications_list.items():
before_notification_ids = usernames_list_actual[username]
self.assertEqual(ids_list, before_notification_ids, 'Checking for {}'.format(username))
self._compare_library(user_library)
# Create Bookmark Notification
bookmark_notification_ids = []
bookmark_notification_ids_raw = []
for i in range(3):
now = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=5)
data = {
'expires_date': str(now),
'message': 'restore folder Instruments',
'peer': {
'user': {
'username': 'bigbrother',
},
'folder_name': 'Instruments',
'deleted_folder': True
}
}
url = '/api/notification/'
response = APITestHelper.request(self, url, 'POST', data=data, username='bigbrother', status_code=201)
self.assertEqual(response.data['message'], 'restore folder Instruments')
self.assertEqual(response.data['notification_type'], 'restore_bookmark')
self.assertEqual(response.data['agency'], None)
self.assertEqual(response.data['listing'], None)
peer_data = {'user': {'username': 'bigbrother'}, 'folder_name': 'Instruments', 'deleted_folder': True} # '_bookmark_listing_ids': [3, 4]}
self.assertEqual(response.data['peer'], peer_data)
self.assertTrue('expires_date' in data)
bookmark_notification_ids.append('{}-{}'.format(response.data['notification_type'], ''.join(response.data['message'].split())))
bookmark_notification_ids_raw.append(response.data['id'])
# Compare Notifications for users
| |
from collections import OrderedDict, defaultdict
import random
"""
Helper functions and classes involving graph structures.
Copied from Daza & Cochez (2020).
"""
def _reverse_relation(relation):
return (relation[-1], relation[1], relation[0])
def _reverse_edge(edge):
return (edge[-1], _reverse_relation(edge[1]), edge[0])
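# Illustrative example (hypothetical modes and relation name): a relation is a
# (source_mode, rel_name, target_mode) triple and an edge is (node, relation, node), so
#   _reverse_relation(('user', 'rates', 'movie')) -> ('movie', 'rates', 'user')
#   _reverse_edge(('u1', ('user', 'rates', 'movie'), 'm1'))
#       -> ('m1', ('movie', 'rates', 'user'), 'u1')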
class Formula():
def __init__(self, query_type, rels):
self.query_type = query_type
self.target_mode = rels[0][0]
self.rels = rels
if query_type == "1-chain" or query_type == "2-chain" or query_type == "3-chain":
self.anchor_modes = (rels[-1][-1],)
elif query_type == "2-inter" or query_type == "3-inter":
self.anchor_modes = tuple([rel[-1] for rel in rels])
elif query_type == "3-inter_chain":
self.anchor_modes = (rels[0][-1], rels[1][-1][-1])
elif query_type == "3-chain_inter":
self.anchor_modes = (rels[1][0][-1], rels[1][1][-1])
@staticmethod
def flatten(S):
if len(S) == 0:
return S
if isinstance(S[0], tuple):
return Formula.flatten(S[0]) + Formula.flatten(S[1:])
return S[:1] + Formula.flatten(S[1:])
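# Worked example:
#   Formula.flatten((('a', 'r1', 'b'), (('b', 'r2', 'c'), ('c', 'r3', 'd'))))
#   -> ('a', 'r1', 'b', 'b', 'r2', 'c', 'c', 'r3', 'd')
# Nested relation tuples are flattened depth-first, so get_rels()/get_nodes()
# can re-chunk the result in groups of three.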
def get_rels(self):
flat_rels = Formula.flatten(self.rels)
rels = []
for i in range(0, len(flat_rels), 3):
rels.append(tuple(flat_rels[i:i+3]))
return rels
def get_nodes(self):
flat_rels = Formula.flatten(self.rels)
variables = []
for i in range(0, len(flat_rels), 3):
variables.extend([flat_rels[i], flat_rels[i+2]])
return variables
def __hash__(self):
return hash((self.query_type, self.rels))
def __eq__(self, other):
return ((self.query_type, self.rels)) == ((other.query_type, other.rels))
def __ne__(self, other):
return ((self.query_type, self.rels)) != ((other.query_type, other.rels))
def __str__(self):
return self.query_type + ": " + str(self.rels)
class Query():
def __init__(self, query_graph, neg_samples, hard_neg_samples, neg_sample_max=100, keep_graph=False):
query_type = query_graph[0]
if query_type == "1-chain" or query_type == "2-chain" or query_type == "3-chain":
self.formula = Formula(query_type, tuple([query_graph[i][1] for i in range(1, len(query_graph))]))
self.anchor_nodes = (query_graph[-1][-1],)
elif query_type == "2-inter" or query_type == "3-inter":
self.formula = Formula(query_type, tuple([query_graph[i][1] for i in range(1, len(query_graph))]))
self.anchor_nodes = tuple([query_graph[i][-1] for i in range(1, len(query_graph))])
elif query_type == "3-inter_chain":
self.formula = Formula(query_type, (query_graph[1][1], (query_graph[2][0][1], query_graph[2][1][1])))
self.anchor_nodes = (query_graph[1][-1], query_graph[2][-1][-1])
elif query_type == "3-chain_inter":
self.formula = Formula(query_type, (query_graph[1][1], (query_graph[2][0][1], query_graph[2][1][1])))
self.anchor_nodes = (query_graph[2][0][-1], query_graph[2][1][-1])
self.target_node = query_graph[1][0]
if keep_graph:
self.query_graph = query_graph
else:
self.query_graph = None
if not neg_samples is None:
self.neg_samples = list(neg_samples) if len(neg_samples) < neg_sample_max else random.sample(neg_samples, neg_sample_max)
else:
self.neg_samples = None
if not hard_neg_samples is None:
self.hard_neg_samples = list(hard_neg_samples) if len(hard_neg_samples) <= neg_sample_max else random.sample(hard_neg_samples, neg_sample_max)
else:
self.hard_neg_samples = None
def contains_edge(self, edge):
if self.query_graph is None:
raise Exception("Can only test edge containment if the query graph is kept. Reinit with keep_graph=True")
edges = self.query_graph[1:]
if "inter_chain" in self.query_graph[0] or "chain_inter" in self.query_graph[0]:
edges = (edges[0], edges[1][0], edges[1][1])
return edge in edges or (edge[1], _reverse_relation(edge[1]), edge[0]) in edges
def get_edges(self):
if self.query_graph is None:
raise Exception("Can only get edges if the query graph is kept. Reinit with keep_graph=True")
edges = self.query_graph[1:]
if "inter_chain" in self.query_graph[0] or "chain_inter" in self.query_graph[0]:
edges = (edges[0], edges[1][0], edges[1][1])
return set(edges).union(set([(e[-1], _reverse_relation(e[1]), e[0]) for e in edges]))
def __hash__(self):
return hash((self.formula, self.target_node, self.anchor_nodes))
def __eq__(self, other):
return (self.formula, self.target_node, self.anchor_nodes) == (other.formula, other.target_node, other.anchor_nodes)
def __ne__(self, other):
return self.__hash__() != other.__hash__()
def serialize(self):
if self.query_graph is None:
raise Exception("Cannot serialize query without its query graph. Reinit with keep_graph=True")
return (self.query_graph, self.neg_samples, self.hard_neg_samples)
@staticmethod
def deserialize(serial_info, keep_graph=False):
return Query(serial_info[0], serial_info[1], serial_info[2], None if serial_info[1] is None else len(serial_info[1]), keep_graph=keep_graph)
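# Round-trip sketch (assuming q was built with keep_graph=True, since serialize()
# raises otherwise): Query.deserialize(q.serialize(), keep_graph=True) rebuilds an
# equivalent Query, with neg_sample_max set to len(q.neg_samples) so no negatives
# are dropped on the way back in.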
class Graph():
"""
Simple container for heterogeneous graph data.
"""
def __init__(self, features, feature_dims, relations, adj_lists):
self.features = features
self.feature_dims = feature_dims
self.relations = relations
self.adj_lists = adj_lists
self.full_sets = defaultdict(set)
self.full_lists = {}
self.meta_neighs = defaultdict(dict)
for rel, adjs in self.adj_lists.items():
full_set = set(self.adj_lists[rel].keys())
self.full_sets[rel[0]] = self.full_sets[rel[0]].union(full_set)
for mode, full_set in self.full_sets.items():
self.full_lists[mode] = list(full_set)
self._cache_edge_counts()
self._make_flat_adj_lists()
def _make_flat_adj_lists(self):
self.flat_adj_lists = defaultdict(lambda : defaultdict(list))
for rel, adjs in self.adj_lists.items():
for node, neighs in adjs.items():
self.flat_adj_lists[rel[0]][node].extend([(rel, neigh) for neigh in neighs])
def _cache_edge_counts(self):
self.edges = 0.
self.rel_edges = {}
for r1 in self.relations:
for r2 in self.relations[r1]:
rel = (r1,r2[1], r2[0])
self.rel_edges[rel] = 0.
for adj_list in list(self.adj_lists[rel].values()):
self.rel_edges[rel] += len(adj_list)
self.edges += 1.
self.rel_weights = OrderedDict()
self.mode_edges = defaultdict(float)
self.mode_weights = OrderedDict()
for rel, edge_count in self.rel_edges.items():
self.rel_weights[rel] = edge_count / self.edges
self.mode_edges[rel[0]] += edge_count
for mode, edge_count in self.mode_edges.items():
self.mode_weights[mode] = edge_count / self.edges
def remove_edges(self, edge_list):
for edge in edge_list:
try:
self.adj_lists[edge[1]][edge[0]].remove(edge[-1])
except Exception:
continue
try:
self.adj_lists[_reverse_relation(edge[1])][edge[-1]].remove(edge[0])
except Exception:
continue
self.meta_neighs = defaultdict(dict)
self._cache_edge_counts()
self._make_flat_adj_lists()
def get_all_edges(self, seed=0, exclude_rels=set([])):
"""
Returns all edges in the form (node1, relation, node2)
"""
edges = []
random.seed(seed)
for rel, adjs in self.adj_lists.items():
if rel in exclude_rels:
continue
for node, neighs in adjs.items():
edges.extend([(node, rel, neigh) for neigh in neighs if neigh != -1])
random.shuffle(edges)
return edges
def get_all_edges_byrel(self, seed=0,
exclude_rels=set([])):
random.seed(seed)
edges = defaultdict(list)
for rel, adjs in self.adj_lists.items():
if rel in exclude_rels:
continue
for node, neighs in adjs.items():
edges[(rel,)].extend([(node, neigh) for neigh in neighs if neigh != -1])
return edges
def get_negative_edge_samples(self, edge, num, rejection_sample=True):
if rejection_sample:
neg_nodes = set([])
counter = 0
while len(neg_nodes) < num:
neg_node = random.choice(self.full_lists[edge[1][0]])
if not neg_node in self.adj_lists[_reverse_relation(edge[1])][edge[2]]:
neg_nodes.add(neg_node)
counter += 1
if counter > 100*num:
return self.get_negative_edge_samples(edge, num, rejection_sample=False)
else:
neg_nodes = self.full_sets[edge[1][0]] - self.adj_lists[_reverse_relation(edge[1])][edge[2]]
neg_nodes = list(neg_nodes) if len(neg_nodes) <= num else random.sample(list(neg_nodes), num)
return neg_nodes
def sample_test_queries(self, train_graph, q_types, samples_per_type, neg_sample_max, verbose=True):
queries = []
for q_type in q_types:
sampled = 0
while sampled < samples_per_type:
q = self.sample_query_subgraph_bytype(q_type)
if q is None or not train_graph._is_negative(q, q[1][0], False):
continue
negs, hard_negs = self.get_negative_samples(q)
if negs is None or ("inter" in q[0] and hard_negs is None):
continue
query = Query(q, negs, hard_negs, neg_sample_max=neg_sample_max, keep_graph=True)
queries.append(query)
sampled += 1
if sampled % 1000 == 0 and verbose:
print("Sampled", sampled)
return queries
def sample_queries(self, arity, num_samples, neg_sample_max, verbose=True):
sampled = 0
queries = []
while sampled < num_samples:
q = self.sample_query_subgraph(arity)
if q is None:
continue
negs, hard_negs = self.get_negative_samples(q)
if negs is None or ("inter" in q[0] and hard_negs is None):
continue
query = Query(q, negs, hard_negs, neg_sample_max=neg_sample_max, keep_graph=True)
queries.append(query)
sampled += 1
if sampled % 1000 == 0 and verbose:
print("Sampled", sampled)
return queries
def get_negative_samples(self, query):
if query[0] == "3-chain" or query[0] == "2-chain":
edges = query[1:]
rels = [_reverse_relation(edge[1]) for edge in edges[::-1]]
meta_neighs = self.get_metapath_neighs(query[-1][-1], tuple(rels))
negative_samples = self.full_sets[query[1][1][0]] - meta_neighs
if len(negative_samples) == 0:
return None, None
else:
return negative_samples, None
elif query[0] == "2-inter" or query[0] == "3-inter":
rel_1 = _reverse_relation(query[1][1])
union_neighs = self.adj_lists[rel_1][query[1][-1]]
inter_neighs = self.adj_lists[rel_1][query[1][-1]]
for i in range(2,len(query)):
rel = _reverse_relation(query[i][1])
union_neighs = union_neighs.union(self.adj_lists[rel][query[i][-1]])
inter_neighs = inter_neighs.intersection(self.adj_lists[rel][query[i][-1]])
neg_samples = self.full_sets[query[1][1][0]] - inter_neighs
hard_neg_samples = union_neighs - inter_neighs
if len(neg_samples) == 0 or len(hard_neg_samples) == 0:
return None, None
return neg_samples, hard_neg_samples
elif query[0] == "3-inter_chain":
rel_1 = _reverse_relation(query[1][1])
union_neighs = self.adj_lists[rel_1][query[1][-1]]
inter_neighs = self.adj_lists[rel_1][query[1][-1]]
chain_rels = [_reverse_relation(edge[1]) for edge in query[2][::-1]]
chain_neighs = self.get_metapath_neighs(query[2][-1][-1], tuple(chain_rels))
union_neighs = union_neighs.union(chain_neighs)
inter_neighs = inter_neighs.intersection(chain_neighs)
neg_samples = self.full_sets[query[1][1][0]] - inter_neighs
hard_neg_samples = union_neighs - inter_neighs
if len(neg_samples) == 0 or len(hard_neg_samples) == 0:
return None, None
return neg_samples, hard_neg_samples
elif query[0] == "3-chain_inter":
inter_rel_1 = _reverse_relation(query[-1][0][1])
inter_neighs_1 = self.adj_lists[inter_rel_1][query[-1][0][-1]]
inter_rel_2 = _reverse_relation(query[-1][1][1])
inter_neighs_2 = self.adj_lists[inter_rel_2][query[-1][1][-1]]
inter_neighs = inter_neighs_1.intersection(inter_neighs_2)
union_neighs = inter_neighs_1.union(inter_neighs_2)
rel = _reverse_relation(query[1][1])
pos_nodes = set([n for neigh in inter_neighs for n in self.adj_lists[rel][neigh]])
union_pos_nodes = set([n for neigh in union_neighs for n in self.adj_lists[rel][neigh]])
neg_samples = self.full_sets[query[1][1][0]] - pos_nodes
hard_neg_samples = union_pos_nodes - pos_nodes
if len(neg_samples) == 0 or len(hard_neg_samples) == 0:
return None, None
return neg_samples, hard_neg_samples
def sample_edge(self, node, mode):
rel, neigh = random.choice(self.flat_adj_lists[mode][node])
edge = (node, rel, neigh)
return edge
def sample_query_subgraph_bytype(self, q_type, start_node=None):
if start_node is None:
start_rel = random.choice(list(self.adj_lists.keys()))
node = random.choice(list(self.adj_lists[start_rel].keys()))
mode = start_rel[0]
else:
node, mode = start_node
if q_type[0] == "3":
if q_type == "3-chain" or q_type == "3-chain_inter":
num_edges = 1
elif q_type == "3-inter_chain":
num_edges = 2
elif q_type == "3-inter":
num_edges = 3
if num_edges > len(self.flat_adj_lists[mode][node]):
return None
if num_edges == 1:
rel, neigh = random.choice(self.flat_adj_lists[mode][node])
edge = (node, rel, neigh)
next_query = self.sample_query_subgraph_bytype(
"2-chain" if q_type == "3-chain" else "2-inter", start_node=(neigh, rel[-1]))
if next_query is None:
return None
if next_query[0] == "2-chain":
return ("3-chain", edge, next_query[1], next_query[2])
else:
return ("3-chain_inter", edge, (next_query[1], next_query[2]))
elif num_edges == 2:
rel_1, neigh_1 = random.choice(self.flat_adj_lists[mode][node])
edge_1 = (node, rel_1, neigh_1)
neigh_2 = neigh_1
rel_2 = rel_1
while (neigh_1, rel_1) == (neigh_2, rel_2):
rel_2, neigh_2 = random.choice(self.flat_adj_lists[mode][node])
edge_2 = (node, rel_2, neigh_2)
return ("3-inter_chain", edge_1, (edge_2, self.sample_edge(neigh_2, rel_2[-1])))
dg = Datagram.create([75757], 60, DBSERVER_OBJECT_GET_ALL)
dg.add_uint32(3) # Context
dg.add_doid(doid)
self.conn.send(dg)
# Retrieve object from the database
# The values should be updated
dg = Datagram.create([60], 75757, DBSERVER_OBJECT_GET_ALL_RESP)
dg.add_uint32(3) # Context
dg.add_uint8(SUCCESS)
dg.add_uint16(DistributedTestObject3)
dg.add_uint16(2) # Field count
dg.add_uint16(setDb3)
dg.add_string("Oh my gosh! Oh my gosh!! OMG! OMG!!!")
dg.add_uint16(setRDB3)
dg.add_uint32(54231)
self.expect(self.conn, dg)
# Update multiple values
dg = Datagram.create([75757], 60, DBSERVER_OBJECT_SET_FIELDS)
dg.add_doid(doid)
dg.add_uint16(3) # Field count
dg.add_uint16(setRDB3)
dg.add_uint32(9999)
dg.add_uint16(setDb3)
dg.add_string("... can you make me a sandwich?")
dg.add_uint16(setADb3)
dg.add_string("sudo make me a sandwich")
self.conn.send(dg)
# Expect SET_FIELDs broadcast
dg = Datagram.create([DATABASE_PREFIX|doid], 60, DBSERVER_OBJECT_SET_FIELDS)
dg.add_doid(doid)
dg.add_uint16(3) # Field count
dg.add_uint16(setDb3)
dg.add_string("... can you make me a sandwich?")
dg.add_uint16(setRDB3)
dg.add_uint32(9999)
dg.add_uint16(setADb3)
dg.add_string("sudo make me a sandwich")
self.expect(self.objects, dg)
# Select all fields from the stored object
dg = Datagram.create([75757], 60, DBSERVER_OBJECT_GET_ALL)
dg.add_uint32(4) # Context
dg.add_doid(doid)
self.conn.send(dg)
# Retrieve object from the database
# The values should be updated
dg = Datagram.create([60], 75757, DBSERVER_OBJECT_GET_ALL_RESP)
dg.add_uint32(4) # Context
dg.add_uint8(SUCCESS) # Status
dg.add_uint16(DistributedTestObject3)
dg.add_uint16(3) # Field count
dg.add_uint16(setDb3)
dg.add_string("... can you make me a sandwich?")
dg.add_uint16(setRDB3)
dg.add_uint32(9999)
dg.add_uint16(setADb3)
dg.add_string("sudo make me a sandwich")
self.expect(self.conn, dg)
# Cleanup
self.deleteObject(60, doid)
self.conn.send(Datagram.create_remove_channel(60))
def test_set_if_empty(self):
self.conn.flush()
self.conn.send(Datagram.create_add_channel(100))
# Create db object
dg = Datagram.create([75757], 100, DBSERVER_CREATE_OBJECT)
dg.add_uint32(1) # Context
dg.add_uint16(DistributedTestObject3)
dg.add_uint16(1) # Field count
dg.add_uint16(setRDB3)
dg.add_uint32(55)
self.conn.send(dg)
dg = self.conn.recv_maybe()
self.assertTrue(dg is not None, "Did not receive CreateObjectResp.")
dgi = DatagramIterator(dg)
dgi.seek(CREATE_DOID_OFFSET)
doid = dgi.read_doid()
# Update field with empty value
dg = Datagram.create([75757], 100, DBSERVER_OBJECT_SET_FIELD_IF_EMPTY)
dg.add_uint32(2) # Context
dg.add_doid(doid)
dg.add_uint16(setDb3)
dg.add_string("Beware... beware!!!") # Field value
self.conn.send(dg)
# Get update response
dg = Datagram.create([100], 75757, DBSERVER_OBJECT_SET_FIELD_IF_EMPTY_RESP)
dg.add_uint32(2) # Context
dg.add_uint8(SUCCESS)
self.expect(self.conn, dg)
# Expect SET_FIELD broadcast
dg = Datagram.create([DATABASE_PREFIX|doid], 100, DBSERVER_OBJECT_SET_FIELD)
dg.add_doid(doid)
dg.add_uint16(setDb3)
dg.add_string("Beware... beware!!!") # Field value
self.expect(self.objects, dg)
# Select object with new value
dg = Datagram.create([75757], 100, DBSERVER_OBJECT_GET_FIELD)
dg.add_uint32(3) # Context
dg.add_doid(doid)
dg.add_uint16(setDb3)
self.conn.send(dg)
# Receive updated value
dg = Datagram.create([100], 75757, DBSERVER_OBJECT_GET_FIELD_RESP)
dg.add_uint32(3) # Context
dg.add_uint8(SUCCESS)
dg.add_uint16(setDb3)
dg.add_string("Beware... beware!!!")
self.expect(self.conn, dg)
# Update field with existing value
dg = Datagram.create([75757], 100, DBSERVER_OBJECT_SET_FIELD_IF_EMPTY)
dg.add_uint32(4) # Context
dg.add_doid(doid)
dg.add_uint16(setDb3)
dg.add_string("It's raining chocolate!") # New value
self.conn.send(dg)
# Get update failure
dg = Datagram.create([100], 75757, DBSERVER_OBJECT_SET_FIELD_IF_EMPTY_RESP)
dg.add_uint32(4) # Context
dg.add_uint8(FAILURE)
dg.add_uint16(setDb3)
dg.add_string("Beware... beware!!!")
self.expect(self.conn, dg)
# Expect no broadcast
self.expectNone(self.objects)
# Select object
dg = Datagram.create([75757], 100, DBSERVER_OBJECT_GET_FIELD)
dg.add_uint32(3) # Context
dg.add_doid(doid)
dg.add_uint16(setDb3)
self.conn.send(dg)
# Ensure value not updated
dg = Datagram.create([100], 75757, DBSERVER_OBJECT_GET_FIELD_RESP)
dg.add_uint32(3) # Context
dg.add_uint8(SUCCESS)
dg.add_uint16(setDb3)
dg.add_string("Beware... beware!!!")
self.expect(self.conn, dg)
# Cleanup
self.deleteObject(100, doid)
self.conn.send(Datagram.create_remove_channel(100))
def test_set_if_equals(self):
self.conn.flush()
self.conn.send(Datagram.create_add_channel(70))
# Create db object
dg = Datagram.create([75757], 70, DBSERVER_CREATE_OBJECT)
dg.add_uint32(1) # Context
dg.add_uint16(DistributedTestObject3)
dg.add_uint16(1) # Field count
dg.add_uint16(setRDB3)
dg.add_uint32(767676)
self.conn.send(dg)
dg = self.conn.recv_maybe()
self.assertTrue(dg is not None, "Did not receive CreateObjectResp.")
dgi = DatagramIterator(dg)
dgi.seek(CREATE_DOID_OFFSET)
doid = dgi.read_doid()
# Update field with correct old value
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_SET_FIELD_IF_EQUALS)
dg.add_uint32(2) # Context
dg.add_doid(doid)
dg.add_uint16(setRDB3)
dg.add_uint32(767676) # Old value
dg.add_uint32(787878) # New value
self.conn.send(dg)
# Get update response
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_SET_FIELD_IF_EQUALS_RESP)
dg.add_uint32(2) # Context
dg.add_uint8(SUCCESS)
self.expect(self.conn, dg)
# Expect SET_FIELD broadcast
dg = Datagram.create([DATABASE_PREFIX|doid], 70, DBSERVER_OBJECT_SET_FIELD)
dg.add_doid(doid)
dg.add_uint16(setRDB3)
dg.add_uint32(787878)
self.expect(self.objects, dg)
# Select object with new value
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_GET_ALL)
dg.add_uint32(3) # Context
dg.add_doid(doid)
self.conn.send(dg)
# Receive updated value
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_GET_ALL_RESP)
dg.add_uint32(3) # Context
dg.add_uint8(SUCCESS)
dg.add_uint16(DistributedTestObject3)
dg.add_uint16(1) # Field Count
dg.add_uint16(setRDB3)
dg.add_uint32(787878)
self.expect(self.conn, dg)
# Update field with incorrect old value
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_SET_FIELD_IF_EQUALS)
dg.add_uint32(4) # Context
dg.add_doid(doid)
dg.add_uint16(setRDB3)
dg.add_uint32(767676) # Old value (incorrect)
dg.add_uint32(383838) # New value
self.conn.send(dg)
# Get update failure
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_SET_FIELD_IF_EQUALS_RESP)
dg.add_uint32(4) # Context
dg.add_uint8(FAILURE)
dg.add_uint16(setRDB3)
dg.add_uint32(787878) # Correct value
self.expect(self.conn, dg)
self.conn.flush()
# Expect no broadcast
self.expectNone(self.objects)
# Compare an existing value against a non-existent value in the update
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_SET_FIELD_IF_EQUALS)
dg.add_uint32(5) # Context
dg.add_doid(doid)
dg.add_uint16(setDb3)
dg.add_string("That was a TERRIBLE surprise!") # Old value
dg.add_string("Wish upon a twinkle...") # New value
self.conn.send(dg)
# Get update failure (old value doesn't exist)
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_SET_FIELD_IF_EQUALS_RESP)
dg.add_uint32(5) # Context
dg.add_uint8(FAILURE)
self.expect(self.conn, dg)
# Expect no broadcast
self.expectNone(self.objects)
# Update object with partially empty values
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS)
dg.add_uint32(8) # Context
dg.add_doid(doid)
dg.add_uint16(2) # Field count
dg.add_uint16(setRDB3)
dg.add_uint32(787878) # Old value
dg.add_uint32(919191) # New value
dg.add_uint16(setDb3)
dg.add_string("I can clear the sky in 10 seconds flat.")
dg.add_string("Jesse!! We have to code!")
self.conn.send(dg)
# Get update failure
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS_RESP)
dg.add_uint32(8) # Context
dg.add_uint8(FAILURE)
dg.add_uint16(1) # Field count
dg.add_uint16(setRDB3)
dg.add_uint32(787878)
self.expect(self.conn, dg)
# Expect no broadcast
self.expectNone(self.objects)
# Set the empty value to an actual value
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_SET_FIELD)
dg.add_doid(doid)
dg.add_uint16(setDb3)
dg.add_string("Daddy... why did you eat my fries? I bought them... and they were mine.")
self.conn.send(dg)
# Ignore set broadcast
self.objects.flush()
# Sanity check on set field
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_GET_ALL)
dg.add_uint32(10) # Context
dg.add_doid(doid)
self.conn.send(dg)
# Receive updated value
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_GET_ALL_RESP)
dg.add_uint32(10) # Context
dg.add_uint8(SUCCESS) # Status
dg.add_uint16(DistributedTestObject3)
dg.add_uint16(2) # Field count
dg.add_uint16(setDb3)
dg.add_string("Daddy... why did you eat my fries? I bought them... and they were mine.")
dg.add_uint16(setRDB3)
dg.add_uint32(787878)
self.expect(self.conn, dg)
# Update multiple with correct old values
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS)
dg.add_uint32(9) # Context
dg.add_doid(doid)
dg.add_uint16(2) # Field count
dg.add_uint16(setRDB3)
dg.add_uint32(787878) # Old value
dg.add_uint32(919191) # New value
dg.add_uint16(setDb3)
dg.add_string("Daddy... why did you eat my fries? I bought them... and they were mine.")
dg.add_string("Mind if I... take a look inside the barn?!") # New value
self.conn.send(dg)
# Receive update success
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS_RESP)
dg.add_uint32(9) # Context
dg.add_uint8(SUCCESS)
self.expect(self.conn, dg)
# Expect SET_FIELDS broadcast
dg = Datagram.create([DATABASE_PREFIX|doid], 70, DBSERVER_OBJECT_SET_FIELDS)
dg.add_doid(doid)
dg.add_uint16(2) # Field count
dg.add_uint16(setDb3)
dg.add_string("Mind if I... take a look inside the barn?!")
dg.add_uint16(setRDB3)
dg.add_uint32(919191)
self.expect(self.objects, dg)
# Select object with new value
dg = Datagram.create([75757], 70, DBSERVER_OBJECT_GET_ALL)
dg.add_uint32(10) # Context
dg.add_doid(doid)
self.conn.send(dg)
# Receive updated value
dg = Datagram.create([70], 75757, DBSERVER_OBJECT_GET_ALL_RESP)
dg.add_uint32(10) # Context
dg.add_uint8(SUCCESS) # Resp status
dg.add_uint16(DistributedTestObject3) # dclass
dg.add_uint16(2) # Field count
dg.add_uint16(setDb3)
dg.add_string("Mind if I... take a look inside the barn?!")
dg.add_uint16(setRDB3)
dg.add_uint32(919191)
self.expect(self.conn, dg)
# Cleanup
self.deleteObject(70, doid)
self.conn.send(Datagram.create_remove_channel(70))
def test_get(self):
self.conn.flush()
self.conn.send(Datagram.create_add_channel(80))
# Create object
dg = Datagram.create([75757], 80, DBSERVER_CREATE_OBJECT)
dg.add_uint32(1) # Context
dg.add_uint16(DistributedTestObject3)
dg.add_uint16(2) # Field count
dg.add_uint16(setRDB3)
dg.add_uint32(1337)
dg.add_uint16(setDb3)
dg.add_string("Uppercut! Downercut! Fireball! Bowl of Punch!")
self.conn.send(dg)
dg = self.conn.recv_maybe()
self.assertTrue(dg is not None, "Did not receive CreateObjectResp.")
dgi = DatagramIterator(dg)
dgi.seek(CREATE_DOID_OFFSET)
doid = dgi.read_doid()
# Select the field
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_GET_FIELD)
dg.add_uint32(2) # Context
dg.add_doid(doid)
dg.add_uint16(setDb3)
self.conn.send(dg)
# Get value in reply
dg = Datagram.create([80], 75757, DBSERVER_OBJECT_GET_FIELD_RESP)
dg.add_uint32(2) # Context
dg.add_uint8(SUCCESS)
dg.add_uint16(setDb3)
dg.add_string("Uppercut! Downercut! Fireball! Bowl of Punch!")
self.expect(self.conn, dg)
# Select multiple fields
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_GET_FIELDS)
dg.add_uint32(3) # Context
dg.add_doid(doid)
dg.add_uint16(2) # Field count
dg.add_uint16(setDb3)
dg.add_uint16(setRDB3)
self.conn.send(dg)
# Get values in reply
dg = Datagram.create([80], 75757, DBSERVER_OBJECT_GET_FIELDS_RESP)
dg.add_uint32(3) # Context
dg.add_uint8(SUCCESS) # Resp status
dg.add_uint16(2) # Field count
dg.add_uint16(setDb3)
dg.add_string("Uppercut! Downercut! Fireball! Bowl of Punch!")
dg.add_uint16(setRDB3)
dg.add_uint32(1337)
self.expect(self.conn, dg)
# Select invalid object
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_GET_FIELD)
dg.add_uint32(4) # Context
dg.add_doid(doid+1)
dg.add_uint16(setDb3)
self.conn.send(dg)
# Get failure
dg = Datagram.create([80], 75757, DBSERVER_OBJECT_GET_FIELD_RESP)
dg.add_uint32(4) # Context
dg.add_uint8(FAILURE)
self.expect(self.conn, dg)
# Select invalid object, multiple fields
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_GET_FIELDS)
dg.add_uint32(5) # Context
dg.add_doid(doid+1)
dg.add_uint16(2) # Field count
dg.add_uint16(setDb3)
dg.add_uint16(setRDB3)
self.conn.send(dg)
# Get failure
dg = Datagram.create([80], 75757, DBSERVER_OBJECT_GET_FIELDS_RESP)
dg.add_uint32(5) # Context
dg.add_uint8(FAILURE)
self.expect(self.conn, dg)
# Clear one field
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_DELETE_FIELD)
dg.add_doid(doid)
dg.add_uint16(setDb3)
self.conn.send(dg)
# Select the cleared field
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_GET_FIELD)
dg.add_uint32(6) # Context
dg.add_doid(doid)
dg.add_uint16(setDb3)
self.conn.send(dg)
# Get failure
dg = Datagram.create([80], 75757, DBSERVER_OBJECT_GET_FIELD_RESP)
dg.add_uint32(6) # Context
dg.add_uint8(FAILURE)
self.expect(self.conn, dg)
# Select the cleared field, with multiple message
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_GET_FIELDS)
dg.add_uint32(7) # Context
dg.add_doid(doid)
dg.add_uint16(1) # Field count
dg.add_uint16(setDb3)
self.conn.send(dg)
# Get success
dg = Datagram.create([80], 75757, DBSERVER_OBJECT_GET_FIELDS_RESP)
dg.add_uint32(7) # Context
dg.add_uint8(SUCCESS)
dg.add_uint16(0) # Field count
self.expect(self.conn, dg)
# Select a cleared and non-cleared field
dg = Datagram.create([75757], 80, DBSERVER_OBJECT_GET_FIELDS)
dg.add_uint32(8) # Context
dg.add_doid(doid)
dg.add_uint16(2) # Field count
dg.add_uint16(setRDB3)
dg.add_uint16(setDb3)
self.conn.send(dg)
# Get success
dg = Datagram.create([80], 75757, DBSERVER_OBJECT_GET_FIELDS_RESP)
# toontown/estate/GardenDropGame.py
import math, random, GameSprite, GardenGameGlobals
from math import pi
from direct.gui.DirectGui import *
from direct.distributed.ClockDelta import *
from toontown.toonbase import TTLocalizer
LevelNumber = 1
class GardenDropGame:
def __init__(self):
self.inHelp = False
self.sprites = []
self.lastTime = []
self.grid = []
print ('Grid Dimensions X%s Z%s' % (GardenGameGlobals.gX,
GardenGameGlobals.gZ))
base.gardenGame = self
self.matchList = []
self.massCount = 0
self.foundCount = 0
return None
def reinitialize(self):
self.inHelp = False
self.sprites = []
self.lastTime = []
self.grid = []
self.matchList = []
self.massCount = 0
self.foundCount = 0
return None
def load(self):
model = loader.loadModel('phase_5.5/models/gui/package_delivery_panel.bam')
model1 = loader.loadModel('phase_3.5/models/gui/matching_game_gui.bam')
self.model = model
self.model1 = model1
background = model.find('**/bg')
itemBoard = model.find('**/item_board')
self.frame = DirectFrame(scale=1.1000000000000001, relief=DGG.FLAT, frameSize=(-0.5,
0.5,
-0.45000000000000001,
-0.050000000000000003), frameColor=(0.73699999999999999, 0.57299999999999995, 0.34499999999999997, 1.0))
self.background = DirectFrame(self.frame, image=background, image_scale=0.050000000000000003, relief=None, pos=(0, 1, 0))
self.itemBoard = DirectFrame(parent=self.frame, image=itemBoard, image_scale=0.050000000000000003, image_color=(0.92200000000000004, 0.92200000000000004, 0.753, 1), relief=None, pos=(0, 1, 0))
gui2 = loader.loadModel('phase_3/models/gui/quit_button.bam')
self.font = loader.loadFont("phase_3/models/fonts/MickeyFont.bam")
self.gardenDropText = OnscreenText(parent=self.frame, text=TTLocalizer.GardenDropTitle,scale=(0.17,0.17,0.17), font=self.font, pos=(0,0.685,0), fg=(1,1,1,1))
self.quitButton = DirectButton(parent=self.frame, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.5, 1.0, -0.42), scale=0.9, text=TTLocalizer.GardenDropExitGame, text_font=self.font, text0_fg=(1, 1, 1, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=0.045, text_pos=(0, -0.01), command=self._GardenDropGame__handleExit)
if LevelNumber == 1:
    self.helpButton = DirectButton(parent=self.frame, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(-0.5, 1.0, -0.42), scale=0.9, text=TTLocalizer.PicnicTableTutorial, text_font=self.font, text0_fg=(1, 1, 1, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=0.045, text_pos=(0, -0.01), command=self._GardenDropGame__openHelp)
def help(self):
self.inHelp = True
frameGui = loader.loadModel('phase_3/models/gui/dialog_box_gui.bam')
self.helpFrame = DirectFrame(scale=1.1, relief=None, image=frameGui, image_scale=(1.75, 1, 0.75), image_color=(1, 1, 1, 1), frameSize=(-0.5, 0.5, -0.45, -0.05))
self.font = loader.loadFont("phase_3/models/fonts/MickeyFont.bam")
self.helpText = DirectLabel(scale=1.1, relief=None, text_pos=(0, 0.2), text_wordwrap=16, text=TTLocalizer.GardenDropHelpTitle, text_font=self.font, pos=(0.0, 0.0, 0.0), text_scale=0.1, text0_fg=(1, 1, 1, 1), parent=self.helpFrame)
self.font2 = loader.loadFont("phase_3/models/fonts/Comedy.bam")
self.helpText2 = DirectLabel(scale=1.1, relief=None, text_pos=(-0.6, 0.1), text_wordwrap=15, text=TTLocalizer.GardenDropInstructions, text_font=self.font2, pos=(0.0, 0.0, 0.0), text_scale=0.085, text0_fg=(0, 0, 0, 1), parent=self.helpFrame, text_align=TextNode.ALeft)
gui2 = loader.loadModel('phase_3/models/gui/quit_button.bam')
self.backButton = DirectButton(parent=self.helpFrame, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.5, 1.0, -0.32), scale=0.9, text=TTLocalizer.GardenDropBackToGame, text_font=self.font, text0_fg=(1, 1, 1, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=0.045, text_pos=(0, -0.01), command=self.unloadHelp)
return True
def addSprite(self, image, size = 0.5, posX = 0, posZ = 0, found = 0):
nodeObj = DirectLabel(parent=self.frame, relief=None, image=image, pos=(posX, 0.0, posZ), scale=size, image_color=(1.0, 1.0, 1.0, 1))
if LevelNumber == 1 or LevelNumber == 2:
colorChoice = random.choice(range(0, 3))
if LevelNumber == 3 or LevelNumber == 4:
colorChoice = random.choice(range(0, 4))
if LevelNumber == 5:
colorChoice = random.choice(range(0, 5))
newSprite = GameSprite.GameSprite(nodeObj, colorChoice, found)
self.sprites.append(newSprite)
if found:
self.foundCount += 1
return newSprite
def addUnSprite(self, image, size = 0.5, posX = 0, posZ = 0):
nodeObj = DirectLabel(parent=self.frame, relief=None, image=image, pos=(posX, 0.0, posZ), scale=size, image_color=(1.0, 1.0, 1.0, 1))
newSprite = GameSprite.GameSprite(nodeObj)
return newSprite
def testPointDistanceSquare(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
if distC == 0:
distC = 1e-10
return distC
def testDistance(self, nodeA, nodeB):
distX = nodeA.getX() - nodeB.getX()
distZ = nodeA.getZ() - nodeB.getZ()
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
return dist
def testGridfull(self, cell):
if not cell:
return 0
elif cell[0] != None:
return 1
else:
return 0
def getValidGrid(self, x, z):
if x < 0 or x >= GardenGameGlobals.gridDimX:
return None
elif z < 0 or z >= GardenGameGlobals.gridDimZ:
return None
else:
return self.grid[x][z]
return None
def getColorType(self, x, z):
if x < 0 or x >= GardenGameGlobals.gridDimX:
return -1
elif z < 0 or z >= GardenGameGlobals.gridDimZ:
return -1
elif self.grid[x][z][0] == None:
return -1
else:
return self.grid[x][z][0].colorType
return True
def getSprite(self, spriteIndex):
if spriteIndex >= len(self.sprites) or self.sprites[spriteIndex].markedForDeath:
return None
else:
return self.sprites[spriteIndex]
return None
def findGrid(self, x, z, force = 0):
currentClosest = None
currentDist = 10000000
for countX in xrange(GardenGameGlobals.gridDimX):
for countZ in xrange(GardenGameGlobals.gridDimZ):
testDist = self.testPointDistanceSquare(x, z, self.grid[countX][countZ][1], self.grid[countX][countZ][2])
if self.grid[countX][countZ][0] == None and testDist < currentDist and (force or self.hasNeighbor(countX, countZ)):
currentClosest = self.grid[countX][countZ]
self.closestX = countX
self.closestZ = countZ
currentDist = testDist
return currentClosest
def findGridCog(self):
GardenGameGlobals.cogX = 0
GardenGameGlobals.cogZ = 0
self.massCount = 0
for row in self.grid:
for cell in row:
if cell[0] != None:
GardenGameGlobals.cogX += cell[1]
GardenGameGlobals.cogZ += cell[2]
self.massCount += 1
if self.massCount > 0:
self.cogX = (GardenGameGlobals.cogX / self.massCount)
self.cogZ = (GardenGameGlobals.cogZ / self.massCount)
self.cogSprite.setX(self.cogX)
self.cogSprite.setZ(self.cogZ)
else:
self.doOnClearGrid()
return True
def stickInGrid(self, sprite, force = 0):
if sprite.isActive and not sprite.isQue:
gridCell = self.findGrid(sprite.getX(), sprite.getZ(), force)
if gridCell:
gridCell[0] = sprite
sprite.setActive(0)
sprite.setX(gridCell[1])
sprite.setZ(gridCell[2])
self.createMatchList(self.closestX, self.closestZ)
if len(self.matchList) >= 3:
self.clearMatchList()
self.findGridCog()
def fillMatchList(self, cellX, cellZ):
if (cellX, cellZ) in self.matchList:
return True
self.matchList.append((cellX, cellZ))
colorType = self.grid[cellX][cellZ][0].colorType
if cellZ % 2 == 0:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX + 1, cellZ + 1) == colorType:
self.fillMatchList(cellX + 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX + 1, cellZ - 1) == colorType:
self.fillMatchList(cellX + 1, cellZ - 1)
else:
if self.getColorType(cellX - 1, cellZ) == colorType:
self.fillMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) == colorType:
self.fillMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) == colorType:
self.fillMatchList(cellX, cellZ + 1)
if self.getColorType(cellX - 1, cellZ + 1) == colorType:
self.fillMatchList(cellX - 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) == colorType:
self.fillMatchList(cellX, cellZ - 1)
if self.getColorType(cellX - 1, cellZ - 1) == colorType:
self.fillMatchList(cellX - 1, cellZ - 1)
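# Worked example of the neighbour pattern used by fillMatchList (illustrative only):
# the playfield is an offset/staggered grid, so a cell's six neighbours depend on
# whether its row index cellZ is even or odd.  For the even-row cell (3, 4) the
# neighbours checked above are (2,4), (4,4), (3,5), (4,5), (3,3) and (4,3); for the
# odd-row cell (3, 5) they are (2,5), (4,5), (3,6), (2,6), (3,4) and (2,4).
# The recursion simply flood-fills across neighbours that share the same colorType.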
def createMatchList(self, x, z):
self.matchList = []
self.fillMatchList(x, z)
def clearMatchList(self):
for entry in self.matchList:
gridEntry = self.grid[entry[0]][entry[1]]
sprite = gridEntry[0]
gridEntry[0] = None
sprite.markedForDeath = 1
return True
def hasNeighbor(self, cellX, cellZ):
gotNeighbor = 0
if cellZ % 2 == 0:
if self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ + 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ - 1)):
gotNeighbor = 1
return gotNeighbor
def __colTest(self):
if not hasattr(self, 'tick'):
self.tick = 0
self.tick += 1
if self.tick > 5:
self.tick = 0
sizeSprites = len(self.sprites)
for movingSpriteIndex in xrange(len(self.sprites)):
for testSpriteIndex in xrange(movingSpriteIndex, len(self.sprites)):
movingSprite = self.getSprite(movingSpriteIndex)
testSprite = self.getSprite(testSpriteIndex)
if testSprite and movingSprite:
if movingSpriteIndex != testSpriteIndex and (movingSprite.isActive or testSprite.isActive):
if movingSprite.isQue or testSprite.isQue:
if self.testDistance(movingSprite.nodeObj, testSprite.nodeObj) < GardenGameGlobals.queExtent * (movingSprite.size + testSprite.size):
self.push(movingSprite, testSprite)
elif self.testDistance(movingSprite.nodeObj, testSprite.nodeObj) < movingSprite.size + testSprite.size:
if movingSprite.isActive:
if not testSprite.isActive: self.__collide(movingSprite, testSprite)
if self.testDistance(self.cogSprite.nodeObj, testSprite.nodeObj) < (self.cogSprite.size + testSprite.size):
if movingSprite.isActive:
self.stickInGrid(testSprite, 1)
if self.tick == 5:
pass
def __collide(self, move, test):
queHit = 0
if move.isQue:
que = move
hit = test
queHit = 1
elif test.isQue:
que = test
hit = move
queHit = 1
else:
test.velX = 0
test.velZ = 0
move.velX = 0
move.velZ = 0
test.collide()
move.collide()
self.stickInGrid(move,1)
self.stickInGrid(test,1)
if queHit:
forceM = 0.1
distX = que.getX() - hit.getX()
distZ = que.getZ() - hit.getZ()
self.stickInGrid(move,1)
self.stickInGrid(test,1)
def push(self, move, test):
queHit = 0
if move.isQue:
que = move
hit = test
queHit = 1
elif test.isQue:
que = test
hit = move
queHit = 1
if queHit:
forceM = 0.1
dist = self.testDistance(move.nodeObj, test.nodeObj)
if abs(dist) < GardenGameGlobals.queExtent * que.size and abs(dist) > 0:
scaleSize = GardenGameGlobals.queExtent * que.size * 0.5
distFromPara = abs(abs(dist) - scaleSize)
force = (scaleSize - distFromPara) / scaleSize * (dist / abs(dist))
angle = self.angleTwoSprites(que, hit)
if angle < 0:
angle = angle + 2 * pi
if angle > pi * 2.0:
angle = angle - 2 * pi
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>, <NAME>, <NAME>, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
__status__ = "Development"
import numpy as np
import os
import csv
import cv2
from datetime import datetime
##############################################################################
CREATE_IMAGES = True
CSV_OVERVIEW_FILE = '' # CSV file listing which recordings should be used for training.
BASE_PATH_TO_DATA = '' # Root directory used to locate the videos ...
BASE_PATH_TO_LABELS = '' # ... and the CSV label files.
OUTPUT_PATH_MULTIPLE_FRAME = ''
##############################################################################
BOX_PLACEMENT = {}
POLYGON_ENDPOINTS = {}
CSV_DELIMITER_OVERVIEW = ',' # Delimiter of the overview CSV file that lists which data to use for training.
ANIMAL_NUM_SEPERATOR = ';' # Separator for video/individual numbers within the above CSV file.
END_OF_CSV = "pred"
ALL_BEHAVIORS = {"Standing": 0, "Lying": 1, "Sleeping": 2, "Out": 3}
POSSIBLE_BEHAVIORS = {"Standing": 0, "Lying": 1, "Sleeping": 2, "Out": 3}
BEHAVIOR_MAPPING = {0: 0, 1: 1, 2: 2, 3: 3}
INTERVAL_LEN = 7
IMAGES_PER_INTERVAL = 4
VIDEO_LEN = 50400
CUT_OFF = int(VIDEO_LEN / INTERVAL_LEN)
WEIGHT_FACTOR = 2.0 # Weighting applied when computing the per-class frame modulus (see _get_labelling_dist_one_ind).
CSV_DELIMITER_LABELLING = ',' # Delimiting sign of the labelling csv files.
def _check_consistency_of_behaviorlists():
if len( [x for x in ALL_BEHAVIORS.values() if x not in BEHAVIOR_MAPPING.keys()] ) > 0:
print("Error: Some behaviors exist but are not mapped.")
return False
poss_problems = [ x for x in ALL_BEHAVIORS.values() if x not in POSSIBLE_BEHAVIORS.values() ]
real_problems = [x for x in poss_problems if BEHAVIOR_MAPPING[x] not in POSSIBLE_BEHAVIORS.values()]
if len(real_problems) > 0:
print("Error: Some behaviors are mapped to impossible behaviors.")
return False
ret = False
if(all(x in ALL_BEHAVIORS for x in POSSIBLE_BEHAVIORS)):
ret = True
if not ret:
print("Error: Possible behaviors is not a subset on all behaviors.")
return ret
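# Illustrative configuration example (hypothetical values): to merge "Sleeping"
# into "Lying" one could keep ALL_BEHAVIORS as above and set
#   POSSIBLE_BEHAVIORS = {"Standing": 0, "Lying": 1, "Out": 3}
#   BEHAVIOR_MAPPING   = {0: 0, 1: 1, 2: 1, 3: 3}
# _check_consistency_of_behaviorlists() accepts this because every code in
# ALL_BEHAVIORS is mapped and every mapped target is a possible behavior;
# dropping the entry 2: 1 from BEHAVIOR_MAPPING would make the check fail.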
def get_csv_label_file(species, zoo, individual_num, date, base_path = BASE_PATH_TO_LABELS):
path = base_path+species+"/"+zoo+"/Auswertung/Boris_KI/csv-Dateien/"+date+"_"+species+"_"+zoo+"_"+str(individual_num)+"_SUM-"+str(INTERVAL_LEN)+"s_"+END_OF_CSV+".csv"
if not os.path.exists(path):
print("Error: "+path+" was not found.")
return ""
return path
def get_videofile_list(species, zoo, videolist, date, base_path = BASE_PATH_TO_DATA):
vid_list = []
for vid_num in videolist:
vid_path = base_path+species+"/"+zoo+"/Videos/"+species+"_"+str(vid_num)+"/"+_correct_date(date)+"_"+species+"_"+zoo+"_"+str(vid_num)+".avi"
if not os.path.exists(vid_path):
print("Error: "+vid_path+" was not found.")
return []
vid_list.append(vid_path)
return vid_list
def _correct_date(date):
if not "." in date:
return date
return date.split(".")[2] + "-" + date.split(".")[1] + "-" + date.split(".")[0]
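# Quick sanity example for the date conversion above:
# _correct_date("24.12.2020") -> "2020-12-24", while an already ISO-style
# string such as "2020-12-24" is returned unchanged.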
def _create_video_label_mapping(overview_file = CSV_OVERVIEW_FILE, delim = CSV_DELIMITER_OVERVIEW):
"""
Returns a dictionary {Species_Zoo_EnclosureNum: {individual_number: [ [video_list_per_day], [csv_label_file_per_day] ]}}
"""
return_dict = {}
if not os.path.exists(overview_file):
print("Error: Overview-CSV-file was not found.")
return return_dict
with open(overview_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delim)
line_count = 0
for row in csv_reader:
if line_count == 0:
if len(row) != 6:
print("Overview file has the wrong format.")
return return_dict
else:
date = _correct_date(row[0])
species = row[1]
zoo = row[2]
enclosure_num = row[3]
video_nums = row[4].split(ANIMAL_NUM_SEPERATOR)
individual_nums = row[5].split(ANIMAL_NUM_SEPERATOR)
for ind_num in individual_nums:
csv_label_file = get_csv_label_file(species, zoo, ind_num, date)
avi_video_filelist = get_videofile_list(species, zoo, video_nums, date)
if len(csv_label_file) < 1:
continue
if len(avi_video_filelist) < 1:
continue
dict_key = species+"_"+zoo+"_"+str(enclosure_num)
if not dict_key in return_dict.keys():
return_dict[dict_key] = {ind_num: [ [avi_video_filelist], [csv_label_file]]}
elif not ind_num in return_dict[dict_key].keys():
return_dict[dict_key][ind_num] = [ [avi_video_filelist], [csv_label_file] ]
else:
return_dict[dict_key][ind_num][0].append(avi_video_filelist)
return_dict[dict_key][ind_num][1].append(csv_label_file)
line_count += 1
return return_dict
def _create_videolist_for_prediction(overview_file = CSV_OVERVIEW_FILE, delim = CSV_DELIMITER_OVERVIEW):
"""
Returns a dictionary {Species_Zoo_EnclosureNum: [video_list_per_day] }
"""
return_dict = {}
if not os.path.exists(overview_file):
print("Error: Overview-CSV-file was not found.")
return return_dict
with open(overview_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delim)
line_count = 0
for row in csv_reader:
if line_count == 0:
if len(row) != 6:
print("Overview file has the wrong format.")
return return_dict
else:
date = row[0]
species = row[1]
zoo = row[2]
enclosure_num = row[3]
video_nums = row[4].split(ANIMAL_NUM_SEPERATOR)
avi_video_filelist = get_videofile_list(species, zoo, video_nums, date)
if len(avi_video_filelist) < 1:
continue
dict_key = species+"_"+zoo+"_"+str(enclosure_num)
if not dict_key in return_dict.keys():
return_dict[dict_key] = [ avi_video_filelist ]
else:
return_dict[dict_key].append(avi_video_filelist)
line_count += 1
return return_dict
def _get_labelling_dist_from_csv(csv_filename, delim = CSV_DELIMITER_LABELLING, all_behaviors = ALL_BEHAVIORS, cut_off = CUT_OFF):
"""
Input: csv-labelling file.
Output: {behavior code: amount of occurencies}
Requires csv-file of format
Time_Interval || Start_Frame || End-Frame || Behavior 1 || ... || Behavior n
"""
ret_dict = {}
for behav_code in all_behaviors.values():
ret_dict[behav_code] = 0
if not os.path.exists(csv_filename):
print("Error: CSV-file was not found:"+csv_filename)
return ret_dict
with open(csv_filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delim)
line_count = 0
row_to_behavior_mapping = {} #row_index: behavior_code
for row in csv_reader:
if line_count > cut_off:
break
if line_count == 0:
if len(row) != 3 + len(all_behaviors.keys()):
print("Error: CSV-file has wrong format: "+csv_filename)
return ret_dict
for j in range(3, len(row)):
behav_name = row[j]
if not behav_name in all_behaviors.keys():
print("Error: CSV-file contains unknown behavior:"+behav_name)
return ret_dict
behav_code = all_behaviors[behav_name]
row_to_behavior_mapping[j] = behav_code
else:
int_row = list(map(int, row))
shown_behav = row_to_behavior_mapping[int_row[3:].index(1)+3]
ret_dict[shown_behav] += 1
line_count += 1
return ret_dict
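# Sketch of the expected labelling CSV layout and the resulting counts
# (file contents are hypothetical):
#
#   Time_Interval,Start_Frame,End_Frame,Standing,Lying,Sleeping,Out
#   1,0,174,1,0,0,0
#   2,175,349,0,1,0,0
#   3,350,524,0,1,0,0
#
# With the default ALL_BEHAVIORS this yields {0: 1, 1: 2, 2: 0, 3: 0},
# i.e. one interval of Standing and two intervals of Lying.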
def _get_labelling_distribution_per_individual(video_label_map_dict, all_behaviors = ALL_BEHAVIORS,
csv_delim_labelling = CSV_DELIMITER_LABELLING, cut_off = CUT_OFF):
"""
Input: {Enclosure_Code: {Individual: [ [list of per-day video lists], [list of labelling files] ]}}
Output: {Enclosure_Code: {Individual: {behav_code: amount_of_intervals}} }
"""
ret_dict = {}
for enc_code in video_label_map_dict.keys():
dict_of_ind = video_label_map_dict[enc_code]
ret_dict[enc_code] = {}
for ind_num in dict_of_ind.keys():
# Initialise dictionary for specific individual
ret_dict[enc_code][ind_num] = {}
for behav_code in all_behaviors.values():
ret_dict[enc_code][ind_num][behav_code] = 0
labelling_files = dict_of_ind[ind_num][1]
for csv_filename in labelling_files:
label_dist_file = _get_labelling_dist_from_csv(csv_filename = csv_filename,
delim = csv_delim_labelling,
all_behaviors = all_behaviors,
cut_off = cut_off)
for behav_code in all_behaviors.values():
ret_dict[enc_code][ind_num][behav_code] += label_dist_file[behav_code]
return ret_dict
def _get_out_code(all_behaviors = ALL_BEHAVIORS):
if "Out" not in all_behaviors.keys():
print("Error: One possible behavior needs to be Out.")
return -1
return all_behaviors["Out"]
def _map_labellings_to_real_labels(individual_label_overview, behavior_map = BEHAVIOR_MAPPING,
all_behaviors = ALL_BEHAVIORS, possible_behaviors = POSSIBLE_BEHAVIORS):
"""
Input: {Enclosure_Code: {Individual: {behav_code: amount_of_intervals}} }
Output: {Enclosure_Code: {Individual: {behav_code: amount_of_intervals}} } where each raw behavior is mapped to its target category via behavior_map
"""
ret_dict = {}
for enclosure_code in individual_label_overview.keys():
ret_dict[enclosure_code] = {}
for ind_num in individual_label_overview[enclosure_code]:
ret_dict[enclosure_code][ind_num] = {}
for poss_behav in possible_behaviors.values():
ret_dict[enclosure_code][ind_num][poss_behav] = 0
for behav_code in all_behaviors.values():
real_behav = behavior_map[behav_code]
ret_dict[enclosure_code][ind_num][real_behav] += individual_label_overview[enclosure_code][ind_num][behav_code]
return ret_dict
def _get_labelling_dist(label_dict, possible_behaviors = POSSIBLE_BEHAVIORS, weight_factor = WEIGHT_FACTOR ):
"""
Input: {Enclosure_Code: {Individual: {behav_code: amount_of_intervals}} }
Output: {Enclosure_Code: {Individual: {behav_code: frame modulus}} }
"""
ret_dict = {}
for enclosure_code in label_dict.keys():
ret_dict[enclosure_code] = {}
for ind_num in label_dict[enclosure_code]:
ret_dict[enclosure_code][ind_num] = _get_labelling_dist_one_ind(ind_labeldict = label_dict[enclosure_code][ind_num],
possible_behaviors = possible_behaviors, weight_factor = weight_factor)
return ret_dict
def _get_labelling_dist_one_ind(ind_labeldict, possible_behaviors = POSSIBLE_BEHAVIORS, weight_factor = WEIGHT_FACTOR, sparsity = True):
"""
If sparsity is set and the largest modulus is lower than 5, all moduli are multiplied by a correction factor so that fewer images are extracted.
Input: {behav_code: amount_of_intervals}
Output: {behav_code: frame modulus}
"""
ind_labeldict_without_out = {}
for behav_code in sorted(ind_labeldict.keys()):
if not behav_code == _get_out_code(possible_behaviors):
ind_labeldict_without_out[behav_code] = ind_labeldict[behav_code]
min_val = min(ind_labeldict_without_out.items(), key=lambda x: x[1])[1]
min_key = min(ind_labeldict_without_out.items(), key=lambda x: x[1])[0]
if min_val == 0:
print("Error: A behavior was never observed for this individual; class balancing is not possible.")
print("Falling back to taking every 100th image for this individual.")
for behav_code in ind_labeldict_without_out.keys():
ind_labeldict_without_out[behav_code] = 100
return ind_labeldict_without_out
for behav_code in ind_labeldict_without_out.keys():
if behav_code != min_key:
ind_labeldict_without_out[behav_code] = int( np.floor(ind_labeldict_without_out[behav_code]*1.0*weight_factor / min_val) )
else:
ind_labeldict_without_out[behav_code] = 1.0*weight_factor
max_val = max(ind_labeldict_without_out.items(), key=lambda x: x[1])[1]
correction_factor = 1
if max_val < 5:
if max_val == 1:
correction_factor = 6
elif max_val == 2:
correction_factor = 3
elif max_val == 3:
correction_factor = 2
else:
correction_factor = 1.5
for behav_code in ind_labeldict_without_out.keys():
ind_labeldict_without_out[behav_code] = int( np.ceil(ind_labeldict_without_out[behav_code]*correction_factor) )
return ind_labeldict_without_out
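# Worked example (hypothetical counts): for one individual with
#   {Standing: 1000, Lying: 200, Sleeping: 400, Out: 5600}
# and WEIGHT_FACTOR = 2.0, "Out" is ignored, the rarest class (Lying, 200)
# defines the baseline, and the returned moduli are
#   {Standing: 10, Lying: 2, Sleeping: 4}
# i.e. every 10th Standing frame, every 2nd Lying frame and every 4th Sleeping
# frame would be kept.  Because the largest modulus (10) is >= 5, no additional
# correction factor is applied.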
def _get_labelling_sequence(csv_filename, cut_off = CUT_OFF, interval_len=INTERVAL_LEN,
behav_map = BEHAVIOR_MAPPING, map_behavior = True, delim = CSV_DELIMITER_LABELLING,
get_intervals = False):
"""
Input: csv_file which contains label sequence
Requires csv-file of format
Time_Interval || Start_Frame || End-Frame || Behavior 1 || ... || Behavior n
Parameter: map_behavior; if activated, raw behavior codes are translated to their target categories via behav_map.
Output: sequence of behavioral categories
"""
ret_list = []
if not os.path.exists(csv_filename):
print("Error: CSV-file was not found:"+csv_filename)
return ret_list
with open(csv_filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=delim)
line_count = 0
for row in csv_reader:
if line_count > 0:
int_row = list(map(int, row))
shown_behav = int_row[3:].index(1)
if map_behavior:
shown_behav = behav_map[shown_behav]
ret_list.append(shown_behav)
line_count += 1
if line_count > cut_off:
break
if not get_intervals:
return np.repeat(ret_list, interval_len)
return ret_list
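# Example of the expansion step (hypothetical labels): with interval_len = 7,
# the per-interval labels [0, 1, 1] become the per-second sequence
# [0]*7 + [1]*7 + [1]*7 when get_intervals is False; with get_intervals=True
# the raw [0, 1, 1] list is returned instead.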
def _decide_width_height(width_dims, height_dims, amount_streams):
is_low_res = False
ratios = [width_dims[i]*1.0/height_dims[i] for i in range(len(width_dims))]
# low res: 1.333
# high res: 1.777 or 1.666
if min(ratios) < 1.4:
is_low_res = True
res
"""
Filter and combine various peptide/MHC datasets to derive a composite training set,
optionally including eluted peptides identified by mass-spec.
"""
import sys
import argparse
import os
import json
import collections
from six.moves import StringIO
import pandas
from mhcflurry.common import normalize_allele_name
def normalize_allele_name_or_return_unknown(s):
return normalize_allele_name(
s,
raise_on_error=False,
default_value="UNKNOWN")
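# For reference, this wrapper is meant to be forgiving: a parseable name such
# as "A0201" should normalize to something like "HLA-A*02:01", while an
# unparseable string comes back as "UNKNOWN" instead of raising.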
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--ms-item",
nargs="+",
action="append",
metavar="PMID FILE, ... FILE",
default=[],
help="Mass spec item to curate: PMID and list of files")
parser.add_argument(
"--expression-item",
nargs="+",
action="append",
metavar="LABEL FILE, ... FILE",
default=[],
help="Expression data to curate: dataset label and list of files")
parser.add_argument(
"--ms-out",
metavar="OUT.csv",
help="Out file path (MS data)")
parser.add_argument(
"--expression-out",
metavar="OUT.csv",
help="Out file path (RNA-seq expression)")
parser.add_argument(
"--expression-metadata-out",
metavar="OUT.csv",
help="Out file path for expression metadata, i.e. which samples used")
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Leave user in pdb if PMID is unsupported")
PMID_HANDLERS = {}
EXPRESSION_HANDLERS = {}
def load(filenames, **kwargs):
result = {}
for filename in filenames:
if filename.endswith(".csv"):
result[filename] = pandas.read_csv(filename, **kwargs)
elif filename.endswith(".xlsx") or filename.endswith(".xls"):
result[filename] = pandas.read_excel(filename, **kwargs)
else:
result[filename] = filename
return result
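# Example of what load() hands back (paths are hypothetical): CSV and Excel
# files are parsed into DataFrames, anything else is passed through untouched.
#
#   load(["hits.csv", "supplement.xlsx", "notes.txt"])
#   # -> {"hits.csv": <DataFrame>, "supplement.xlsx": <DataFrame>,
#   #     "notes.txt": "notes.txt"}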
def debug(*filenames):
loaded = load(filenames)
import ipdb
ipdb.set_trace()
def handle_pmid_27600516(filename):
"""Gloger, ..., Neri Cancer Immunol Immunother 2016 [PMID 27600516]"""
df = pandas.read_csv(filename)
sample_to_peptides = {}
current_sample = None
for peptide in df.peptide:
if peptide.startswith("#"):
current_sample = peptide[1:]
sample_to_peptides[current_sample] = []
else:
assert current_sample is not None
sample_to_peptides[current_sample].append(peptide.strip().upper())
rows = []
for (sample, peptides) in sample_to_peptides.items():
for peptide in sorted(set(peptides)):
rows.append([sample, peptide])
result_df = pandas.DataFrame(rows, columns=["sample_id", "peptide"])
result_df["sample_type"] = "melanoma_cell_line"
result_df["cell_line"] = result_df.sample_id
result_df["mhc_class"] = "I"
result_df["pulldown_antibody"] = "W6/32"
result_df["format"] = "multiallelic"
result_df["hla"] = result_df.sample_id.map({
"FM-82": "HLA-A*02:01 HLA-A*01:01 HLA-B*08:01 HLA-B*15:01 HLA-C*03:04 HLA-C*07:01",
"FM-93/2": "HLA-A*02:01 HLA-A*26:01 HLA-B*40:01 HLA-B*44:02 HLA-C*03:04 HLA-C*05:01",
"Mel-624": "HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-B*14:01 HLA-C*07:02 HLA-C*08:02",
"MeWo": "HLA-A*02:01 HLA-A*26:01 HLA-B*14:02 HLA-B*38:01 HLA-C*08:02 HLA-C*12:03",
"SK-Mel-5": "HLA-A*02:01 HLA-A*11:01 HLA-B*40:01 HLA-C*03:03",
})
return result_df
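# Sketch of the single-column input this handler expects (values hypothetical):
# lines beginning with "#" name the sample, subsequent lines are its peptides.
#
#   peptide
#   #FM-82
#   SLYNTVATL
#   GILGFVFTL
#   #MeWo
#   NLVPMVATV
#
# This would yield the rows ("FM-82", "GILGFVFTL"), ("FM-82", "SLYNTVATL"),
# ("MeWo", "NLVPMVATV") before the HLA annotation is added.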
def handle_pmid_23481700(filename):
"""Hassan, ..., <NAME> Mol Cell Proteomics 2015 [PMID 23481700]"""
df = pandas.read_excel(filename, skiprows=10)
assert df["Peptide sequence"].iloc[0] == "TPSLVKSTSQL"
assert df["Peptide sequence"].iloc[-1] == "LPHSVNSKL"
hla = {
"JY": "HLA-A*02:01 HLA-B*07:02 HLA-C*07:02",
"HHC": "HLA-A*02:01 HLA-B*07:02 HLA-B*44:02 HLA-C*05:01 HLA-C*07:02",
}
results = []
for sample_id in ["JY", "HHC"]:
hits_df = df.loc[
df["Int %s" % sample_id].map(
lambda x: {"n.q.": 0, "n.q": 0}.get(x, x)).astype(float) > 0
]
result_df = pandas.DataFrame({
"peptide": hits_df["Peptide sequence"].dropna().values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = "B-LCL-" + sample_id
result_df["hla"] = hla[sample_id]
result_df["sample_type"] = "B-LCL"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
result_df["pulldown_antibody"] = "W6/32"
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
# Rename samples to avoid a collision with the JY sample in PMID 25576301.
result_df.sample_id = result_df.sample_id.map({
"JY": "JY.2015",
"HHC": "HHC.2015",
})
return result_df
def handle_pmid_24616531(filename):
"""Mommen, ..., Heck PNAS 2014 [PMID 24616531]"""
df = pandas.read_excel(filename, sheet_name="EThcD")
peptides = df.Sequence.values
assert peptides[0] == "APFLRIAF"
assert peptides[-1] == "WRQAGLSYIRYSQI"
result_df = pandas.DataFrame({
"peptide": peptides,
})
result_df["sample_id"] = "24616531"
result_df["sample_type"] = "B-LCL"
result_df["cell_line"] = "GR"
result_df["pulldown_antibody"] = "W6/32"
# Note: this publication lists hla as "HLA-A*01,-03, B*07,-27, and -C*02,-07"
# we are guessing the exact 4 digit alleles based on this.
result_df["hla"] = "HLA-A*01:01 HLA-A*03:01 HLA-B*07:02 HLA-B*27:05 HLA-C*02:02 HLA-C*07:01"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_25576301(filename):
"""Bassani-Sternberg, ..., Mann Mol Cell Proteomics 2015 [PMID 25576301]"""
df = pandas.read_excel(filename, sheet_name="Peptides")
peptides = df.Sequence.values
assert peptides[0] == "AAAAAAAQSVY"
assert peptides[-1] == "YYYNGKAVY"
column_to_sample = {}
for s in [c for c in df if c.startswith("Intensity ")]:
assert s[-2] == "-"
column_to_sample[s] = s.replace("Intensity ", "")[:-2].strip()
intensity_columns = list(column_to_sample)
rows = []
for _, row in df.iterrows():
x1 = row[intensity_columns]  # intensities for this peptide across all runs
x2 = x1[x1 > 0].index.map(column_to_sample).value_counts()  # replicate runs with signal, per sample
x3 = x2[x2 >= 2] # require at least two replicates for each peptide
for sample in x3.index:
rows.append((row.Sequence, sample))
result_df = pandas.DataFrame(rows, columns=["peptide", "sample_id"])
result_df["pulldown_antibody"] = "W6/32"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
allele_map = {
'Fib': "HLA-A*03:01 HLA-A*23:01 HLA-B*08:01 HLA-B*15:18 HLA-C*07:02 HLA-C*07:04",
'HCC1937': "HLA-A*23:01 HLA-A*24:02 HLA-B*07:02 HLA-B*40:01 HLA-C*03:04 HLA-C*07:02",
'SupB15WT': None, # four digit alleles unknown, will drop sample
'SupB15RT': None,
'HCT116': "HLA-A*01:01 HLA-A*02:01 HLA-B*45:01 HLA-B*18:01 HLA-C*05:01 HLA-C*07:01",
# Homozygous at HLA-A:
'HCC1143': "HLA-A*31:01 HLA-A*31:01 HLA-B*35:08 HLA-B*37:01 HLA-C*04:01 HLA-C*06:02",
# Homozygous everywhere:
'JY': "HLA-A*02:01 HLA-A*02:01 HLA-B*07:02 HLA-B*07:02 HLA-C*07:02 HLA-C*07:02",
}
sample_type = {
'Fib': "fibroblast",
'HCC1937': "basal like breast cancer",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "colon carcinoma",
'HCC1143': "basal like breast cancer",
'JY': "B-cell",
}
cell_line = {
'Fib': None,
'HCC1937': "HCC1937",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "HCT116",
'HCC1143': "HCC1143",
'JY': "JY",
}
result_df["hla"] = result_df.sample_id.map(allele_map)
print("Entries before dropping samples with unknown alleles", len(result_df))
result_df = result_df.loc[~result_df.hla.isnull()]
print("Entries after dropping samples with unknown alleles", len(result_df))
result_df["sample_type"] = result_df.sample_id.map(sample_type)
result_df["cell_line"] = result_df.sample_id.map(cell_line)
print(result_df.head(3))
return result_df
def handle_pmid_26992070(*filenames):
"""Ritz, ..., Fugmann Proteomics 2016 [PMID 26992070]"""
# Although this publication seems to suggest that HEK293 are C*07:02
# (figure 3B), in a subsequent publication [PMID 28834231] this group
# gives the HEK293 HLA type as HLA‐A*03:01, HLA‐B*07:02, and HLA‐C*07:01.
# We are therefore using the HLA‐C*07:01 (i.e. the latter) typing results
# here.
allele_text = """
Cell line HLA-A 1 HLA-A 2 HLA-B 1 HLA-B 2 HLA-C 1 HLA-C 2
HEK293 03:01 03:01 07:02 07:02 07:01 07:01
HL-60 01:01 01:01 57:01 57:01 06:02 06:02
RPMI8226 30:01 68:02 15:03 15:10 02:10 03:04
MAVER-1 24:02 26:01 38:01 44:02 05:01 12:03
THP-1 02:01 24:02 15:11 35:01 03:03 03:03
"""
allele_info = pandas.read_csv(
StringIO(allele_text), sep="\t", index_col=0)
allele_info.index = allele_info.index.str.strip()
for gene in ["A", "B", "C"]:
for num in ["1", "2"]:
allele_info[
"HLA-%s %s" % (gene, num)
] = "HLA-" + gene + "*" + allele_info["HLA-%s %s" % (gene, num)]
cell_line_to_allele = allele_info.apply(" ".join, axis=1)
sheets = {}
for f in filenames:
if f.endswith(".xlsx"):
d = pandas.read_excel(f, sheet_name=None, skiprows=1)
sheets.update(d)
dfs = []
for cell_line in cell_line_to_allele.index:
# Using data from DeepQuanTR, which appears to be a consensus between
# two other methods used.
sheet = sheets[cell_line + "_DeepQuanTR"]
replicated = sheet.loc[
sheet[[c for c in sheet if "Sample" in c]].fillna(0).sum(1) > 1
]
df = pandas.DataFrame({
'peptide': replicated.Sequence.values
})
df["sample_id"] = cell_line
df["hla"] = cell_line_to_allele.get(cell_line)
dfs.append(df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["pulldown_antibody"] = "W6/32"
result_df["cell_line"] = result_df["sample_id"]
result_df["sample_type"] = result_df.sample_id.map({
"HEK293": "hek",
"HL-60": "neutrophil",
"RPMI8226": "b-cell",
"MAVER-1": "b-LCL",
"THP-1": "monocyte",
})
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_27412690(filename):
"""Shraibman, ..., Admon Mol Cell Proteomics 2016 [PMID 27412690]"""
hla_types = {
"U-87": "HLA-A*02:01 HLA-B*44:02 HLA-C*05:01",
"T98G": "HLA-A*02:01 HLA-B*39:06 HLA-C*07:02",
"LNT-229": "HLA-A*03:01 HLA-B*35:01 HLA-C*04:01",
}
sample_id_to_cell_line = {
"U-87": "U-87",
"T98G": "T98G",
"LNT-229": "LNT-229",
"U-87+DAC": "U-87",
"T98G+DAC": "T98G",
"LNT-229+DAC": "LNT-229",
}
df = pandas.read_excel(filename)
assert df.Sequence.iloc[0] == "AAAAAAGSGTPR"
intensity_col_to_sample_id = {}
for col in df:
if col.startswith("Intensity "):
sample_id = col.split()[1]
assert sample_id in sample_id_to_cell_line, (col, sample_id)
intensity_col_to_sample_id[col] = sample_id
dfs = []
for (sample_id, cell_line) in sample_id_to_cell_line.items():
intensity_cols = [
c for (c, v) in intensity_col_to_sample_id.items()
if v == sample_id
]
hits_df = df.loc[
(df[intensity_cols] > 0).sum(1) > 1
]
result_df = pandas.DataFrame({
"peptide": hits_df.Sequence.values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = cell_line
result_df["hla"] = hla_types[cell_line]
dfs.append(result_df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["sample_type"] = "glioblastoma"
result_df["pulldown_antibody"] = "W6/32"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_28832583(*filenames):
"""Bassani-Sternberg, ..., Gfeller PLOS Comp. Bio. 2017 [PMID 28832583]"""
# This work also reanalyzes data from
# Pearson, ..., <NAME> Invest 2016 [PMID 27841757]
(filename_dataset1, filename_dataset2) = sorted(filenames)
dataset1 = pandas.read_csv(filename_dataset1, sep="\t")
dataset2 = pandas.read_csv(filename_dataset2, sep="\t")
df = pandas.concat([dataset1, dataset2], ignore_index=True, sort=False)
info_text = """
cell_line origin original_pmid allele1 allele2 allele3 allele4 allele5 allele6
CD165 B-cell 28832583 HLA-A*02:05 HLA-A*24:02 HLA-B*15:01 HLA-B*50:01 HLA-C*03:03 HLA-C*06:02
CM467 B-cell 28832583 HLA-A*01:01 HLA-A*24:02 HLA-B*13:02 HLA-B*39:06 HLA-C*06:02 HLA-C*12:03
GD149 B-cell 28832583 HLA-A*01:01 HLA-A*24:02 HLA-B*38:01 HLA-B*44:03 HLA-C*06:02 HLA-C*12:03
MD155 B-cell 28832583 HLA-A*02:01 HLA-A*24:02 HLA-B*15:01 HLA-B*18:01 HLA-C*03:03 HLA-C*07:01
PD42 B-cell 28832583 HLA-A*02:06 HLA-A*24:02 HLA-B*07:02 HLA-B*55:01 HLA-C*01:02 HLA-C*07:02
RA957 B-cell 28832583 HLA-A*02:20 HLA-A*68:01 HLA-B*35:03 HLA-B*39:01 HLA-C*04:01 HLA-C*07:02
TIL1 TIL 28832583 HLA-A*02:01 HLA-A*02:01 HLA-B*18:01 HLA-B*38:01 HLA-C*05:01
TIL3 TIL 28832583 HLA-A*01:01 HLA-A*23:01 HLA-B*07:02 HLA-B*15:01 HLA-C*12:03 HLA-C*14:02
Apher1 Leukapheresis 28832583 HLA-A*03:01 HLA-A*29:02 HLA-B*44:02 HLA-B*44:03 HLA-C*12:03 HLA-C*16:01
Apher6 Leukapheresis 28832583 HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-C*07:02
pat_AC2 B-LCL 27841757 HLA-A*03:01 HLA-A*32:01 HLA-B*27:05 HLA-B*45:01
pat_C B-LCL 27841757 HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-C*07:02
pat_CELG B-LCL 27841757 HLA-A*02:01 HLA-A*24:02 HLA-B*15:01 HLA-B*73:01 HLA-C*03:03 HLA-C*15:05
pat_CP2 B-LCL 27841757 HLA-A*11:01 HLA-B*14:02 HLA-B*44:02
pat_FL B-LCL 27841757 HLA-A*03:01 HLA-A*11:01 HLA-B*44:03 HLA-B*50:01
pat_J B-LCL 27841757 HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-C*07:02
pat_JPB3 B-LCL 27841757 HLA-A*02:01 HLA-A*11:01 HLA-B*27:05 HLA-B*56:01
pat_JT2 B-LCL 27841757 HLA-A*11:01 HLA-B*18:03 HLA-B*35:01
pat_M B-LCL 27841757 HLA-A*03:01 HLA-A*29:02 HLA-B*08:01 HLA-B*44:03 HLA-C*07:01 HLA-C*16:01
pat_MA B-LCL 27841757 HLA-A*02:01 HLA-A*29:02 HLA-B*44:03 HLA-B*57:01 HLA-C*07:01 HLA-C*16:01
pat_ML B-LCL 27841757 HLA-A*02:01 HLA-A*11:01 HLA-B*40:01 HLA-B*44:03
pat_NS2 B-LCL 27841757 HLA-A*02:01 HLA-B*13:02 HLA-B*41:01
pat_NT B-LCL 27841757 HLA-A*01:01 HLA-A*32:01 HLA-B*08:01
pat_PF1 B-LCL 27841757 HLA-A*01:01 HLA-A*02:01 HLA-B*07:02 HLA-B*44:03 HLA-C*07:02 HLA-C*16:01
pat_R B-LCL 27841757 HLA-A*03:01 HLA-A*29:02 HLA-B*08:01 HLA-B*44:03 HLA-C*07:01 HLA-C*16:01
pat_RT B-LCL 27841757 HLA-A*01:01 HLA-A*02:01 HLA-B*18:01 HLA-B*39:24 HLA-C*05:01 HLA-C*07:01
pat_SR B-LCL 27841757 HLA-A*02:01 HLA-A*23:01 HLA-B*18:01 HLA-B*44:03
pat_ST B-LCL 27841757 HLA-A*03:01 HLA-A*24:02 HLA-B*07:02 HLA-B*27:05
"""
info_df = pandas.read_csv(StringIO(info_text), sep="\t", index_col=0)
info_df.index = info_df.index.str.strip()
info_df["hla"] = info_df[
[c for c in info_df if c.startswith("allele")]
].fillna("").apply(" ".join, axis=1)
results = []
for col in df.columns:
if col.startswith("Intensity "):
sample_id = col.replace("Intensity ", "")
assert sample_id in info_df.index, sample_id
peptides = df.loc[df[col].fillna(0) > 0].Sequence.unique()
result_df = pandas.DataFrame({"peptide": peptides})
result_df["sample_id"] = sample_id
result_df["hla"] = info_df.loc[sample_id].hla
result_df["sample_type"] = info_df.loc[sample_id].origin
result_df["original_pmid"] = str(
info_df.loc[sample_id].original_pmid)
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
samples = result_df.sample_id.unique()
for sample_id in info_df.index:
assert sample_id in samples, (sample_id, samples)
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
result_df["cell_line"] = ""
result_df["pulldown_antibody"] = "W6/32"
return result_df
PMID_31495665_SAMPLE_TYPES = {
"HLA-DR_Lung": "lung",
"HLA-DR_PBMC_HDSC": "pbmc",
"HLA-DR_PBMC_RG1095": "pbmc",
"HLA-DR_PBMC_RG1104": "pbmc",
"HLA-DR_PBMC_RG1248": "pbmc",
"HLA-DR_Spleen": "spleen",
"MAPTAC_A*02:01": "mix:a375,expi293,hek293,hela",
"MAPTAC_A*11:01": "mix:expi293,hela",
"MAPTAC_A*32:01": "mix:a375,expi293,hela",
"MAPTAC_B*07:02": "mix:a375,expi293,hela",
"MAPTAC_B*45:01": "expi293",
"MAPTAC_B*52:01": "mix:a375,expi293",
"MAPTAC_C*03:03": "expi293",
"MAPTAC_C*06:02": "mix:a375,expi293",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "expi293",
"MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "expi293",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "expi293",
"MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "expi293",
"MAPTAC_DRB1*01:01": "mix:a375,b721,expi293,kg1,k562",
"MAPTAC_DRB1*03:01": "expi293",
"MAPTAC_DRB1*04:01": "expi293",
"MAPTAC_DRB1*07:01": "mix:expi293,hek293",
"MAPTAC_DRB1*11:01": "mix:expi293,k562,kg1",
"MAPTAC_DRB1*12:01_dm+": "expi293",
"MAPTAC_DRB1*12:01_dm-": "expi293",
"MAPTAC_DRB1*15:01": "expi293",
"MAPTAC_DRB3*01:01_dm+": "expi293",
"MAPTAC_DRB3*01:01_dm-": | |
# nsls2ptycho/ptycho_gui.py
import sys
import os
import random
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog, QAction
from nsls2ptycho.ui import ui_ptycho
from nsls2ptycho.core.utils import clean_shared_memory, get_mpi_num_processes, parse_range
from nsls2ptycho.core.ptycho_param import Param
from nsls2ptycho.core.ptycho_recon import PtychoReconWorker, PtychoReconFakeWorker, HardWorker
from nsls2ptycho.core.ptycho_qt_utils import PtychoStream
from nsls2ptycho.core.widgets.list_widget import ListWidget
from nsls2ptycho.core.widgets.mplcanvas import load_image_pil
from nsls2ptycho.core.ptycho.utils import parse_config
from nsls2ptycho._version import __version__
# databroker related
from nsls2ptycho.core.databroker_api import db, load_metadata, get_single_image, get_detector_names, beamline_name
from nsls2ptycho.reconStep_gui import ReconStepWindow
from nsls2ptycho.roi_gui import RoiWindow
from nsls2ptycho.scan_pt import ScanWindow
import h5py
import numpy as np
from numpy import pi
import traceback
# for frontend-backend communication
from posix_ipc import SharedMemory, ExistentialError
import mmap
# set True for testing GUI changes
_TEST = False
# for shared memory
mm_list = []
shm_list = []
class MainWindow(QtWidgets.QMainWindow, ui_ptycho.Ui_MainWindow):
_mainwindow_signal = QtCore.pyqtSignal()
def __init__(self, parent=None, param:Param=None):
super().__init__(parent)
self.setupUi(self)
QtWidgets.QApplication.setStyle('Plastique')
# connect
self.btn_load_probe.clicked.connect(self.loadProbe)
self.btn_load_object.clicked.connect(self.loadObject)
self.ck_init_prb_flag.clicked.connect(self.resetProbeFlg)
self.ck_init_obj_flag.clicked.connect(self.resetObjectFlg)
self.btn_choose_cwd.clicked.connect(self.setWorkingDirectory)
self.cb_dataloader.currentTextChanged.connect(self.setLoadButton)
self.btn_load_scan.clicked.connect(self.loadExpParam)
self.btn_view_frame.clicked.connect(self.viewDataFrame)
self.ck_extra_scans_flag.clicked.connect(self.updateExtraScansFlg)
self.btn_set_extra_scans.clicked.connect(self.setExtraScans)
#self.le_scan_num.editingFinished.connect(self.forceLoad) # too sensitive, why?
self.le_scan_num.textChanged.connect(self.forceLoad)
self.cb_dataloader.currentTextChanged.connect(self.forceLoad)
self.cb_detectorkind.currentTextChanged.connect(self.forceLoad)
self.ck_mode_flag.clicked.connect(self.modeMultiSliceGuard)
self.ck_multislice_flag.clicked.connect(self.modeMultiSliceGuard)
self.ck_mask_obj_flag.clicked.connect(self.updateObjMaskFlg)
self.ck_gpu_flag.clicked.connect(self.updateGpuFlg)
self.ck_bragg_flag.clicked.connect(self.updateBraggFlg)
self.ck_pc_flag.clicked.connect(self.updatePcFlg)
self.ck_position_correction_flag.clicked.connect(self.updateCorrFlg)
self.ck_refine_data_flag.clicked.connect(self.updateRefineDataFlg)
self.ck_postprocessing_flag.clicked.connect(self.showNoPostProcessingWarning)
self.ck_batch_crop_flag.clicked.connect(self.updateBatchCropDataFlg)
self.cb_dataloader.currentTextChanged.connect(self.updateBatchCropDataFlg)
self.btn_recon_start.clicked.connect(self.start)
self.btn_recon_stop.clicked.connect(self.stop)
self.btn_recon_batch_start.clicked.connect(self.batchStart)
self.btn_recon_batch_stop.clicked.connect(self.batchStop)
self.ck_init_prb_batch_flag.stateChanged.connect(self.switchProbeBatch)
self.ck_init_obj_batch_flag.stateChanged.connect(self.switchObjectBatch)
self.menu_import_config.triggered.connect(self.importConfig)
self.menu_export_config.triggered.connect(self.exportConfig)
self.menu_clear_config_history.triggered.connect(self.removeConfigHistory)
self.menu_save_config_history.triggered.connect(self.saveConfigHistory)
self.actionClear_shared_memory.triggered.connect(self.clearSharedMemory)
self.btn_MPI_file.clicked.connect(self.setMPIfile)
self.le_gpus.textChanged.connect(self.resetMPIFlg)
# setup
self.sp_pha_max.setMaximum(pi)
self.sp_pha_max.setMinimum(-pi)
self.sp_pha_min.setMaximum(pi)
self.sp_pha_min.setMinimum(-pi)
# init.
if param is None:
self.param = Param() # default
else:
self.param = param
self._prb = None
self._obj = None
self._ptycho_gpu_thread = None
self._worker_thread = None
self._db = None # hold the Broker instance that contains the info of the given scan id
self._mds_table = None # hold a Pandas.dataframe instance
self._loaded = False # whether the user has loaded metadata or not (from either databroker or h5)
self._scan_numbers = None # a list of scan numbers for batch mode
self._scan_points = None # an array of shape (2, N) holding the scan coordinates
self._extra_scans_dialog = None
self._batch_prb_filename = None # probe's filename template for batch mode
self._batch_obj_filename = None # object's filename template for batch mode
self._config_path = os.path.expanduser("~") + "/.ptycho_gui/.ptycho_gui_config"
if not os.path.isdir(os.path.dirname(self._config_path)):
os.makedirs(os.path.dirname(self._config_path))
self.reconStepWindow = None
self.roiWindow = None
self.scanWindow = None
# temporary solutions
self.ck_ms_pie_flag.setEnabled(False)
self.ck_weak_obj_flag.setEnabled(False)
#self.cb_alg_flag. addItem("PIE")
#self.cb_alg2_flag.addItem("PIE")
# TODO: find a way to register the live windows so that they can be opened anytime
self.menuWindows.setEnabled(False)
#self.actionROI.setEnabled(False)
#self.actionMonitor.setEnabled(False)
#self.actionScan_points.setEnabled(False)
#if self.menu_save_config_history.isChecked(): # TODO: think of a better way...
self.retrieveConfigHistory()
self.update_gui_from_param()
self.updateExtraScansFlg()
self.updateModeFlg()
self.updateMultiSliceFlg()
self.updateObjMaskFlg()
self.updateBraggFlg()
self.updatePcFlg()
self.updateCorrFlg()
self.updateRefineDataFlg()
self.updateBatchCropDataFlg()
self.checkGpuAvail()
self.updateGpuFlg()
self.resetExperimentalParameters() # probably not necessary
self.setLoadButton()
# generate a unique string for shared memory
if sys.platform.startswith('darwin'): # OS X has a much shorter name limit
self.param.shm_name = os.getlogin()+'_'+str(os.getpid())+'_'+str(random.randrange(256))
else:
self.param.shm_name = 'ptycho_'+os.getlogin()+'_'+str(os.getpid())+'_'+str(random.randrange(256))
# TODO: delete param.shm_name read in from previous config so that we can reset the buttons earlier
self.resetButtons()
# display GUI version
self.setWindowTitle("NSLS-II Ptychography v" + __version__)
@property
def db(self):
# access the Broker instance; the name is probably not intuitive enough...?
return self._db
@db.setter
def db(self, scan_id:int):
# TODO: this should be configured based on selected beamline profile!
self._db = db
def resetButtons(self):
self.btn_recon_start.setEnabled(True)
self.btn_recon_stop.setEnabled(False)
self.btn_recon_batch_start.setEnabled(True)
self.btn_recon_batch_stop.setEnabled(False)
self.recon_bar.setValue(0)
# close the mmap arrays
# removing these arrays, can be changed later if needed
if self._prb is not None:
del self._prb
self._prb = None
if self._obj is not None:
del self._obj
self._obj = None
#if self._scan_points is not None:
# del self._scan_points
# self._scan_points = None
self.close_mmap()
# TODO: consider merging this function with importConfig()?
def retrieveConfigHistory(self):
if os.path.isfile(self._config_path):
try:
param = parse_config(self._config_path, Param())
self.menu_save_config_history.setChecked(param.save_config_history)
if param.save_config_history:
self.param = param
except Exception as ex:
self.exception_handler(ex)
def saveConfigHistory(self):
self.param.save_config_history = self.menu_save_config_history.isChecked()
def removeConfigHistory(self):
if os.path.isfile(self._config_path):
self.param = Param() # default
os.remove(self._config_path)
self.update_gui_from_param()
def update_param_from_gui(self):
p = self.param
# data group
p.scan_num = str(self.le_scan_num.text())
p.detectorkind = str(self.cb_detectorkind.currentText())
p.frame_num = int(self.sp_fram_num.value())
# p.working_directory set by setWorkingDirectory()
# Exp param group
p.xray_energy_kev = float(self.sp_xray_energy.value())
if self.sp_xray_energy.value() != 0.:
p.lambda_nm = 1.2398/self.sp_xray_energy.value()
p.z_m = float(self.sp_detector_distance.value())
p.nx = int(self.sp_x_arr_size.value()) # bookkeeping
p.dr_x = float(self.sp_x_step_size.value())
p.x_range = float(self.sp_x_scan_range.value())
p.ny = int(self.sp_y_arr_size.value()) # bookkeeping
p.dr_y = float(self.sp_y_step_size.value())
p.y_range = float(self.sp_y_scan_range.value())
#p.scan_type = str(self.cb_scan_type.currentText()) # do we need this one?
p.nz = int(self.sp_num_points.value()) # bookkeeping
# recon param group
p.n_iterations = int(self.sp_n_iterations.value())
p.alg_flag = str(self.cb_alg_flag.currentText())
p.alg2_flag = str(self.cb_alg2_flag.currentText())
p.alg_percentage = float(self.sp_alg_percentage.value())
p.sign = str(self.le_sign.text())
p.precision = self.cb_precision_flag.currentText()
p.init_prb_flag = self.ck_init_prb_flag.isChecked()
p.init_obj_flag = self.ck_init_obj_flag.isChecked()
# prb and obj path already set
p.mode_flag = self.ck_mode_flag.isChecked()
p.prb_mode_num = self.sp_prb_mode_num.value()
p.obj_mode_num = self.sp_obj_mode_num.value()
if p.mode_flag and "_mode" not in p.sign:
p.sign = p.sign + "_mode"
p.multislice_flag = self.ck_multislice_flag.isChecked()
p.slice_num = int(self.sp_slice_num.value())
p.slice_spacing_m = float(self.sp_slice_spacing_m.value() * 1e-6)
if p.multislice_flag and "_ms" not in p.sign:
p.sign = p.sign + "_ms"
p.amp_min = float(self.sp_amp_min.value())
p.amp_max = float(self.sp_amp_max.value())
p.pha_min = float(self.sp_pha_min.value())
p.pha_max = float(self.sp_pha_max.value())
p.gpu_flag = self.ck_gpu_flag.isChecked()
p.gpus = parse_range(self.le_gpus.text(), batch_processing=False)
p.gpu_batch_size = int(self.cb_gpu_batch_size.currentText())
# adv param group
p.ccd_pixel_um = float(self.sp_ccd_pixel_um.value())
p.distance = float(self.sp_distance.value())
p.angle_correction_flag = self.ck_angle_correction_flag.isChecked()
p.x_direction = float(self.sp_x_direction.value())
p.y_direction = float(self.sp_y_direction.value())
p.angle = self.sp_angle.value()
p.start_update_probe = self.sp_start_update_probe.value()
p.start_update_object = self.sp_start_update_object.value()
p.ml_mode = self.cb_ml_mode.currentText()
p.ml_weight = self.sp_ml_weight.value()
p.dm_version = self.sp_dm_version.value()
p.cal_scan_pattern_flag = self.ck_cal_scal_pattern_flag.isChecked()
p.nth = self.sp_nth.value()
p.start_ave = self.sp_start_ave.value()
p.processes = self.sp_processes.value()
p.bragg_flag = self.ck_bragg_flag.isChecked()
p.bragg_theta = self.sp_bragg_theta.value()
p.bragg_gamma = self.sp_bragg_gamma.value()
p.bragg_delta = self.sp_bragg_delta.value()
p.pc_flag = self.ck_pc_flag.isChecked()
p.pc_sigma = self.sp_pc_sigma.value()
p.pc_alg = self.cb_pc_alg.currentText()
p.pc_kernel_n = self.sp_pc_kernel_n.value()
p.position_correction_flag = self.ck_position_correction_flag.isChecked()
p.position_correction_start = self.sp_position_correction_start.value()
p.position_correction_step = self.sp_position_correction_step.value()
p.sigma2 = float(self.sp_sigma2.value())
p.beta = float(self.sp_beta.value())
p.display_interval = int(self.sp_display_interval.value())
p.preview_flag = self.ck_preview_flag.isChecked()
p.cal_error_flag = self.ck_cal_error_flag.isChecked()
p.prb_center_flag = self.ck_prb_center_flag.isChecked()
p.mask_obj_flag = self.ck_mask_obj_flag.isChecked()
p.norm_prb_amp_flag = self.ck_norm_prb_amp_flag.isChecked()
p.weak_obj_flag = self.ck_weak_obj_flag.isChecked()
p.ms_pie_flag = self.ck_ms_pie_flag.isChecked()
p.refine_data_flag = self.ck_refine_data_flag.isChecked()
p.refine_data_start_it = int(self.sp_refine_data_start_it.value())
p.refine_data_interval = int(self.sp_refine_data_interval.value())
p.refine_data_step = float(self.sp_refine_data_step.value())
p.profiler_flag = self.ck_profiler_flag.isChecked()
p.postprocessing_flag = self.ck_postprocessing_flag.isChecked()
p.use_NCCL = self.rb_nccl.isChecked()
p.use_CUDA_MPI = self.rb_cuda_mpi.isChecked()
# TODO: organize them
#self.ck_init_obj_dpc_flag.setChecked(p.init_obj_dpc_flag)
#self.ck_mask_prb_flag.setChecked(p.mask_prb_flag)
#self.ck_mesh_flag.setChecked(p.mesh_flag)
#self.ck_sf_flag.setChecked(p.sf_flag)
# batch param group, necessary?
# from the associate scan number window
if self._extra_scans_dialog is not None:
scans = self._extra_scans_dialog.listWidget
num_items = scans.count()
p.asso_scan_numbers = [scans.item(i).text() for i in range(num_items)]
else:
# do not erase this, as keeping it has no harm
pass
def update_gui_from_param(self):
p = self.param
# Data group
self.le_scan_num.setText(p.scan_num)
self.le_working_directory.setText(str(p.working_directory or ''))
self.cb_detectorkind.setCurrentIndex(p.get_detector_kind_index())
self.sp_fram_num.setValue(int(p.frame_num))
# Exp param group
self.sp_xray_energy.setValue(1.2398/float(p.lambda_nm) if 'lambda_nm' in p.__dict__ else 0.)
self.sp_detector_distance.setValue(float(p.z_m) if 'z_m' in p.__dict__ else 0)
self.sp_x_arr_size.setValue(float(p.nx))
self.sp_x_step_size.setValue(float(p.dr_x))
self.sp_x_scan_range.setValue(float(p.x_range))
self.sp_y_arr_size.setValue(float(p.ny))
self.sp_y_step_size.setValue(float(p.dr_y))
self.sp_y_scan_range.setValue(float(p.y_range))
self.cb_scan_type.setCurrentIndex(p.get_scan_type_index())
self.sp_num_points.setValue(int(p.nz))
# recon param group
self.sp_n_iterations.setValue(int(p.n_iterations))
self.cb_alg_flag.setCurrentIndex(p.get_alg_flg_index())
self.cb_alg2_flag.setCurrentIndex(p.get_alg2_flg_index())
self.sp_alg_percentage.setValue(float(p.alg_percentage))
self.le_sign.setText(p.sign)
self.cb_precision_flag.setCurrentText(p.precision)
self.ck_init_prb_flag.setChecked(p.init_prb_flag)
self.le_prb_path.setText(str(p.prb_filename or ''))
self.ck_init_obj_flag.setChecked(p.init_obj_flag)
self.le_obj_path.setText(str(p.obj_filename or ''))
self.ck_mode_flag.setChecked(p.mode_flag)
self.sp_prb_mode_num.setValue(int(p.prb_mode_num))
self.sp_obj_mode_num.setValue(int(p.obj_mode_num))
self.ck_multislice_flag.setChecked(p.multislice_flag)
self.sp_slice_num.setValue(int(p.slice_num))
self.sp_slice_spacing_m.setValue(p.get_slice_spacing_m())
self.sp_amp_max.setValue(float(p.amp_max))
self.sp_amp_min.setValue(float(p.amp_min))
self.sp_pha_max.setValue(float(p.pha_max))
self.sp_pha_min.setValue(float(p.pha_min))
self.ck_gpu_flag.setChecked(p.gpu_flag)
gpu_str = ''
for i, dev_id in enumerate(p.gpus):
gpu_str += str(dev_id)
if i != len(p.gpus) - 1:
gpu_str += ', '
self.le_gpus.setText(gpu_str)
self.cb_gpu_batch_size.setCurrentIndex(p.get_gpu_batch_index())
# set MPI file path from param
if p.mpi_file_path != '':
mpi_filename = os.path.basename(p.mpi_file_path)
self.le_MPI_file_path.setText(mpi_filename)
# TODO: does this make sense?
self.le_gpus.setText('')
# adv param group
self.sp_ccd_pixel_um.setValue(p.ccd_pixel_um)
self.sp_distance.setValue(float(p.distance))
self.ck_angle_correction_flag.setChecked(p.angle_correction_flag)
self.sp_x_direction.setValue(p.x_direction)
self.sp_y_direction.setValue(p.y_direction)
self.sp_angle.setValue(p.angle)
self.sp_start_update_probe.setValue(p.start_update_probe)
self.sp_start_update_object.setValue(p.start_update_object)
self.cb_ml_mode.setCurrentText(p.ml_mode)
self.sp_ml_weight.setValue(p.ml_weight)
self.sp_dm_version.setValue(p.dm_version)
self.ck_cal_scal_pattern_flag.setChecked(p.cal_scan_pattern_flag)
self.sp_nth.setValue(p.nth)
self.sp_start_ave.setValue(p.start_ave)
self.sp_processes.setValue(p.processes)
self.ck_bragg_flag.setChecked(p.bragg_flag)
self.sp_bragg_theta.setValue(p.bragg_theta)
self.sp_bragg_gamma.setValue(p.bragg_gamma)
self.sp_bragg_delta.setValue(p.bragg_delta)
self.ck_pc_flag.setChecked(p.pc_flag)
self.sp_pc_sigma.setValue(p.pc_sigma)
self.cb_pc_alg.setCurrentText(p.pc_alg)
self.sp_pc_kernel_n.setValue(p.pc_kernel_n)
self.ck_position_correction_flag.setChecked(p.position_correction_flag)
self.sp_position_correction_start.setValue(p.position_correction_start)
self.sp_position_correction_step.setValue(p.position_correction_step)
self.sp_sigma2.setValue(p.sigma2)
self.sp_beta.setValue(p.beta)
self.sp_display_interval.setValue(p.display_interval)
self.ck_preview_flag.setChecked(p.preview_flag)
self.ck_cal_error_flag.setChecked(p.cal_error_flag)
self.ck_init_obj_dpc_flag.setChecked(p.init_obj_dpc_flag)
self.ck_prb_center_flag.setChecked(p.prb_center_flag)
self.ck_mask_prb_flag.setChecked(p.mask_prb_flag)
self.ck_mask_obj_flag.setChecked(p.mask_obj_flag)
self.ck_norm_prb_amp_flag.setChecked(p.norm_prb_amp_flag)
self.ck_weak_obj_flag.setChecked(p.weak_obj_flag)
self.ck_mesh_flag.setChecked(p.mesh_flag)
self.ck_ms_pie_flag.setChecked(p.ms_pie_flag)
self.ck_sf_flag.setChecked(p.sf_flag)
self.ck_refine_data_flag.setChecked(p.refine_data_flag)
self.sp_refine_data_start_it.setValue(p.refine_data_start_it)
self.sp_refine_data_interval.setValue(p.refine_data_interval)
self.sp_refine_data_step.setValue(p.refine_data_step)
self.ck_profiler_flag.setChecked(p.profiler_flag)
self.ck_postprocessing_flag.setChecked(p.postprocessing_flag)
self.rb_nccl.setChecked(p.use_NCCL)
self.rb_cuda_mpi.setChecked(p.use_CUDA_MPI)
# batch param group, necessary?
def start(self, batch_mode=False):
if self._ptycho_gpu_thread is not None and self._ptycho_gpu_thread.isFinished():
self._ptycho_gpu_thread = None
if self._ptycho_gpu_thread is None:
if not self._loaded:
print("[WARNING] Remember to click \"Load\" before proceeding!", file=sys.stderr)
return
self.update_param_from_gui() # this has to be done first, so all operations depending on param are correct
self.recon_bar.setValue(0)
self.recon_bar.setMaximum(self.param.n_iterations)
# at least one GPU needs to be selected
if self.param.gpu_flag and len(self.param.gpus) == 0 and self.param.mpi_file_path == '':
print("[WARNING] select at least one GPU!", file=sys.stderr)
return
# batch mode requires some additional changes to param
if batch_mode:
if self._batch_prb_filename is not None:
p = self.param
p.init_prb_flag = False
scan_num = str(self.param.scan_num)
sign = self._batch_prb_filename[1].split('probe')[0]
sign = sign.strip('_')
dirname = p.working_directory + "/recon_result/S" + scan_num + "/" + sign + "/recon_data/"
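# _batch_prb_filename is presumably a (prefix, suffix) pair obtained by splitting a
# previously chosen probe file name around its scan number, so joining with the
# current scan number below rebuilds the matching file name for this scan,
# e.g. ("recon_", "_t1_probe_ave.npy") -> "recon_1234_t1_probe_ave.npy" (hypothetical names).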
filename = scan_num.join(self._batch_prb_filename)
p.set_prb_path(dirname, filename)
print("[BATCH] will load " + dirname + filename + " as probe")
if self._batch_obj_filename is not None:
p = self.param
p.init_obj_flag = False
scan_num = str(self.param.scan_num)
sign = self._batch_obj_filename[1].split('object')[0]
sign = sign.strip('_')
dirname = p.working_directory + "/recon_result/S" + scan_num + "/" + sign + "/recon_data/"
filename = scan_num.join(self._batch_obj_filename)
p.set_obj_path(dirname, filename)
print("[BATCH] will load " + dirname + filename + " as object")
# this is needed because MPI processes need to know the working directory...
self._exportConfigHelper(self._config_path)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
from typing import Union
import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable
_KNN = namedtuple("KNN", "dists idx knn")
class _knn_points(Function):
"""
Torch autograd Function wrapper for KNN C++/CUDA implementations.
"""
@staticmethod
# pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
def forward(
ctx,
p1,
p2,
lengths1,
lengths2,
K,
version,
norm: int = 2,
return_sorted: bool = True,
):
"""
K-Nearest neighbors on point clouds.
Args:
p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each
containing up to P1 points of dimension D.
p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each
containing up to P2 points of dimension D.
lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the
length of each pointcloud in p1. Or None to indicate that every cloud has
length P1.
lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the
length of each pointcloud in p2. Or None to indicate that every cloud has
length P2.
K: Integer giving the number of nearest neighbors to return.
version: Which KNN implementation to use in the backend. If version=-1,
the correct implementation is selected based on the shapes of the inputs.
norm: (int) indicating the norm. Only supports 1 (for L1) and 2 (for L2).
return_sorted: (bool) whether to return the nearest neighbors sorted in
ascending order of distance.
Returns:
p1_dists: Tensor of shape (N, P1, K) giving the squared distances to
the nearest neighbors. This is padded with zeros both where a cloud in p2
has fewer than K points and where a cloud in p1 has fewer than P1 points.
p1_idx: LongTensor of shape (N, P1, K) giving the indices of the
K nearest neighbors from points in p1 to points in p2.
Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th nearest
neighbors to `p1[n, i]` in `p2[n]`. This is padded with zeros both where a cloud
in p2 has fewer than K points and where a cloud in p1 has fewer than P1 points.
"""
if not ((norm == 1) or (norm == 2)):
raise ValueError("Support for 1 or 2 norm.")
idx, dists = _C.knn_points_idx(p1, p2, lengths1, lengths2, norm, K, version)
# sort KNN in ascending order if K > 1
if K > 1 and return_sorted:
if lengths2.min() < K:
P1 = p1.shape[1]
mask = lengths2[:, None] <= torch.arange(K, device=dists.device)[None]
# mask has shape [N, K], true where dists irrelevant
mask = mask[:, None].expand(-1, P1, -1)
# mask has shape [N, P1, K], true where dists irrelevant
dists[mask] = float("inf")
dists, sort_idx = dists.sort(dim=2)
dists[mask] = 0
else:
dists, sort_idx = dists.sort(dim=2)
# pyre-fixme[16]: `Tensor` has no attribute `gather`.
idx = idx.gather(2, sort_idx)
ctx.save_for_backward(p1, p2, lengths1, lengths2, idx)
ctx.mark_non_differentiable(idx)
ctx.norm = norm
return dists, idx
@staticmethod
@once_differentiable
def backward(ctx, grad_dists, grad_idx):
p1, p2, lengths1, lengths2, idx = ctx.saved_tensors
norm = ctx.norm
# TODO(gkioxari) Change cast to floats once we add support for doubles.
if not (grad_dists.dtype == torch.float32):
grad_dists = grad_dists.float()
if not (p1.dtype == torch.float32):
p1 = p1.float()
if not (p2.dtype == torch.float32):
p2 = p2.float()
grad_p1, grad_p2 = _C.knn_points_backward(
p1, p2, lengths1, lengths2, idx, norm, grad_dists
)
return grad_p1, grad_p2, None, None, None, None, None, None
def knn_points(
p1: torch.Tensor,
p2: torch.Tensor,
lengths1: Union[torch.Tensor, None] = None,
lengths2: Union[torch.Tensor, None] = None,
norm: int = 2,
K: int = 1,
version: int = -1,
return_nn: bool = False,
return_sorted: bool = True,
) -> _KNN:
"""
K-Nearest neighbors on point clouds.
Args:
p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each
containing up to P1 points of dimension D.
p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each
containing up to P2 points of dimension D.
lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the
length of each pointcloud in p1. Or None to indicate that every cloud has
length P1.
lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the
length of each pointcloud in p2. Or None to indicate that every cloud has
length P2.
norm: Integer indicating the norm of the distance. Supports only 1 for L1, 2 for L2.
K: Integer giving the number of nearest neighbors to return.
version: Which KNN implementation to use in the backend. If version=-1,
the correct implementation is selected based on the shapes of the inputs.
return_nn: If set to True returns the K nearest neighbors in p2 for each point in p1.
return_sorted: (bool) whether to return the nearest neighbors sorted in
ascending order of distance.
Returns:
dists: Tensor of shape (N, P1, K) giving the squared distances to
the nearest neighbors. This is padded with zeros both where a cloud in p2
has fewer than K points and where a cloud in p1 has fewer than P1 points.
idx: LongTensor of shape (N, P1, K) giving the indices of the
K nearest neighbors from points in p1 to points in p2.
Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th nearest
neighbors to `p1[n, i]` in `p2[n]`. This is padded with zeros both where a cloud
in p2 has fewer than K points and where a cloud in p1 has fewer than P1
points.
nn: Tensor of shape (N, P1, K, D) giving the K nearest neighbors in p2 for
each point in p1. Concretely, `p2_nn[n, i, k]` gives the k-th nearest neighbor
for `p1[n, i]`. Returned if `return_nn` is True.
The nearest neighbors are collected using `knn_gather`
.. code-block::
p2_nn = knn_gather(p2, p1_idx, lengths2)
which is a helper function that allows indexing any tensor of shape (N, P2, U) with
the indices `p1_idx` returned by `knn_points`. The output is a tensor
of shape (N, P1, K, U).
"""
if p1.shape[0] != p2.shape[0]:
raise ValueError("pts1 and pts2 must have the same batch dimension.")
if p1.shape[2] != p2.shape[2]:
raise ValueError("pts1 and pts2 must have the same point dimension.")
p1 = p1.contiguous()
p2 = p2.contiguous()
P1 = p1.shape[1]
P2 = p2.shape[1]
if lengths1 is None:
lengths1 = torch.full((p1.shape[0],), P1, dtype=torch.int64, device=p1.device)
if lengths2 is None:
lengths2 = torch.full((p1.shape[0],), P2, dtype=torch.int64, device=p1.device)
# pyre-fixme[16]: `_knn_points` has no attribute `apply`.
p1_dists, p1_idx = _knn_points.apply(
p1, p2, lengths1, lengths2, K, version, norm, return_sorted
)
p2_nn = None
if return_nn:
p2_nn = knn_gather(p2, p1_idx, lengths2)
return _KNN(dists=p1_dists, idx=p1_idx, knn=p2_nn if return_nn else None)
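# Minimal usage sketch (illustrative only): exercising knn_points and knn_gather with
# arbitrary assumed shapes; not called anywhere in this module.
def _example_knn_usage():
    p1 = torch.rand(2, 8, 3)    # 2 clouds with up to 8 points of dimension 3
    p2 = torch.rand(2, 16, 3)   # 2 clouds with up to 16 points of dimension 3
    knn = knn_points(p1, p2, K=4, return_nn=True)
    # knn.dists: (2, 8, 4) squared L2 distances; knn.idx: (2, 8, 4) neighbor indices;
    # knn.knn:   (2, 8, 4, 3) neighbor coordinates gathered from p2
    nn_manual = knn_gather(p2, knn.idx)  # same tensor as knn.knn
    return knn.dists, knn.idx, nn_manual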
def knn_gather(
x: torch.Tensor, idx: torch.Tensor, lengths: Union[torch.Tensor, None] = None
):
"""
A helper function for knn that allows indexing a tensor x with the indices `idx`
returned by `knn_points`.
For example, if `dists, idx = knn_points(p, x, lengths_p, lengths, K)`
where p is a tensor of shape (N, L, D) and x a tensor of shape (N, M, D),
then one can compute the K nearest neighbors of p with `p_nn = knn_gather(x, idx, lengths)`.
It can also be applied for any tensor x of shape (N, M, U) where U != D.
Args:
x: Tensor of shape (N, M, U) containing U-dimensional features to
be gathered.
idx: LongTensor of shape (N, L, K) giving the indices returned by `knn_points`.
lengths: LongTensor of shape (N,) of values in the range [0, M], giving the
length of each example in the batch in x. Or None to indicate that every
example has length M.
Returns:
x_out: Tensor of shape (N, L, K, U) resulting from gathering the elements of x
with idx, s.t. `x_out[n, l, k] = x[n, idx[n, l, k]]`.
"longitude": -87.56917349999999,
"population": "95334",
"rank": "313",
"state": "Alabama",
},
{
"city": "Livonia",
"growth_from_2000_to_2013": "-5.4%",
"latitude": 42.36837,
"longitude": -83.35270969999999,
"population": "95208",
"rank": "314",
"state": "Michigan",
},
{
"city": "New Bedford",
"growth_from_2000_to_2013": "1.2%",
"latitude": 41.6362152,
"longitude": -70.93420499999999,
"population": "95078",
"rank": "315",
"state": "Massachusetts",
},
{
"city": "Vacaville",
"growth_from_2000_to_2013": "5.4%",
"latitude": 38.3565773,
"longitude": -121.9877444,
"population": "94275",
"rank": "316",
"state": "California",
},
{
"city": "Brockton",
"growth_from_2000_to_2013": "-0.3%",
"latitude": 42.0834335,
"longitude": -71.0183787,
"population": "94089",
"rank": "317",
"state": "Massachusetts",
},
{
"city": "Roswell",
"growth_from_2000_to_2013": "15.2%",
"latitude": 34.0232431,
"longitude": -84.3615555,
"population": "94034",
"rank": "318",
"state": "Georgia",
},
{
"city": "Beaverton",
"growth_from_2000_to_2013": "17.0%",
"latitude": 45.48706199999999,
"longitude": -122.8037102,
"population": "93542",
"rank": "319",
"state": "Oregon",
},
{
"city": "Quincy",
"growth_from_2000_to_2013": "5.8%",
"latitude": 42.2528772,
"longitude": -71.0022705,
"population": "93494",
"rank": "320",
"state": "Massachusetts",
},
{
"city": "Sparks",
"growth_from_2000_to_2013": "39.4%",
"latitude": 39.5349112,
"longitude": -119.7526886,
"population": "93282",
"rank": "321",
"state": "Nevada",
},
{
"city": "Yakima",
"growth_from_2000_to_2013": "11.7%",
"latitude": 46.6020711,
"longitude": -120.5058987,
"population": "93257",
"rank": "322",
"state": "Washington",
},
{
"city": "Lee's Summit",
"growth_from_2000_to_2013": "31.2%",
"latitude": 38.9108408,
"longitude": -94.3821724,
"population": "93184",
"rank": "323",
"state": "Missouri",
},
{
"city": "Federal Way",
"growth_from_2000_to_2013": "8.8%",
"latitude": 47.3223221,
"longitude": -122.3126222,
"population": "92734",
"rank": "324",
"state": "Washington",
},
{
"city": "Carson",
"growth_from_2000_to_2013": "2.9%",
"latitude": 33.8316745,
"longitude": -118.281693,
"population": "92599",
"rank": "325",
"state": "California",
},
{
"city": "Santa Monica",
"growth_from_2000_to_2013": "9.6%",
"latitude": 34.0194543,
"longitude": -118.4911912,
"population": "92472",
"rank": "326",
"state": "California",
},
{
"city": "Hesperia",
"growth_from_2000_to_2013": "46.1%",
"latitude": 34.4263886,
"longitude": -117.3008784,
"population": "92147",
"rank": "327",
"state": "California",
},
{
"city": "Allen",
"growth_from_2000_to_2013": "104.0%",
"latitude": 33.1031744,
"longitude": -96.67055030000002,
"population": "92020",
"rank": "328",
"state": "Texas",
},
{
"city": "<NAME>",
"growth_from_2000_to_2013": "74.4%",
"latitude": 35.2327544,
"longitude": -106.6630437,
"population": "91956",
"rank": "329",
"state": "New Mexico",
},
{
"city": "Yuma",
"growth_from_2000_to_2013": "16.2%",
"latitude": 32.6926512,
"longitude": -114.6276916,
"population": "91923",
"rank": "330",
"state": "Arizona",
},
{
"city": "Westminster",
"growth_from_2000_to_2013": "3.9%",
"latitude": 33.7513419,
"longitude": -117.9939921,
"population": "91739",
"rank": "331",
"state": "California",
},
{
"city": "Orem",
"growth_from_2000_to_2013": "8.5%",
"latitude": 40.2968979,
"longitude": -111.6946475,
"population": "91648",
"rank": "332",
"state": "Utah",
},
{
"city": "Lynn",
"growth_from_2000_to_2013": "2.6%",
"latitude": 42.46676300000001,
"longitude": -70.9494938,
"population": "91589",
"rank": "333",
"state": "Massachusetts",
},
{
"city": "Redding",
"growth_from_2000_to_2013": "11.9%",
"latitude": 40.5865396,
"longitude": -122.3916754,
"population": "91119",
"rank": "334",
"state": "California",
},
{
"city": "Spokane Valley",
"growth_from_2000_to_2013": "12.6%",
"latitude": 47.6732281,
"longitude": -117.2393748,
"population": "91113",
"rank": "335",
"state": "Washington",
},
{
"city": "Miami Beach",
"growth_from_2000_to_2013": "3.3%",
"latitude": 25.790654,
"longitude": -80.1300455,
"population": "91026",
"rank": "336",
"state": "Florida",
},
{
"city": "League City",
"growth_from_2000_to_2013": "98.3%",
"latitude": 29.5074538,
"longitude": -95.0949303,
"population": "90983",
"rank": "337",
"state": "Texas",
},
{
"city": "Lawrence",
"growth_from_2000_to_2013": "12.7%",
"latitude": 38.9716689,
"longitude": -95.2352501,
"population": "90811",
"rank": "338",
"state": "Kansas",
},
{
"city": "Santa Barbara",
"growth_from_2000_to_2013": "0.9%",
"latitude": 34.4208305,
"longitude": -119.6981901,
"population": "90412",
"rank": "339",
"state": "California",
},
{
"city": "Plantation",
"growth_from_2000_to_2013": "8.6%",
"latitude": 26.1275862,
"longitude": -80.23310359999999,
"population": "90268",
"rank": "340",
"state": "Florida",
},
{
"city": "Sandy",
"growth_from_2000_to_2013": "1.3%",
"latitude": 40.5649781,
"longitude": -111.8389726,
"population": "90231",
"rank": "341",
"state": "Utah",
},
{
"city": "Sunrise",
"growth_from_2000_to_2013": "4.6%",
"latitude": 26.1669711,
"longitude": -80.25659499999999,
"population": "90116",
"rank": "342",
"state": "Florida",
},
{
"city": "Macon",
"growth_from_2000_to_2013": "-7.3%",
"latitude": 32.8406946,
"longitude": -83.6324022,
"population": "89981",
"rank": "343",
"state": "Georgia",
},
{
"city": "Longmont",
"growth_from_2000_to_2013": "24.4%",
"latitude": 40.1672068,
"longitude": -105.1019275,
"population": "89919",
"rank": "344",
"state": "Colorado",
},
{
"city": "Boca Raton",
"growth_from_2000_to_2013": "7.5%",
"latitude": 26.3683064,
"longitude": -80.1289321,
"population": "89407",
"rank": "345",
"state": "Florida",
},
{
"city": "San Marcos",
"growth_from_2000_to_2013": "60.0%",
"latitude": 33.1433723,
"longitude": -117.1661449,
"population": "89387",
"rank": "346",
"state": "California",
},
{
"city": "Greenville",
"growth_from_2000_to_2013": "41.9%",
"latitude": 35.612661,
"longitude": -77.3663538,
"population": "89130",
"rank": "347",
"state": "North Carolina",
},
{
"city": "Waukegan",
"growth_from_2000_to_2013": "0.5%",
"latitude": 42.3636331,
"longitude": -87.84479379999999,
"population": "88826",
"rank": "348",
"state": "Illinois",
},
{
"city": "Fall River",
"growth_from_2000_to_2013": "-3.7%",
"latitude": 41.7014912,
"longitude": -71.1550451,
"population": "88697",
"rank": "349",
"state": "Massachusetts",
},
{
"city": "Chico",
"growth_from_2000_to_2013": "14.2%",
"latitude": 39.7284944,
"longitude": -121.8374777,
"population": "88077",
"rank": "350",
"state": "California",
},
{
"city": "Newton",
"growth_from_2000_to_2013": "4.9%",
"latitude": 42.3370413,
"longitude": -71.20922139999999,
"population": "87971",
"rank": "351",
"state": "Massachusetts",
},
{
"city": "San Leandro",
"growth_from_2000_to_2013": "10.3%",
"latitude": 37.7249296,
"longitude": -122.1560768,
"population": "87965",
"rank": "352",
"state": "California",
},
{
"city": "Reading",
"growth_from_2000_to_2013": "8.0%",
"latitude": 40.3356483,
"longitude": -75.9268747,
"population": "87893",
"rank": "353",
"state": "Pennsylvania",
},
{
"city": "Norwalk",
"growth_from_2000_to_2013": "5.6%",
"latitude": 41.11774399999999,
"longitude": -73.4081575,
"population": "87776",
"rank": "354",
"state": "Connecticut",
},
{
"city": "<NAME>",
"growth_from_2000_to_2013": "8.6%",
"latitude": 35.3859242,
"longitude": -94.39854749999999,
"population": "87650",
"rank": "355",
"state": "Arkansas",
},
{
"city": "Newport Beach",
"growth_from_2000_to_2013": "10.4%",
"latitude": 33.6189101,
"longitude": -117.9289469,
"population": "87273",
"rank": "356",
"state": "California",
},
{
"city": "Asheville",
"growth_from_2000_to_2013": "19.6%",
"latitude": 35.5950581,
"longitude": -82.5514869,
"population": "87236",
"rank": "357",
"state": "North Carolina",
},
{
"city": "Nashua",
"growth_from_2000_to_2013": "0.4%",
"latitude": 42.7653662,
"longitude": -71.46756599999999,
"population": "87137",
"rank": "358",
"state": "New Hampshire",
},
{
"city": "Edmond",
"growth_from_2000_to_2013": "26.9%",
"latitude": 35.6528323,
"longitude": -97.47809540000002,
"population": "87004",
"rank": "359",
"state": "Oklahoma",
},
{
"city": "Whittier",
"growth_from_2000_to_2013": "3.3%",
"latitude": 33.9791793,
"longitude": -118.032844,
"population": "86635",
"rank": "360",
"state": "California",
},
{
"city": "Nampa",
"growth_from_2000_to_2013": "57.9%",
"latitude": 43.5407172,
"longitude": -116.5634624,
"population": "86518",
"rank": "361",
"state": "Idaho",
},
{
"city": "Bloomington",
"growth_from_2000_to_2013": "1.3%",
"latitude": 44.840798,
"longitude": -93.2982799,
"population": "86319",
"rank": "362",
"state": "Minnesota",
},
{
"city": "Deltona",
"growth_from_2000_to_2013": "23.1%",
"latitude": 28.9005446,
"longitude": -81.26367379999999,
"population": "86290",
"rank": "363",
"state": "Florida",
},
{
"city": "Hawthorne",
"growth_from_2000_to_2013": "2.3%",
"latitude": 33.9164032,
"longitude": -118.3525748,
"population": "86199",
"rank": "364",
"state": "California",
},
{
"city": "Duluth",
"growth_from_2000_to_2013": "-0.1%",
"latitude": 46.78667189999999,
"longitude": -92.1004852,
"population": "86128",
"rank": "365",
"state": "Minnesota",
},
{
"city": "Carmel",
"growth_from_2000_to_2013": "60.4%",
"latitude": 39.978371,
"longitude": -86.1180435,
"population": "85927",
"rank": "366",
"state": "Indiana",
},
{
"city": "Suffolk",
"growth_from_2000_to_2013": "33.5%",
"latitude": 36.7282054,
"longitude": -76.5835621,
"population": "85728",
"rank": "367",
"state": "Virginia",
},
{
"city": "Clifton",
"growth_from_2000_to_2013": "7.9%",
"latitude": 40.8584328,
"longitude": -74.16375529999999,
"population": "85390",
"rank": "368",
"state": "New Jersey",
},
{
"city": "Citrus Heights",
"growth_from_2000_to_2013": "-0.1%",
"latitude": 38.7071247,
"longitude": -121.2810611,
"population": "85285",
"rank": "369",
"state": "California",
},
{
"city": "Livermore",
"growth_from_2000_to_2013": "15.1%",
"latitude": 37.6818745,
"longitude": -121.7680088,
"population": "85156",
"rank": "370",
"state": "California",
},
{
"city": "Tracy",
"growth_from_2000_to_2013": "45.9%",
"latitude": 37.7396513,
"longitude": -121.4252227,
"population": "84691",
"rank": "371",
"state": "California",
},
{
"city": "Alhambra",
"growth_from_2000_to_2013": "-0.7%",
"latitude": 34.095287,
"longitude": -118.1270146,
"population": "84577",
"rank": "372",
"state": "California",
},
{
"city": "Kirkland",
"growth_from_2000_to_2013": "87.5%",
"latitude": 47.6814875,
"longitude": -122.2087353,
"population": "84430",
"rank": "373",
"state": "Washington",
},
{
"city": "Trenton",
"growth_from_2000_to_2013": "-1.2%",
"latitude": 40.2170534,
"longitude": -74.7429384,
"population": "84349",
"rank": "374",
"state": "New Jersey",
},
{
"city": "Ogden",
"growth_from_2000_to_2013": "8.6%",
"latitude": 41.223,
"longitude": -111.9738304,
"population": "84249",
"rank": "375",
"state": "Utah",
},
{
"city": "Hoover",
"growth_from_2000_to_2013": "32.7%",
"latitude": 33.4053867,
"longitude": -86.8113781,
"population": "84126",
"rank": "376",
"state": "Alabama",
},
{
"city": "Cicero",
"growth_from_2000_to_2013": "-1.6%",
"latitude": 41.8455877,
"longitude": -87.7539448,
"population": "84103",
"rank": "377",
"state": "Illinois",
},
{
"city": "Fishers",
"growth_from_2000_to_2013": "114.8%",
"latitude": 39.9567548,
"longitude": -86.01335,
"population": "83891",
"rank": "378",
"state": "Indiana",
},
{
"city": "Sugar Land",
"growth_from_2000_to_2013": "29.1%",
"latitude": 29.6196787,
"longitude": -95.6349463,
"population": "83860",
"rank": "379",
"state": "Texas",
},
{
"city": "Danbury",
"growth_from_2000_to_2013": "11.4%",
"latitude": 41.394817,
"longitude": -73.4540111,
"population": "83684",
"rank": "380",
"state": "Connecticut",
},
{
"city": "Meridian",
"growth_from_2000_to_2013": "127.6%",
"latitude": 43.6121087,
"longitude": -116.3915131,
"population": "83596",
"rank": "381",
"state": "Idaho",
},
{
"city": "Indio",
"growth_from_2000_to_2013": "66.0%",
"latitude": 33.7205771,
"longitude": -116.2155619,
"population": "83539",
"rank": "382",
"state": "California",
},
{
"city": "Concord",
"growth_from_2000_to_2013": "47.4%",
"latitude": 35.4087517,
"longitude": -80.579511,
"population": "83506",
"rank": "383",
"state": "North Carolina",
},
{
"city": "Menifee",
"growth_from_2000_to_2013": "95.0%",
"latitude": 33.6971468,
"longitude": -117.185294,
"population": "83447",
"rank": "384",
"state": "California",
},
{
"city": "Champaign",
"growth_from_2000_to_2013": "18.3%",
"latitude": 40.1164204,
"longitude": -88.2433829,
"population": "83424",
"rank": "385",
"state": "Illinois",
},
{
"city": "Buena Park",
"growth_from_2000_to_2013": "6.1%",
"latitude": 33.8675143,
"longitude": -117.9981181,
"population": "82882",
"rank": "386",
"state": "California",
},
{
"city": "Troy",
"growth_from_2000_to_2013": "2.2%",
"latitude": 42.6064095,
"longitude": -83.1497751,
"population": "82821",
"rank": "387",
"state": "Michigan",
},
{
"city": "O'Fallon",
"growth_from_2000_to_2013": "62.6%",
"latitude": 38.8106075,
"longitude": -90.69984769999999,
"population": "82809",
"rank": "388",
"state": "Missouri",
},
{
"city": "Johns Creek",
"growth_from_2000_to_2013": "36.5%",
"latitude": 34.0289259,
"longitude": -84.198579,
"population": "82788",
"rank": "389",
"state": "Georgia",
},
{
"city": "Bellingham",
"growth_from_2000_to_2013": "21.8%",
"latitude": 48.74908,
"longitude": -122.4781473,
"population": "82631",
"rank": "390",
"state": "Washington",
},
{
"city": "Westland",
"growth_from_2000_to_2013": "-4.7%",
"latitude": 42.32420399999999,
"longitude": -83.400211,
"population": "82578",
"rank": "391",
"state": "Michigan",
#!/usr/bin/env python
# benchmark/run_1yr_fullchem_benchmark.py
"""
run_1yr_fullchem_benchmark.py: Driver script for creating benchmark plots and
testing gcpy 1-year full-chemistry benchmark
capability.
Run this script to generate benchmark comparisons between:
(1) GCC (aka GEOS-Chem "Classic") vs. GCC
(2) GCHP vs GCC
(3) GCHP vs GCHP
You can customize this by editing the settings in the corresponding yaml
config file (e.g., 1yr_fullchem_benchmark.yml).
Calling sequence:
./run_1yr_fullchem_benchmark.py <path-to-configuration-file>
To test gcpy, copy this script and the corresponding yaml config file
anywhere you want to run the test. Set gcpy_test to True at the top
of the script. Benchmark artifacts will be created locally in new folder
called Plots.
Remarks:
By default, matplotlib will try to open an X window for plotting.
If you are running this script in an environment where you do not have
an active X display (such as in a computational queue), then you will
need to use these commands to disable the X-window functionality.
import os
os.environ["QT_QPA_PLATFORM"]="offscreen"
For more information, please see this issue posted at the ipython site:
https://github.com/ipython/ipython/issues/10627
This script corresponds with GCPy 1.1.0. Edit this version ID if releasing
a new version of GCPy.
"""
# =====================================================================
# Imports and global settings (you should not need to edit these)
# =====================================================================
import os
import sys
from os.path import join, exists
import warnings
from shutil import copyfile
from calendar import monthrange
import numpy as np
from joblib import Parallel, delayed
from gcpy.util import get_filepath, get_filepaths, read_config_file
import gcpy.ste_flux as ste
import gcpy.oh_metrics as oh
import gcpy.budget_ox as ox
from gcpy import benchmark as bmk
# Tell matplotlib not to look for an X-window
os.environ["QT_QPA_PLATFORM"] = "offscreen"
# Suppress annoying warning messages
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
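# Illustrative sketch of the configuration structure that run_benchmark() below expects
# from the yaml file. Only the keys accessed in this script are shown; all paths and
# version strings are placeholder assumptions.
_EXAMPLE_CONFIG = {
    "paths": {"main_dir": "/path/to/benchmark/data", "results_dir": "BenchmarkResults"},
    "data": {
        "ref": {
            "gcc": {"version": "GCC_ref", "subdir": "OutputDir"},
            "gchp": {"version": "GCHP_ref", "subdir": "OutputDir", "is_legacy": False},
        },
        "dev": {
            "gcc": {"version": "GCC_dev", "subdir": "OutputDir"},
            "gchp": {"version": "GCHP_dev", "subdir": "OutputDir", "is_legacy": False},
        },
    },
    "options": {
        "gcpy_test": True,
        "comparisons": {
            "gcc_vs_gcc": {"run": True, "dir": "GCC_version_comparison", "tables_subdir": "Tables"},
            "gchp_vs_gcc": {"run": False, "dir": "GCHP_GCC_comparison", "tables_subdir": "Tables"},
            "gchp_vs_gchp": {"run": False, "dir": "GCHP_version_comparison", "tables_subdir": "Tables"},
        },
        "outputs": {
            "plot_conc": True, "plot_emis": True, "plot_jvalues": False, "plot_aod": False,
            "ops_budget_table": False, "aer_budget_table": False, "emis_table": True,
            "mass_table": True, "OH_metrics": True, "ste_table": True,
        },
    },
}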
def run_benchmark(config):
"""
Runs 1 year benchmark with the given configuration settings.
Args:
config : dict
Contains configuration for 1yr benchmark from yaml file.
"""
# This script has a fixed benchmark type
bmk_type = "FullChemBenchmark"
bmk_year_ref = "2019"
bmk_year_dev = "2019"
bmk_mon_strs = ["Jan", "Apr", "Jul", "Oct"]
bmk_mon_inds = [0, 3, 6, 9]
bmk_n_months = len(bmk_mon_strs)
########################################################################
### CONFIGURABLE SETTINGS: ***EDIT AS NEEDED *** ###
########################################################################
# Path to species_database.yml
spcdb_dir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gcc"]["version"]
)
# ======================================================================
# Data directories
# For gchp_vs_gcc_refdir use config["data"]["dev"]["gcc"]["version"], not ref (mps, 6/27/19)
# ======================================================================
# Diagnostics file directory paths
gcc_vs_gcc_refdir = join(
config["paths"]["main_dir"],
config["data"]["ref"]["gcc"]["version"],
config["data"]["ref"]["gcc"]["subdir"],
)
gcc_vs_gcc_devdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gcc"]["version"],
config["data"]["dev"]["gcc"]["subdir"],
)
gchp_vs_gcc_refdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gcc"]["version"],
config["data"]["dev"]["gcc"]["subdir"],
)
gchp_vs_gcc_devdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["data"]["dev"]["gchp"]["subdir"],
)
gchp_vs_gchp_refdir = join(
config["paths"]["main_dir"],
config["data"]["ref"]["gchp"]["version"],
config["data"]["ref"]["gchp"]["subdir"],
)
gchp_vs_gchp_devdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["data"]["dev"]["gchp"]["subdir"],
)
# Restart file directory paths
gcc_vs_gcc_refrstdir = join(
config["paths"]["main_dir"], config["data"]["ref"]["gcc"]["version"], "restarts"
)
gcc_vs_gcc_devrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gcc"]["version"], "restarts"
)
gchp_vs_gcc_refrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gcc"]["version"], "restarts"
)
gchp_vs_gcc_devrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gchp"]["version"]
)
gchp_vs_gchp_refrstdir = join(
config["paths"]["main_dir"], config["data"]["ref"]["gchp"]["version"]
)
gchp_vs_gchp_devrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gchp"]["version"]
)
# Log file directories -- GEOS-Chem "Classic" only
gcc_vs_gcc_reflogdir = join(
config["paths"]["main_dir"], config["data"]["ref"]["gcc"]["version"], "logs"
)
gcc_vs_gcc_devlogdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gcc"]["version"], "logs"
)
# ======================================================================
# Benchmark output directories
# ======================================================================
# Plot directories
if config["options"]["gcpy_test"]:
mainresultsdir = join(".", config["paths"]["results_dir"])
gcc_vs_gcc_resultsdir = join(
mainresultsdir, config["options"]["comparisons"]["gcc_vs_gcc"]["dir"]
)
gchp_vs_gchp_resultsdir = join(
mainresultsdir, config["options"]["comparisons"]["gchp_vs_gchp"]["dir"]
)
gchp_vs_gcc_resultsdir = join(
mainresultsdir, "GCHP_GCC_comparison"
)
if not exists(mainresultsdir):
os.mkdir(mainresultsdir)
# Make copy of benchmark script in results directory
curfile = os.path.realpath(__file__)
dest = join(mainresultsdir, curfile.split("/")[-1])
if not exists(dest):
copyfile(curfile, dest)
else:
gcc_vs_gcc_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gcc"]["version"],
config["paths"]["results_dir"],
)
gchp_vs_gchp_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["paths"]["results_dir"],
config["options"]["comparisons"]["gchp_vs_gchp"]["dir"],
)
gchp_vs_gcc_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["paths"]["results_dir"],
config["options"]["comparisons"]["gchp_vs_gcc"]["dir"],
)
base_gchp_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["paths"]["results_dir"],
)
# make results directories that don't exist
for resdir, plotting_type in zip(
[
gcc_vs_gcc_resultsdir,
base_gchp_resultsdir,
gchp_vs_gchp_resultsdir,
gchp_vs_gcc_resultsdir,
],
[
config["options"]["comparisons"]["gcc_vs_gcc"]["run"],
config["options"]["comparisons"]["gchp_vs_gcc"]["run"]
or config["options"]["comparisons"]["gchp_vs_gchp"]["run"],
config["options"]["comparisons"]["gchp_vs_gchp"]["run"],
config["options"]["comparisons"]["gchp_vs_gcc"]["run"],
],
):
if plotting_type and not exists(resdir):
os.mkdir(resdir)
if resdir in [gcc_vs_gcc_resultsdir, base_gchp_resultsdir]:
# Make copy of benchmark script in results directory
curfile = os.path.realpath(__file__)
dest = join(resdir, curfile.split("/")[-1])
if not exists(dest):
copyfile(curfile, dest)
# Tables directories
gcc_vs_gcc_tablesdir = join(
gcc_vs_gcc_resultsdir,
config["options"]["comparisons"]["gcc_vs_gcc"]["tables_subdir"],
)
gchp_vs_gcc_tablesdir = join(
gchp_vs_gcc_resultsdir,
config["options"]["comparisons"]["gchp_vs_gcc"]["tables_subdir"],
)
gchp_vs_gchp_tablesdir = join(
gchp_vs_gchp_resultsdir,
config["options"]["comparisons"]["gchp_vs_gchp"]["tables_subdir"],
)
# Budget directories
gcc_vs_gcc_budgetdir = join(gcc_vs_gcc_resultsdir, "Budget")
gchp_vs_gcc_budgetdir = join(gchp_vs_gcc_resultsdir, "Budget")
gchp_vs_gchp_budgetdir = join(gchp_vs_gchp_resultsdir, "Budget")
# ======================================================================
# Plot title strings
# For gchp_vs_gcc_refstr use config["data"]["dev"]["gcc"]["version"], not ref (mps, 6/27/19)
# ======================================================================
gcc_vs_gcc_refstr = config["data"]["ref"]["gcc"]["version"]
gcc_vs_gcc_devstr = config["data"]["dev"]["gcc"]["version"]
gchp_vs_gcc_refstr = config["data"]["dev"]["gcc"]["version"]
gchp_vs_gcc_devstr = config["data"]["dev"]["gchp"]["version"]
gchp_vs_gchp_refstr = config["data"]["ref"]["gchp"]["version"]
gchp_vs_gchp_devstr = config["data"]["dev"]["gchp"]["version"]
########################################################################
### THE REST OF THESE SETTINGS SHOULD NOT NEED TO BE CHANGED ###
########################################################################
def gchp_metname(prior_to_13):
"""
Returns the proper name for the GCHP StateMet collection.
"""
if prior_to_13:
return "StateMet_avg"
return "StateMet"
# =====================================================================
# Dates and times -- ref data
# =====================================================================
# Month/year strings for use in table subdirectories (e.g. Jan2016)
bmk_mon_yr_strs_ref = [v + bmk_year_ref for v in bmk_mon_strs]
# Get days per month and seconds per month for ref
sec_per_month_ref = np.zeros(12)
days_per_month_ref = np.zeros(12)
for t in range(12):
days_per_month_ref[t] = monthrange(int(bmk_year_ref), t + 1)[1]
sec_per_month_ref[t] = days_per_month_ref[t] * 86400.0
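# e.g. with bmk_year_ref = "2019": January gives days_per_month_ref[0] = 31 and
# sec_per_month_ref[0] = 31 * 86400.0 = 2678400.0 seconds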
# Get all months array of start datetimes for benchmark year
bmk_start_ref = np.datetime64(bmk_year_ref + "-01-01")
bmk_end_ref = np.datetime64("{}-01-01".format(int(bmk_year_ref) + 1))
all_months_ref = np.arange(
bmk_start_ref, bmk_end_ref, step=np.timedelta64(1, "M"), dtype="datetime64[M]"
)
all_months_gchp_ref = all_months_ref
# Reset all months datetime array if GCHP ref is legacy filename format.
# Legacy format uses time-averaging period mid-point not start.
if config["data"]["ref"]["gchp"]["is_legacy"]:
all_months_gchp_ref = np.zeros(12, dtype="datetime64[h]")
for t in range(12):
middle_hr = int(days_per_month_ref[t] * 24 / 2)
delta = np.timedelta64(middle_hr, "h")
all_months_gchp_ref[t] = all_months_ref[t].astype("datetime64[h]") + delta
# Get subset of month datetimes and seconds per month for only benchmark months
bmk_mons_ref = all_months_ref[bmk_mon_inds]
bmk_mons_gchp_ref = all_months_gchp_ref[bmk_mon_inds]
bmk_sec_per_month_ref = sec_per_month_ref[bmk_mon_inds]
# =====================================================================
# Dates and times -- Dev data
# =====================================================================
# Month/year strings for use in table subdirectories (e.g. Jan2016)
bmk_mon_yr_strs_dev = [v + bmk_year_dev for v in bmk_mon_strs]
# Get days per month and seconds per month for dev
sec_per_month_dev = np.zeros(12)
days_per_month_dev = np.zeros(12)
for t in range(12):
days_per_month_dev[t] = monthrange(int(bmk_year_dev), t + 1)[1]
sec_per_month_dev[t] = days_per_month_dev[t] * 86400.0
# Get all months array of start datetimes for benchmark year
bmk_start_dev = np.datetime64(bmk_year_dev + "-01-01")
bmk_end_dev = np.datetime64("{}-01-01".format(int(bmk_year_dev) + 1))
all_months_dev = np.arange(
bmk_start_dev, bmk_end_dev, step=np.timedelta64(1, "M"), dtype="datetime64[M]"
)
all_months_gchp_dev = all_months_dev
# Reset all months datetime array if GCHP dev is legacy filename format.
# Legacy format uses time-averaging period mid-point not start.
if config["data"]["dev"]["gchp"]["is_legacy"]:
all_months_gchp_dev = np.zeros(12, dtype="datetime64[h]")
for t in range(12):
middle_hr = int(days_per_month_dev[t] * 24 / 2)
delta = np.timedelta64(middle_hr, "h")
all_months_gchp_dev[t] = all_months_dev[t].astype("datetime64[h]") + delta
# Get subset of month datetimes and seconds per month for only benchmark months
bmk_mons_dev = all_months_dev[bmk_mon_inds]
bmk_mons_gchp_dev = all_months_gchp_dev[bmk_mon_inds]
bmk_sec_per_month_dev = sec_per_month_dev[bmk_mon_inds]
# ======================================================================
# Print the list of plots & tables to the screen
# ======================================================================
print("The following plots and tables will be created for {}:".format(bmk_type))
if config["options"]["outputs"]["plot_conc"]:
print(" - Concentration plots")
if config["options"]["outputs"]["plot_emis"]:
print(" - Emissions plots")
if config["options"]["outputs"]["plot_jvalues"]:
print(" - J-values (photolysis rates) plots")
if config["options"]["outputs"]["plot_aod"]:
print(" - Aerosol optical depth plots")
if config["options"]["outputs"]["ops_budget_table"]:
print(" - Operations budget tables")
if config["options"]["outputs"]["aer_budget_table"]:
print(" - Aerosol budget/burden tables")
if config["options"]["outputs"]["emis_table"]:
print(" - Table of emissions totals by species and inventory")
if config["options"]["outputs"]["mass_table"]:
print(" - Table of species mass")
if config["options"]["outputs"]["OH_metrics"]:
print(" - Table of OH metrics")
if config["options"]["outputs"]["ste_table"]:
print(" - Table of strat-trop exchange")
print("Comparisons will be made for the following combinations:")
if config["options"]["comparisons"]["gcc_vs_gcc"]["run"]:
print(" - GCC vs GCC")
if config["options"]["comparisons"]["gchp_vs_gcc"]["run"]:
print(" - GCHP vs GCC")
if config["options"]["comparisons"]["gchp_vs_gchp"]["run"]:
print(" - GCHP vs GCHP")
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCC vs GCC benchmark plots and tables
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if config["options"]["comparisons"]["gcc_vs_gcc"]["run"]:
# ==================================================================
# GCC vs GCC filepaths for StateMet collection data
# ==================================================================
refmet = get_filepaths(gcc_vs_gcc_refdir, "StateMet", all_months_ref)[0]
devmet = get_filepaths(gcc_vs_gcc_devdir, "StateMet", all_months_dev)[0]
# ==================================================================
# GCC vs GCC species concentration plots
#
# Includes lumped species and separates by category if plot_by_spc_cat
# is true; otherwise excludes lumped species and writes to one file.
# --------------------------------------------------------------
if config["options"]["outputs"]["plot_conc"]:
print("\n%%% Creating GCC vs. GCC concentration plots %%%")
# --------------------------------------------------------------
# GCC vs GCC species concentration plots: Annual mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(gcc_vs_gcc_refdir, "SpeciesConc", all_months_ref)[0]
dev = get_filepaths(gcc_vs_gcc_devdir, "SpeciesConc", all_months_dev)[0]
# Create plots
print("\nCreating plots for | |
of `str`
list of strings to parse. The default is taken from `sys.argv`.
Returns
-------
`argparse.Namespace`
namespace object containing processed given arguments and/or default
options.
"""
description = "Produce perturbed files containing sampled parameters that "
"represent the information\nstored in the evaluated nuclear "
"data covariances"
parser = argparse.ArgumentParser(
prog="sandy",
description=description,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument('file',
type=lambda x: is_valid_file(parser, x),
help="ENDF-6 or PENDF format file")
parser.add_argument('--acer',
default=False,
action="store_true",
help="for each perturbed file, produce ACE files\n"
"(argument file must be in ENDF-6 format, not PENDF)\n(argument temperature is required)\n(default = False)")
parser.add_argument('--cov', '-C',
type=lambda x: is_valid_file(parser, x),
help="file containing covariances")
parser.add_argument('--cov33csv',
type=lambda x: is_valid_file(parser, x),
help="file containing xs/nubar covariances in csv "
"format")
parser.add_argument('--debug',
default=False,
action="store_true",
help="turn on debug mode")
parser.add_argument('--eig',
type=int,
default=10,
metavar="N",
help="print the first N eigenvalues of the evaluated covariance matrices\n(default = do not print)")
parser.add_argument('--energy-sequence', '-E',
type=int,
metavar="EL",
default=49,
help=argparse.SUPPRESS)
parser.add_argument('--errorr',
default=False,
action="store_true",
help="run NJOY module ERRORR to produce covariance "
"matrix for xs data (default = False)")
parser.add_argument('--fission-yields', '-F',
default=False,
action="store_true",
help="input <file> contains fission yields")
parser.add_argument('--mat',
type=int,
default=list(range(1, 10000)),
action='store',
nargs="+",
metavar="{1,..,9999}",
help="draw samples only from the selected MAT "
"sections (default = keep all)")
parser.add_argument('--max-polynomial', '-P',
type=int,
help="Maximum order of Legendre polynomial coefficients considered for sampling (default = all)")
parser.add_argument('--mf',
type=int,
default=[31, 33, 34, 35],
action='store',
nargs="+",
metavar="{31,33,34,35}",
help="draw samples only from the selected MF sections "
"(default = keep all)")
parser.add_argument('--mt',
type=int,
default=list(range(1, 1000)),
action='store',
nargs="+",
metavar="{1,..,999}",
help="draw samples only from the selected MT sections "
"(default = keep all)")
parser.add_argument('--njoy',
type=lambda x: is_valid_file(parser, x),
default=None,
help="NJOY executable "
"(default search PATH, and env variable NJOY)")
parser.add_argument('--outdir', '-D',
metavar="DIR",
default=os.getcwd(),
type=lambda x: is_valid_dir(parser, x, mkdir=True),
help="target directory where outputs are stored\n(default = current working directory)\nif it does not exist it will be created")
parser.add_argument('--outname', '-O',
type=str,
help="basename for the output files "
"(default is the the basename of <file>.)")
parser.add_argument('--processes', '-N',
type=int,
default=1,
help="number of worker processes (default = 1)")
parser.add_argument('--samples', '-S',
type=int,
default=200,
help="number of samples (default = 200)")
parser.add_argument('--seed31',
type=int,
default=None,
metavar="S31",
help="seed for random sampling of MF31 covariance "
"matrix (default = random)")
parser.add_argument('--seed33',
type=int,
default=None,
metavar="S33",
help="seed for random sampling of MF33 covariance "
"matrix (default = random)")
parser.add_argument('--seed34',
type=int,
default=None,
metavar="S34",
help="seed for random sampling of MF34 covariance "
"matrix (default = random)")
parser.add_argument('--seed35',
type=int,
default=None,
metavar="S35",
help="seed for random sampling of MF35 covariance "
"matrix (default = random)")
parser.add_argument('--temperatures', '-T',
default=[],
type=float,
action='store',
nargs="+",
metavar="T",
help="for each perturbed file, produce ACE files at "
"given temperatures")
parser.add_argument("--version", "-v",
action='version',
version='%(prog)s {}'.format(sandy.__version__),
help="SANDY's version.")
init = parser.parse_known_args(args=iargs)[0]
if init.acer and not init.temperatures:
parser.error("--acer requires --temperatures")
if init.acer and sandy.formats.get_file_format(init.file) != "endf6":
parser.error("--acer requires file in 'endf6' format")
return init
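def _example_parse_usage():
    """
    Illustrative sketch only: parse() can be driven programmatically by passing the
    argument list explicitly. 'u235.endf' is a placeholder name; the file must exist,
    otherwise is_valid_file() aborts with a parser error.
    """
    init = parse(["u235.endf", "--samples", "100", "--processes", "4", "--seed33", "1"])
    return init.samples, init.outdir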
def extract_samples(ftape, covtape):
"""
Draw samples using all covariance sections in the given tape.
"""
global init
# EXTRACT FY PERTURBATIONS FROM COV FILE
PertFy = pd.DataFrame()
if 8 in covtape.mf and 454 in ftape.mt:
fy = ftape.get_fy(listmat=init.mat, listmt=init.mt)
if not fy.empty:
index = fy.index.to_frame(index=False)
dfperts = []
for mat,dfmat in index.groupby("MAT"):
for mt,dfmt in dfmat.groupby("MT"):
for e,dfe in dfmt.groupby("E"):
fycov = fy.get_cov(mat, mt, e)
pert = fycov.get_samples(init.samples, eig=0)
dfperts.append(pert)
PertFy = FySamples(pd.concat(dfperts))
if init.debug:
PertFy.to_csv("perts_mf8.csv")
# EXTRACT NUBAR PERTURBATIONS FROM ENDF6 FILE
PertNubar = pd.DataFrame()
if 31 in init.mf and 31 in ftape.mf:
nubarcov = XsCov.from_endf6(covtape.filter_by(listmat=init.mat, listmf=[31], listmt=init.mt))
if not nubarcov.empty:
PertNubar = nubarcov.get_samples(init.samples, eig=init.eig)
if init.debug:
PertNubar.to_csv("perts_mf31.csv")
# EXTRACT PERTURBATIONS FROM EDISTR COV FILE
PertEdistr = pd.DataFrame()
if 35 in init.mf and 35 in ftape.mf:
edistrcov = ftape.get_edistr_cov()
if not edistrcov.empty:
PertEdistr = edistrcov.get_samples(init.samples, eig=init.eig)
if init.debug:
PertEdistr.to_csv("perts_mf35.csv")
# EXTRACT PERTURBATIONS FROM LPC COV FILE
PertLpc = pd.DataFrame()
if 34 in init.mf and 34 in covtape.mf:
lpccov = ftape.get_lpc_cov()
if not lpccov.empty:
if init.max_polynomial:
lpccov = lpccov.filter_p(init.max_polynomial)
PertLpc = lpccov.get_samples(init.samples, eig=init.eig)
if init.debug:
PertLpc.to_csv("perts_mf34.csv")
# EXTRACT XS PERTURBATIONS FROM COV FILE
PertXs = pd.DataFrame()
if 33 in init.mf and 33 in covtape.mf:
# This part is to get the pendf file
if ftape.get_file_format() == "endf6":
endf6 = sandy.Endf6.from_file(init.file)
pendf = endf6.get_pendf(njoy=init.njoy)
with tempfile.TemporaryDirectory() as td:
dst = os.path.join(td, "merged")
endf6.merge_pendf(pendf).to_file(dst)
ftape = read_formatted_file(dst)
if init.errorr:
if len(ftape.mat) > 1:
# Limit imposed by running ERRORR to get covariance matrices
raise sandy.Error("More than one MAT number was found")
endf6 = sandy.Endf6.from_file(init.file)
covtape = endf6.get_errorr(njoy=init.njoy)
# with tempfile.TemporaryDirectory() as td:
# outputs = njoy.process(init.file, broadr=False, thermr=False,
# unresr=False, heatr=False, gaspr=False,
# purr=False, errorr=init.errorr, acer=False,
# wdir=td, keep_pendf=True, exe=init.njoy,
# temperatures=[0], suffixes=[0], err=0.005)[2]
# ptape = read_formatted_file(outputs["tape30"])
# if init.debug: shutil.move(outputs["tape30"], os.path.join(init.outdir, "tape30"))
# if init.errorr:
# covtape = read_formatted_file(outputs["tape33"]) # WARNING: by doing this we delete the original covtape
# if init.debug: shutil.move(outputs["tape33"], os.path.join(init.outdir, "tape33"))
# ftape = ftape.delete_sections((None, 3, None)). \
# add_sections(ptape.filter_by(listmf=[3])). \
# add_sections(ptape.filter_by(listmf=[1], listmt=[451]))
listmt = sorted(set(init.mt + [451])) # ERRORR needs MF1/MT451 to get the energy grid
covtape = covtape.filter_by(listmat=init.mat, listmf=[1,33], listmt=listmt)
xscov = XsCov(covtape.get_cov(multigroup=False).data) if isinstance(covtape, sandy.errorr.Errorr) else XsCov.from_endf6(covtape)
if not xscov.empty:
PertXs = xscov.get_samples(init.samples, eig=init.eig, seed=init.seed33)
if init.debug:
PertXs.to_csv(os.path.join(init.outdir, "perts_mf33.csv"))
return ftape, covtape, PertNubar, PertXs, PertLpc, PertEdistr, PertFy
def sampling_csv33(ftape, csv):
cov = sandy.CategoryCov.from_csv(csv)
return sandy.XsCov(cov).get_samples(
init.samples,
eig=init.eig,
seed=init.seed33
)
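# Conceptual numpy illustration of what sampling() below does: draw relative
# perturbation coefficients from a multivariate normal with unit mean and the
# (relative) covariance matrix, then scale the pointwise data. C_rel and xs are
# toy assumptions, not evaluated data.
def _example_relative_sampling(nsmp=3, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    xs = np.array([1.0, 2.0, 4.0])              # toy pointwise cross sections
    C_rel = 0.01 * np.array([[1.0, 0.5, 0.0],   # toy relative covariance matrix
                             [0.5, 1.0, 0.0],
                             [0.0, 0.0, 2.0]])
    coeffs = rng.multivariate_normal(mean=np.ones(3), cov=C_rel, size=nsmp)
    return xs * coeffs                          # one perturbed cross-section set per row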
def sampling(iargs=None):
"""
Construct multivariate normal distributions with a unit vector for
mean and with relative covariances taken from the evaluated files.
Perturbation factors are sampled with the same multigroup structure of
the covariance matrix, and are applied to the pointwise data to produce
the perturbed files.
"""
global init, pnu, pxs, plpc, pchi, pfy, tape
init = parse(iargs)
ftape = read_formatted_file(init.file)
if init.cov33csv:
logging.warning("found argument '--cov33csv', will skip any other"
" covariance")
catcov = sandy.CategoryCov.from_csv(
init.cov33csv,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
covtape = xscov = sandy.XsCov(catcov.data)
# This part is to get the pendf file
if ftape.get_file_format() == "endf6":
endf6 = sandy.Endf6.from_file(init.file)
pendf = endf6.get_pendf(njoy=init.njoy)
with tempfile.TemporaryDirectory() as td:
dst = os.path.join(td, "merged")
endf6.merge_pendf(pendf).to_file(dst)
ftape = read_formatted_file(dst)
# if ftape.get_file_format() == "endf6":
# with tempfile.TemporaryDirectory() as td:
# outputs = njoy.process(init.file, broadr=False, thermr=False,
# unresr=False, heatr=False, gaspr=False,
# purr=False, errorr=init.errorr, acer=False,
# wdir=td, keep_pendf=True, exe=init.njoy,
# temperatures=[0], suffixes=[0], err=0.005)[2]
# ptape = read_formatted_file(outputs["tape30"])
# if init.debug:
# shutil.move(outputs["tape30"], os.path.join(init.outdir, "tape30"))
# ftape = ftape.delete_sections((None, 3, None)). \
# add_sections(ptape.filter_by(listmf=[3])). \
# add_sections(ptape.filter_by(listmf=[1], listmt=[451]))
pxs = xscov.get_samples(init.samples, eig=init.eig, seed=init.seed33)
cn = sandy.Samples(pxs).condition_number
print(f"Condition number : {cn:>15}")
pnu = plpc = pchi = pfy = pd.DataFrame()
if init.debug:
pxs.to_csv(os.path.join(init.outdir, "perts_mf33.csv"))
else:
covtape = read_formatted_file(init.cov) if init.cov else ftape
ftape, covtape, pnu, pxs, plpc, pchi, pfy = extract_samples(ftape, covtape)
df = {}
if pnu.empty and pxs.empty and plpc.empty and pchi.empty and pfy.empty:
logging.warn("no covariance section was selected/found")
return ftape, covtape, df
# APPLY PERTURBATIONS BY MAT
for imat, (mat, tape) in enumerate(sorted(ftape.groupby('MAT'))):
skip_title = False if imat == 0 else True
skip_fend = False if imat == len(ftape.mat) - 1 else True
tape = Endf6(tape)
kw = dict(skip_title=skip_title, skip_fend=skip_fend)
if platform.system() == "Windows":
proc = 1
logging.info("Running on Windows does not allow parallel "
"processing")
else:
proc = init.processes
seq = range(1, init.samples + 1)
if proc == 1:
outs = {i: _sampling_mp(i, **kw) for i in seq}
else:
pool = mp.Pool(processes=proc)
outs = {i: pool.apply_async(_sampling_mp, (i,), kw) for i in seq}
outs = {i: out.get() for i, out in outs.items()}
pool.close()
pool.join()
df.update({mat: outs})
# DUMP TO FILES
frame = pd.DataFrame(df)
frame.index.name = "SMP"
frame.columns.name = "MAT"
frame = frame.stack()
outname = init.outname if init.outname else os.path.split(init.file)[1]
for ismp,dfsmp in frame.groupby("SMP"):
output = os.path.join(init.outdir, '{}-{}'.format(outname, ismp))
with open(output, 'w') as f:
for mat,dfmat in dfsmp.groupby("MAT"):
f.write(frame[ismp,mat])
# PRODUCE ACE FILES
if init.acer:
seq = range(1, init.samples + 1)
if init.processes == 1:
for i in seq:
_process_into_ace(i)
else:
pool = mp.Pool(processes=init.processes)
outs = {i: pool.apply_async(_process_into_ace, (i,)) for i in seq}
pool.close()
pool.join()
return ftape, covtape, df
pdb.set_trace()
df = {}
if init.fission_yields:
# EXTRACT FY PERTURBATIONS FROM COV FILE
fy = ftape.get_fy(listmat=init.mat, listmt=init.mt)
if fy.empty:
logging.warn("no fission yield section was selected/found")
return
index = fy.index.to_frame(index=False)
dfperts = []
for mat,dfmat in index.groupby("MAT"):
for mt,dfmt in dfmat.groupby("MT"):
for e,dfe in dfmt.groupby("E"):
fycov = fy.get_cov(mat, mt, e)
_cv.cvGetND(*args)
def cvGetReal1D(*args):
"""cvGetReal1D(CvArr arr, int idx0) -> double"""
return _cv.cvGetReal1D(*args)
def cvGetReal2D(*args):
"""cvGetReal2D(CvArr arr, int idx0, int idx1) -> double"""
return _cv.cvGetReal2D(*args)
def cvGetReal3D(*args):
"""cvGetReal3D(CvArr arr, int idx0, int idx1, int idx2) -> double"""
return _cv.cvGetReal3D(*args)
def cvGetRealND(*args):
"""cvGetRealND(CvArr arr, int idx) -> double"""
return _cv.cvGetRealND(*args)
def cvSet1D(*args):
"""cvSet1D(CvArr arr, int idx0, CvScalar value)"""
return _cv.cvSet1D(*args)
def cvSet2D(*args):
"""cvSet2D(CvArr arr, int idx0, int idx1, CvScalar value)"""
return _cv.cvSet2D(*args)
def cvSet3D(*args):
"""cvSet3D(CvArr arr, int idx0, int idx1, int idx2, CvScalar value)"""
return _cv.cvSet3D(*args)
def cvSetND(*args):
"""cvSetND(CvArr arr, int idx, CvScalar value)"""
return _cv.cvSetND(*args)
def cvSetReal1D(*args):
"""cvSetReal1D(CvArr arr, int idx0, double value)"""
return _cv.cvSetReal1D(*args)
def cvSetReal2D(*args):
"""cvSetReal2D(CvArr arr, int idx0, int idx1, double value)"""
return _cv.cvSetReal2D(*args)
def cvSetReal3D(*args):
"""cvSetReal3D(CvArr arr, int idx0, int idx1, int idx2, double value)"""
return _cv.cvSetReal3D(*args)
def cvSetRealND(*args):
"""cvSetRealND(CvArr arr, int idx, double value)"""
return _cv.cvSetRealND(*args)
def cvClearND(*args):
"""cvClearND(CvArr arr, int idx)"""
return _cv.cvClearND(*args)
def cvGetMat(*args):
"""cvGetMat(CvArr arr, CvMat header, int coi=None, int allowND=0) -> CvMat"""
return _cv.cvGetMat(*args)
def cvReshapeMatND(*args):
"""
cvReshapeMatND(CvArr arr, int sizeof_header, CvArr header, int new_cn,
int new_dims, int new_sizes) -> CvArr
"""
return _cv.cvReshapeMatND(*args)
def cvReshape(*args):
"""cvReshape(CvArr arr, CvMat header, int new_cn, int new_rows=0) -> CvMat"""
return _cv.cvReshape(*args)
def cvRepeat(*args):
"""cvRepeat(CvArr src, CvArr dst)"""
return _cv.cvRepeat(*args)
def cvCreateData(*args):
"""cvCreateData(CvArr arr)"""
return _cv.cvCreateData(*args)
def cvReleaseData(*args):
"""cvReleaseData(CvArr arr)"""
return _cv.cvReleaseData(*args)
def cvSetData(*args):
"""cvSetData(CvArr arr, void data, int step)"""
return _cv.cvSetData(*args)
def cvGetRawData(*args):
"""cvGetRawData(CvArr arr, uchar data, int step=None, CvSize roi_size=None)"""
return _cv.cvGetRawData(*args)
def cvGetSize(*args):
"""cvGetSize(CvArr arr) -> CvSize"""
return _cv.cvGetSize(*args)
def cvCopy(*args):
"""cvCopy(CvArr src, CvArr dst, CvArr mask=None)"""
return _cv.cvCopy(*args)
def cvSet(*args):
"""cvSet(CvArr arr, CvScalar value, CvArr mask=None)"""
return _cv.cvSet(*args)
def cvSetZero(*args):
"""cvSetZero(CvArr arr)"""
return _cv.cvSetZero(*args)
def cvSplit(*args):
"""cvSplit(CvArr src, CvArr dst0, CvArr dst1, CvArr dst2, CvArr dst3)"""
return _cv.cvSplit(*args)
def cvMerge(*args):
"""cvMerge(CvArr src0, CvArr src1, CvArr src2, CvArr src3, CvArr dst)"""
return _cv.cvMerge(*args)
def cvMixChannels(*args):
"""
cvMixChannels(CvArr src, int src_count, CvArr dst, int dst_count,
int from_to, int pair_count)
"""
return _cv.cvMixChannels(*args)
def cvConvertScale(*args):
"""cvConvertScale(CvArr src, CvArr dst, double scale=1, double shift=0)"""
return _cv.cvConvertScale(*args)
def cvConvertScaleAbs(*args):
"""cvConvertScaleAbs(CvArr src, CvArr dst, double scale=1, double shift=0)"""
return _cv.cvConvertScaleAbs(*args)
def cvCheckTermCriteria(*args):
"""cvCheckTermCriteria(CvTermCriteria criteria, double default_eps, int default_max_iters) -> CvTermCriteria"""
return _cv.cvCheckTermCriteria(*args)
def cvAdd(*args):
"""cvAdd(CvArr src1, CvArr src2, CvArr dst, CvArr mask=None)"""
return _cv.cvAdd(*args)
def cvAddS(*args):
"""cvAddS(CvArr src, CvScalar value, CvArr dst, CvArr mask=None)"""
return _cv.cvAddS(*args)
def cvSub(*args):
"""cvSub(CvArr src1, CvArr src2, CvArr dst, CvArr mask=None)"""
return _cv.cvSub(*args)
def cvSubS(*args):
"""cvSubS(CvArr src, CvScalar value, CvArr dst, CvArr mask=None)"""
return _cv.cvSubS(*args)
def cvSubRS(*args):
"""cvSubRS(CvArr src, CvScalar value, CvArr dst, CvArr mask=None)"""
return _cv.cvSubRS(*args)
def cvMul(*args):
"""cvMul(CvArr src1, CvArr src2, CvArr dst, double scale=1)"""
return _cv.cvMul(*args)
def cvDiv(*args):
"""cvDiv(CvArr src1, CvArr src2, CvArr dst, double scale=1)"""
return _cv.cvDiv(*args)
def cvScaleAdd(*args):
"""cvScaleAdd(CvArr src1, CvScalar scale, CvArr src2, CvArr dst)"""
return _cv.cvScaleAdd(*args)
def cvAddWeighted(*args):
"""
cvAddWeighted(CvArr src1, double alpha, CvArr src2, double beta,
double gamma, CvArr dst)
"""
return _cv.cvAddWeighted(*args)
def cvDotProduct(*args):
"""cvDotProduct(CvArr src1, CvArr src2) -> double"""
return _cv.cvDotProduct(*args)
def cvAnd(*args):
"""cvAnd(CvArr src1, CvArr src2, CvArr dst, CvArr mask=None)"""
return _cv.cvAnd(*args)
def cvAndS(*args):
"""cvAndS(CvArr src, CvScalar value, CvArr dst, CvArr mask=None)"""
return _cv.cvAndS(*args)
def cvOr(*args):
"""cvOr(CvArr src1, CvArr src2, CvArr dst, CvArr mask=None)"""
return _cv.cvOr(*args)
def cvOrS(*args):
"""cvOrS(CvArr src, CvScalar value, CvArr dst, CvArr mask=None)"""
return _cv.cvOrS(*args)
def cvXor(*args):
"""cvXor(CvArr src1, CvArr src2, CvArr dst, CvArr mask=None)"""
return _cv.cvXor(*args)
def cvXorS(*args):
"""cvXorS(CvArr src, CvScalar value, CvArr dst, CvArr mask=None)"""
return _cv.cvXorS(*args)
def cvNot(*args):
"""cvNot(CvArr src, CvArr dst)"""
return _cv.cvNot(*args)
def cvInRange(*args):
"""cvInRange(CvArr src, CvArr lower, CvArr upper, CvArr dst)"""
return _cv.cvInRange(*args)
def cvInRangeS(*args):
"""cvInRangeS(CvArr src, CvScalar lower, CvScalar upper, CvArr dst)"""
return _cv.cvInRangeS(*args)
def cvCmp(*args):
"""cvCmp(CvArr src1, CvArr src2, CvArr dst, int cmp_op)"""
return _cv.cvCmp(*args)
def cvCmpS(*args):
"""cvCmpS(CvArr src, double value, CvArr dst, int cmp_op)"""
return _cv.cvCmpS(*args)
def cvMin(*args):
"""cvMin(CvArr src1, CvArr src2, CvArr dst)"""
return _cv.cvMin(*args)
def cvMax(*args):
"""cvMax(CvArr src1, CvArr src2, CvArr dst)"""
return _cv.cvMax(*args)
def cvMinS(*args):
"""cvMinS(CvArr src, double value, CvArr dst)"""
return _cv.cvMinS(*args)
def cvMaxS(*args):
"""cvMaxS(CvArr src, double value, CvArr dst)"""
return _cv.cvMaxS(*args)
def cvAbsDiff(*args):
"""cvAbsDiff(CvArr src1, CvArr src2, CvArr dst)"""
return _cv.cvAbsDiff(*args)
def cvAbsDiffS(*args):
"""cvAbsDiffS(CvArr src, CvArr dst, CvScalar value)"""
return _cv.cvAbsDiffS(*args)
def cvCartToPolar(*args):
"""
cvCartToPolar(CvArr x, CvArr y, CvArr magnitude, CvArr angle=None,
int angle_in_degrees=0)
"""
return _cv.cvCartToPolar(*args)
def cvPolarToCart(*args):
"""cvPolarToCart(CvArr magnitude, CvArr angle, CvArr x, CvArr y, int angle_in_degrees=0)"""
return _cv.cvPolarToCart(*args)
def cvPow(*args):
"""cvPow(CvArr src, CvArr dst, double power)"""
return _cv.cvPow(*args)
def cvExp(*args):
"""cvExp(CvArr src, CvArr dst)"""
return _cv.cvExp(*args)
def cvLog(*args):
"""cvLog(CvArr src, CvArr dst)"""
return _cv.cvLog(*args)
def cvFastArctan(*args):
"""cvFastArctan(float y, float x) -> float"""
return _cv.cvFastArctan(*args)
def cvCbrt(*args):
"""cvCbrt(float value) -> float"""
return _cv.cvCbrt(*args)
def cvCheckArr(*args):
"""cvCheckArr(CvArr arr, int flags=0, double min_val=0, double max_val=0) -> int"""
return _cv.cvCheckArr(*args)
def cvRandArr(*args):
"""
cvRandArr(CvRNG rng, CvArr arr, int dist_type, CvScalar param1,
CvScalar param2)
"""
return _cv.cvRandArr(*args)
def cvRandShuffle(*args):
"""cvRandShuffle(CvArr mat, CvRNG rng, double iter_factor=1.)"""
return _cv.cvRandShuffle(*args)
def cvSort(*args):
"""cvSort(CvArr src, CvArr dst=None, CvArr idxmat=None, int flags=0)"""
return _cv.cvSort(*args)
def cvSolveCubic(*args):
"""cvSolveCubic(CvMat coeffs, CvMat roots) -> int"""
return _cv.cvSolveCubic(*args)
def cvSolvePoly(*args):
"""cvSolvePoly(CvMat coeffs, CvMat roots, int maxiter=0, int fig=0)"""
return _cv.cvSolvePoly(*args)
def cvCrossProduct(*args):
"""cvCrossProduct(CvArr src1, CvArr src2, CvArr dst)"""
return _cv.cvCrossProduct(*args)
def cvGEMM(*args):
"""
cvGEMM(CvArr src1, CvArr src2, double alpha, CvArr src3, double beta,
CvArr dst, int tABC=0)
"""
return _cv.cvGEMM(*args)
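def _example_cvGEMM():
    """
    Illustrative sketch only: cvGEMM computes dst = alpha*src1*src2 + beta*src3.
    Assumes the module also exposes cvCreateMat and CV_64FC1 as in the C API.
    """
    a = cvCreateMat(2, 2, CV_64FC1)
    b = cvCreateMat(2, 2, CV_64FC1)
    c = cvCreateMat(2, 2, CV_64FC1)
    dst = cvCreateMat(2, 2, CV_64FC1)
    cvSetIdentity(a)                  # a = I
    cvSetIdentity(b)                  # b = I
    cvSetZero(c)                      # c = 0
    cvGEMM(a, b, 2.0, c, 1.0, dst)    # dst = 2*I*I + 1*0
    return cvGetReal2D(dst, 0, 0)     # expected 2.0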
def cvTransform(*args):
"""cvTransform(CvArr src, CvArr dst, CvMat transmat, CvMat shiftvec=None)"""
return _cv.cvTransform(*args)
def cvPerspectiveTransform(*args):
"""cvPerspectiveTransform(CvArr src, CvArr dst, CvMat mat)"""
return _cv.cvPerspectiveTransform(*args)
def cvMulTransposed(*args):
"""
cvMulTransposed(CvArr src, CvArr dst, int order, CvArr delta=None,
double scale=1.)
"""
return _cv.cvMulTransposed(*args)
def cvTranspose(*args):
"""cvTranspose(CvArr src, CvArr dst)"""
return _cv.cvTranspose(*args)
def cvCompleteSymm(*args):
"""cvCompleteSymm(CvMat matrix, int LtoR=0)"""
return _cv.cvCompleteSymm(*args)
def cvFlip(*args):
"""cvFlip(CvArr src, CvArr dst=None, int flip_mode=0)"""
return _cv.cvFlip(*args)
def cvSVD(*args):
"""cvSVD(CvArr A, CvArr W, CvArr U=None, CvArr V=None, int flags=0)"""
return _cv.cvSVD(*args)
def cvSVBkSb(*args):
"""cvSVBkSb(CvArr W, CvArr U, CvArr V, CvArr B, CvArr X, int flags)"""
return _cv.cvSVBkSb(*args)
def cvInvert(*args):
"""cvInvert(CvArr src, CvArr dst, int method=0) -> double"""
return _cv.cvInvert(*args)
def cvSolve(*args):
"""cvSolve(CvArr src1, CvArr src2, CvArr dst, int method=0) -> int"""
return _cv.cvSolve(*args)
def cvDet(*args):
"""cvDet(CvArr mat) -> double"""
return _cv.cvDet(*args)
def cvTrace(*args):
"""cvTrace(CvArr mat) -> CvScalar"""
return _cv.cvTrace(*args)
def cvEigenVV(*args):
"""cvEigenVV(CvArr mat, CvArr evects, CvArr evals, double eps=0)"""
return _cv.cvEigenVV(*args)
def cvSetIdentity(*args):
"""cvSetIdentity(CvArr mat, CvScalar value=cvRealScalar(1))"""
return _cv.cvSetIdentity(*args)
def cvRange(*args):
"""cvRange(CvArr mat, double start, double end) -> CvArr"""
return _cv.cvRange(*args)
def cvCalcCovarMatrix(*args):
"""cvCalcCovarMatrix(CvArr vects, int count, CvArr cov_mat, CvArr avg, int flags)"""
return _cv.cvCalcCovarMatrix(*args)
def cvCalcPCA(*args):
"""
cvCalcPCA(CvArr data, CvArr mean, CvArr eigenvals, CvArr eigenvects,
int flags)
"""
return _cv.cvCalcPCA(*args)
def cvProjectPCA(*args):
"""cvProjectPCA(CvArr data, CvArr mean, CvArr eigenvects, CvArr result)"""
return _cv.cvProjectPCA(*args)
def cvBackProjectPCA(*args):
"""cvBackProjectPCA(CvArr proj, CvArr mean, CvArr eigenvects, CvArr result)"""
return _cv.cvBackProjectPCA(*args)
def cvMahalanobis(*args):
"""cvMahalanobis(CvArr vec1, CvArr vec2, CvArr mat) -> double"""
return _cv.cvMahalanobis(*args)
def cvSum(*args):
"""cvSum(CvArr arr) -> CvScalar"""
return _cv.cvSum(*args)
def cvCountNonZero(*args):
"""cvCountNonZero(CvArr arr) -> int"""
return _cv.cvCountNonZero(*args)
def cvAvg(*args):
"""cvAvg(CvArr arr, CvArr mask=None) -> CvScalar"""
return _cv.cvAvg(*args)
def cvAvgSdv(*args):
"""cvAvgSdv(CvArr arr, CvScalar mean, CvScalar std_dev, CvArr mask=None)"""
return _cv.cvAvgSdv(*args)
def cvMinMaxLoc(*args):
"""
cvMinMaxLoc(CvArr arr, double min_val, double max_val, CvPoint min_loc=None,
CvPoint max_loc=None, CvArr mask=None)
"""
return _cv.cvMinMaxLoc(*args)
def cvNorm(*args):
"""cvNorm(CvArr arr1, CvArr arr2=None, int norm_type=4, CvArr mask=None) -> double"""
return _cv.cvNorm(*args)
def cvNormalize(*args):
"""
cvNormalize(CvArr src, CvArr dst, double a=1., double b=0., int norm_type=4,
CvArr mask=None)
"""
return _cv.cvNormalize(*args)
def cvReduce(*args):
"""cvReduce(CvArr src, CvArr dst, int dim=-1, int op=0)"""
return _cv.cvReduce(*args)
def cvDFT(*args):
"""cvDFT(CvArr src, CvArr dst, int flags, int nonzero_rows=0)"""
return _cv.cvDFT(*args)
def cvMulSpectrums(*args):
"""cvMulSpectrums(CvArr src1, CvArr src2, CvArr dst, int flags)"""
return _cv.cvMulSpectrums(*args)
def cvGetOptimalDFTSize(*args):
"""cvGetOptimalDFTSize(int size0) -> int"""
return _cv.cvGetOptimalDFTSize(*args)
def cvDCT(*args):
"""cvDCT(CvArr src, CvArr dst, int flags)"""
return _cv.cvDCT(*args)
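# --- Illustrative usage sketch (not part of the generated bindings) ---
# A minimal example of pairing cvGetOptimalDFTSize (defined above) with cvDFT.
# It assumes the module also exposes cvCreateMat, cvZero, CV_64FC1 and
# CV_DXT_FORWARD, which are standard names in the OpenCV 1.x C API; treat any
# name not defined in this file as an assumption.
#
#   rows = cvGetOptimalDFTSize(src_rows)      # pad to a DFT-friendly size
#   cols = cvGetOptimalDFTSize(src_cols)
#   padded = cvCreateMat(rows, cols, CV_64FC1)
#   cvZero(padded)                            # zero-fill, then copy src into the top-left corner
#   spectrum = cvCreateMat(rows, cols, CV_64FC1)
#   cvDFT(padded, spectrum, CV_DXT_FORWARD, src_rows)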
def cvSliceLength(*args):
"""cvSliceLength(CvSlice slice, CvSeq seq) -> int"""
return _cv.cvSliceLength(*args)
def cvCreateMemStorage(*args):
"""cvCreateMemStorage(int block_size=0) -> CvMemStorage"""
return _cv.cvCreateMemStorage(*args)
def cvCreateChildMemStorage(*args):
"""cvCreateChildMemStorage(CvMemStorage parent) -> CvMemStorage"""
return _cv.cvCreateChildMemStorage(*args)
def cvClearMemStorage(*args):
"""cvClearMemStorage(CvMemStorage storage)"""
return _cv.cvClearMemStorage(*args)
def cvSaveMemStoragePos(*args):
"""cvSaveMemStoragePos(CvMemStorage storage, CvMemStoragePos pos)"""
return _cv.cvSaveMemStoragePos(*args)
def cvRestoreMemStoragePos(*args):
"""cvRestoreMemStoragePos(CvMemStorage storage, CvMemStoragePos pos)"""
return _cv.cvRestoreMemStoragePos(*args)
def cvMemStorageAlloc(*args):
"""cvMemStorageAlloc(CvMemStorage storage, size_t size) -> void"""
return _cv.cvMemStorageAlloc(*args)
def cvMemStorageAllocString(*args):
"""cvMemStorageAllocString(CvMemStorage storage, char ptr, int len=-1) -> CvString"""
return _cv.cvMemStorageAllocString(*args)
def cvCreateSeq(*args):
"""cvCreateSeq(int seq_flags, int header_size, int elem_size, CvMemStorage storage) -> CvSeq"""
return _cv.cvCreateSeq(*args)
def cvSetSeqBlockSize(*args):
"""cvSetSeqBlockSize(CvSeq seq, int delta_elems)"""
return _cv.cvSetSeqBlockSize(*args)
def cvSeqPush(*args):
"""cvSeqPush(CvSeq seq, void element=None) -> schar"""
return _cv.cvSeqPush(*args)
def cvSeqPushFront(*args):
"""cvSeqPushFront(CvSeq seq, void element=None) -> schar"""
return _cv.cvSeqPushFront(*args)
def cvSeqPop(*args):
"""cvSeqPop(CvSeq seq, void element=None)"""
return _cv.cvSeqPop(*args)
def cvSeqPopFront(*args):
"""cvSeqPopFront(CvSeq seq, void element=None)"""
return _cv.cvSeqPopFront(*args)
def cvSeqPushMulti(*args):
"""cvSeqPushMulti(CvSeq seq, void elements, int count, int in_front=0)"""
return _cv.cvSeqPushMulti(*args)
def cvSeqPopMulti(*args):
"""cvSeqPopMulti(CvSeq seq, void elements, int count, int in_front=0)"""
return _cv.cvSeqPopMulti(*args)
def cvSeqInsert(*args):
"""cvSeqInsert(CvSeq seq, int before_index, void element=None) -> schar"""
return _cv.cvSeqInsert(*args)
def cvSeqRemove(*args):
"""cvSeqRemove(CvSeq seq, int index)"""
return _cv.cvSeqRemove(*args)
def cvClearSeq(*args):
"""cvClearSeq(CvSeq | |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the adversary to create the environment, then the agents to play it.
Implements episode collection for the PAIRED algorithm, a minimax adversary, and
domain randomization. First runs the adversary to generate the environment, then
runs the main agent, and (if running PAIRED) the antagonist agent. The scores
of both agents are used to compute the regret, which is used to train the
adversary and the agents.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.trajectories import time_step as ts_lib
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
from tf_agents.utils import nest_utils
class AdversarialDriver(object):
"""Runs the environment adversary and agents to collect episodes."""
def __init__(self,
env,
agent,
adversary_agent,
adversary_env,
env_metrics=None,
collect=True,
disable_tf_function=False,
debug=False,
combined_population=False,
flexible_protagonist=False):
"""Runs the environment adversary and agents to collect episodes.
Args:
env: A tf_environment.Base environment.
agent: An AgentTrainPackage for the main learner agent.
adversary_agent: An AgentTrainPackage for the second agent, the
adversary's ally. This can be None if using an unconstrained adversary
environment.
adversary_env: An AgentTrainPackage for the agent that controls the
environment, learning to set parameters of the environment to decrease
the agent's score relative to the adversary_agent. Can be None if using
domain randomization.
env_metrics: Global environment metrics to track (such as path length).
collect: True if collecting episodes for training, otherwise eval.
disable_tf_function: If True the use of tf.function for the run method is
disabled.
debug: If True, outputs informative logging statements.
combined_population: If True, the entire population of protagonists plays
each generated environment, and regret is calculated as the difference
between the max of the population and the average (there are no explicit
antagonists).
flexible_protagonist: Which agent plays the role of protagonist in
calculating the regret depends on which has the lowest score.
"""
common.check_tf1_allowed()
self.debug = debug
self.total_episodes_collected = 0
if not disable_tf_function:
self.run = common.function(self.run, autograph=True)
self.run_agent = common.function(self.run_agent, autograph=True)
self.env_metrics = env_metrics
self.collect = collect
self.env = env
self.agent = agent
self.adversary_agent = adversary_agent
self.adversary_env = adversary_env
self.combined_population = combined_population
self.flexible_protagonist = flexible_protagonist
def run(self, random_episodes=False):
"""Runs 3 policies in same environment: environment, agent 1, agent 2."""
if random_episodes:
# Generates a random environment for both protagonist and antagonist
# to play.
agent_r_max, train_idxs = self.randomized_episode()
elif self.adversary_env is not None:
# Generates an environment using an adversary.
if self.combined_population:
agent_r_max, train_idxs = self.combined_population_adversarial_episode()
else:
agent_r_max, train_idxs = self.adversarial_episode()
else:
# Only one agent plays a randomly generated environment.
agent_r_max, train_idxs = self.domain_randomization_episode()
self.total_episodes_collected += agent_r_max.shape[0]
self.log_environment_metrics(agent_r_max)
return train_idxs
def adversarial_episode(self):
"""Episode in which adversary constructs environment and agents play it."""
# Build environment with adversary.
_, _, env_idx = self.run_agent(
self.env, self.adversary_env, self.env.reset, self.env.step_adversary)
train_idxs = {'adversary_env': [env_idx]}
# Run protagonist in generated environment.
agent_r_avg, agent_r_max, agent_idx = self.run_agent(
self.env, self.agent, self.env.reset_agent, self.env.step)
train_idxs['agent'] = [agent_idx]
# Run antagonist in generated environment.
if self.adversary_agent:
adv_agent_r_avg, adv_agent_r_max, antag_idx = self.run_agent(
self.env, self.adversary_agent, self.env.reset_agent, self.env.step)
train_idxs['adversary_agent'] = [antag_idx]
# Use agents' reward to compute and set regret-based rewards for PAIRED.
# By default, regret = max(antagonist) - mean(protagonist).
if self.adversary_agent:
self.adversary_agent[antag_idx].enemy_max = agent_r_max
self.agent[agent_idx].enemy_max = adv_agent_r_max
if self.flexible_protagonist:
# In flexible protagonist case, we find the best-performing agent
# and compute regret = max(best) - mean(other).
protagonist_better = tf.cast(
tf.math.greater(agent_r_max, adv_agent_r_max), tf.float32)
env_reward = protagonist_better * (agent_r_max - adv_agent_r_avg) + \
(1 - protagonist_better) * (adv_agent_r_max - agent_r_avg)
adv_agent_r_max = protagonist_better * agent_r_max + \
(1 - protagonist_better) * adv_agent_r_max
elif self.adversary_env[env_idx].non_negative_regret:
# Clip regret signal so that it can't go below zero.
env_reward = tf.math.maximum(adv_agent_r_max - agent_r_avg, 0)
else:
# Regret = max(antagonist) - mean(protagonist)
env_reward = adv_agent_r_max - agent_r_avg
# Add adversary block budget.
env_reward += self.compute_adversary_block_budget(
adv_agent_r_max, env_idx)
# Minimax adversary reward.
else:
env_reward = -agent_r_avg
self.adversary_env[env_idx].final_reward = env_reward
# Log metrics to tensorboard.
if self.collect:
self.adversary_env[env_idx].env_train_metric(env_reward)
else:
self.adversary_env[env_idx].env_eval_metric(env_reward)
# Log metrics to console.
if self.debug:
logging.info('Agent reward: avg = %f, max = %f',
tf.reduce_mean(agent_r_avg).numpy(),
tf.reduce_mean(agent_r_max).numpy())
logging.info('Environment score: %f',
tf.reduce_mean(env_reward).numpy())
if self.adversary_agent:
logging.info('Adversary agent reward: avg = %f, max = %f',
tf.reduce_mean(adv_agent_r_avg).numpy(),
tf.reduce_mean(adv_agent_r_max).numpy())
return agent_r_max, train_idxs
def combined_population_adversarial_episode(self):
"""Episode in which adversary constructs environment and agents play it."""
# Build environment with adversary.
_, _, env_idx = self.run_agent(
self.env, self.adversary_env, self.env.reset, self.env.step_adversary)
train_idxs = {'adversary_env': [env_idx], 'agent': []}
# Run all protagonist agents in generated environment.
means = []
maxs = []
for agent_idx in range(len(self.agent)):
agent_r_avg, agent_r_max, agent_idx_selected = self.run_agent(
self.env, self.agent, self.env.reset_agent, self.env.step,
agent_idx=agent_idx)
assert agent_idx == agent_idx_selected
means.append(agent_r_avg)
maxs.append(agent_r_max)
train_idxs['agent'].append(agent_idx)
# Stack into shape: [num agents in population, batch]
means = tf.stack(means)
maxs = tf.stack(maxs)
# Compute and set regret-based rewards for PAIRED.
population_max = tf.reduce_max(maxs, axis=0)
population_avg = tf.reduce_mean(means, axis=0)
regret = population_max - population_avg
if self.adversary_env[env_idx].non_negative_regret:
regret = tf.math.maximum(regret, 0)
for agent_idx in range(len(self.agent)):
self.agent[agent_idx].enemy_max = population_max
adv_r = regret + self.compute_adversary_block_budget(
population_max, env_idx)
self.adversary_env[env_idx].final_reward = adv_r
# Log metrics to tensorboard.
if self.collect:
self.adversary_env[env_idx].env_train_metric(adv_r)
else:
self.adversary_env[env_idx].env_eval_metric(adv_r)
# Log metrics to console.
if self.debug:
logging.info('Agent reward: avg = %f, max = %f',
tf.reduce_mean(population_avg).numpy(),
tf.reduce_max(population_max).numpy())
logging.info('Environment regret: %f',
tf.reduce_mean(regret).numpy())
return population_max, train_idxs
def log_environment_metrics(self, agent_r_max):
"""Log extra environment metrics."""
distance_to_goal = self.env.get_distance_to_goal()
num_blocks = self.env.get_num_blocks()
deliberate_placement = self.env.get_deliberate_placement()
env_episodes = [tf.convert_to_tensor(
self.total_episodes_collected, dtype=tf.float32)]
goal_x = self.env.get_goal_x()
goal_y = self.env.get_goal_y()
passable = self.env.get_passable()
shortest_path = self.env.get_shortest_path_length()
shortest_passable_path = passable * shortest_path
solved = tf.cast(agent_r_max > 0, tf.float32)
solved_path_length = solved * shortest_path
for i, m in enumerate([distance_to_goal, num_blocks,
deliberate_placement, env_episodes, goal_x, goal_y,
passable, shortest_path, shortest_passable_path,
solved_path_length]):
self.env_metrics[i](m)
if self.debug:
logging.info('Driver times invoked %d', self.total_episodes_collected)
logging.info('Num blocks: %f', tf.reduce_mean(num_blocks).numpy())
logging.info('Distance to goal: %f',
tf.reduce_mean(distance_to_goal).numpy())
logging.info('Deliberate agent placement: %f',
tf.reduce_mean(deliberate_placement).numpy())
logging.info('Goal (X, Y): (%f, %f)', tf.reduce_mean(goal_x).numpy(),
tf.reduce_mean(goal_y).numpy())
logging.info('Possible to finish environment?: %f',
tf.reduce_mean(passable).numpy())
logging.info('Shortest path length to goal: %f',
tf.reduce_mean(shortest_path).numpy())
logging.info('Solved path length: %f',
tf.reduce_mean(solved_path_length).numpy())
def domain_randomization_episode(self):
"""Use random reset function to create a randomized environment."""
# Randomly generate environment.
self.env.reset_random()
# Run single agent.
agent_r_avg, agent_r_max, agent_idx = self.run_agent(
self.env, self.agent, self.env.reset_agent, self.env.step)
train_idxs = {'agent': [agent_idx]}
if self.debug:
logging.info('Agent reward: avg = %f, max = %f',
tf.reduce_mean(agent_r_avg).numpy(),
tf.reduce_mean(agent_r_max).numpy())
return agent_r_max, train_idxs
def randomized_episode(self):
"""Both agent and adversary_agent play a randomized environment."""
# Randomly generate environment.
self.env.reset_random()
# Run protagonist agent.
agent_r_avg, agent_r_max, agent_idx = self.run_agent(
self.env, self.agent, self.env.reset_agent, self.env.step)
train_idxs = {'agent': [agent_idx]}
# Run antagonist agent.
if self.adversary_agent:
adv_agent_r_avg, adv_agent_r_max, antag_idx = self.run_agent(
self.env, self.adversary_agent, self.env.reset_agent, self.env.step)
train_idxs['adversary_agent'] = [antag_idx]
# Use agents' reward to compute and set regret-based rewards for PAIRED.
if self.adversary_agent:
self.adversary_agent[antag_idx].enemy_max = agent_r_max
self.agent[agent_idx].enemy_max = adv_agent_r_max
else:
self.agent[agent_idx].enemy_max = agent_r_max
if self.debug:
logging.info('Agent reward: avg = %f, max = %f',
tf.reduce_mean(agent_r_avg).numpy(),
tf.reduce_mean(agent_r_max).numpy())
if self.adversary_agent:
logging.info('Adversary agent reward: avg = %f, max = %f',
tf.reduce_mean(adv_agent_r_avg).numpy(),
tf.reduce_mean(adv_agent_r_max).numpy())
return agent_r_max, train_idxs
def run_agent(self, env, agent_list, reset_func, step_func, agent_idx=None):
"""Runs an agent in an environment given a step and reset function.
Args:
env: A TF-agents TF environment.
agent_list: A list of AgentTrainPackages, each of which contains an agent
that can be run in the environment. The agent to run will be randomly
selected from the list (to handle population based training).
reset_func: Callable function used to reset the environment.
step_func: Callable function used to step the environment.
agent_idx: The integer population index of the agent to run.
Returns:
The average reward achieved, the maximum reward, and the index of the
agent selected.
"""
if agent_idx is None:
agent_idx = np.random.choice(len(agent_list))
agent = agent_list[agent_idx]
if self.collect:
policy = agent.collect_policy
observers = agent.observers
else:
policy | |
# acme/adders/reverb/transition.py
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transition adders.
This implements an N-step transition adder which collapses trajectory sequences
into a single transition, simplifying to a simple transition adder when N=1.
"""
import copy
import itertools
from typing import List, Optional, Union
from acme import specs
from acme import types
from acme import sc2_spec
from acme import sc2_types
from acme.adders.reverb import base
from acme.adders.reverb import utils
import numpy as np
import reverb
from reverb import reverb_types
import tree
import tensorflow as tf
class NStepTransitionAdder(base.ReverbAdder):
"""An N-step transition adder.
This will buffer a sequence of N timesteps in order to form a single N-step
transition which is added to reverb for future retrieval.
For N=1 the data added to replay will be a standard one-step transition which
takes the form:
(s_t, a_t, r_t, d_t, s_{t+1}, e_t)
where:
s_t = state observation at time t
a_t = the action taken from s_t
r_t = reward ensuing from action a_t
d_t = environment discount ensuing from action a_t. This discount is
applied to future rewards after r_t.
e_t [Optional] = extra data that the agent persists in replay.
For N greater than 1, transitions are of the form:
(s_t, a_t, R_{t:t+n}, D_{t:t+n}, s_{t+n}, e_t),
where:
s_t = State (observation) at time t.
a_t = Action taken from state s_t.
g = the additional discount, used by the agent to discount future returns.
R_{t:t+n} = N-step discounted return, i.e. accumulated over N rewards:
R_{t:t+n} := r_t + g * d_t * r_{t+1} + ...
+ g^{n-1} * d_t * ... * d_{t+n-2} * r_{t+n-1}.
D_{t:t+n}: N-step product of agent discounts g_i and environment
"discounts" d_i.
D_{t:t+n} := g^{n-1} * d_{t} * ... * d_{t+n-1},
For most environments d_i is 1 for all steps except the last,
i.e. it is the episode termination signal.
s_{t+n}: The "arrival" state, i.e. the state at time t+n.
e_t [Optional]: A nested structure of any 'extras' the user wishes to add.
Notes:
- At the beginning and end of episodes, shorter transitions are added.
That is, at the beginning of the episode, it will add:
(s_0 -> s_1), (s_0 -> s_2), ..., (s_0 -> s_n), (s_1 -> s_{n+1})
And at the end of the episode, it will add:
(s_{T-n+1} -> s_T), (s_{T-n+2} -> s_T), ... (s_{T-1} -> s_T).
- We add the *first* `extra` of each transition, not the *last*, i.e.
if extras are provided, we get e_t, not e_{t+n}.
"""
def __init__(
self,
client: reverb.Client,
n_step: int,
discount: float,
priority_fns: Optional[base.PriorityFnMapping] = None,
):
"""Creates an N-step transition adder.
Args:
client: A `reverb.Client` to send the data to replay through.
n_step: The "N" in N-step transition. See the class docstring for the
precise definition of what an N-step transition is. `n_step` must be at
least 1, in which case we use the standard one-step transition, i.e.
(s_t, a_t, r_t, d_t, s_t+1, e_t).
discount: Discount factor to apply. This corresponds to the
agent's discount in the class docstring.
priority_fns: See docstring for BaseAdder.
Raises:
ValueError: If n_step is less than 1.
"""
# Makes the additional discount a float32, which means that it will be
# upcast if rewards/discounts are float64 and left alone otherwise.
self._discount = np.float32(discount)
super().__init__(
client=client,
buffer_size=n_step,
max_sequence_length=1,
priority_fns=priority_fns)
def _write(self):
# NOTE: we do not check that the buffer is of length N here. This means
# that at the beginning of an episode we will add the initial N-1
# transitions (of size 1, 2, ...) and at the end of an episode (when
# called from write_last) we will write the final transitions of size (N,
# N-1, ...). See the Note in the docstring.
# Form the n-step transition given the steps.
observation = self._buffer[0].observation
action = self._buffer[0].action
extras = self._buffer[0].extras
next_observation = self._next_observation
# Initialize the n-step return and the discount accumulators. We make a
# copy of the first reward/discount so that when we add/multiply in place
# it won't change the actual reward or discount.
n_step_return = copy.deepcopy(self._buffer[0].reward)
total_discount = copy.deepcopy(self._buffer[0].discount)
# NOTE: total_discount will contain one fewer discount factor than there are
# step.discounts. This is so that when the learner/update uses an additional
# discount we don't apply it twice. Inside the following loop we will
# apply this right before summing up the n_step_return.
for step in itertools.islice(self._buffer, 1, None):
total_discount *= self._discount
n_step_return += step.reward * total_discount
total_discount *= step.discount
if extras:
transition = (observation, action, n_step_return, total_discount,
next_observation, extras)
else:
transition = (observation, action, n_step_return, total_discount,
next_observation)
# Create a list of steps.
final_step = utils.final_step_like(self._buffer[0], next_observation)
steps = list(self._buffer) + [final_step]
# Calculate the priority for this transition.
table_priorities = utils.calculate_priorities(self._priority_fns, steps)
# Insert the transition into replay along with its priority.
self._writer.append(transition)
for table, priority in table_priorities.items():
self._writer.create_item(
table=table, num_timesteps=1, priority=priority)
def _write_last(self):
# Drain the buffer until there are no transitions.
self._buffer.popleft()
while self._buffer:
self._write()
self._buffer.popleft()
@classmethod
def signature(cls,
environment_spec: specs.EnvironmentSpec,
extras_spec: types.NestedSpec = ()):
transition_spec = [
environment_spec.observations,
environment_spec.actions,
environment_spec.rewards,
environment_spec.discounts,
environment_spec.observations, # next_observation
]
if extras_spec:
transition_spec.append(extras_spec)
# print("signature is: ")
# print(tree.map_structure_with_path(base.spec_like_to_tensor_spec,
# tuple(transition_spec)))
return tree.map_structure_with_path(base.spec_like_to_tensor_spec,
tuple(transition_spec))
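# A minimal, self-contained sketch (plain Python, no reverb/acme dependencies)
# of the N-step return and total discount that NStepTransitionAdder._write
# accumulates: the additional discount g is folded in before each reward and
# the environment discount d_i after it. Illustrative only; the adder itself
# does not use this helper.
def _example_n_step_return(rewards, env_discounts, additional_discount):
  """Returns (n_step_return, total_discount) for equal-length lists."""
  n_step_return = rewards[0]
  total_discount = env_discounts[0]
  for r, d in zip(rewards[1:], env_discounts[1:]):
    total_discount *= additional_discount
    n_step_return += r * total_discount
    total_discount *= d
  return n_step_return, total_discount
# Example: rewards=[1., 1., 1.], env_discounts=[1., 1., 0.], g=0.9 gives
# R = 1 + 0.9 + 0.81 = 2.71 and D = 0.9 * 0.9 * 0.0 = 0.0.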
class SC2NStepTransitionAdder(base.ReverbAdder):
"""An N-step transition adder for StarCraft II
Notes:
- The parts that differ from NStepTransitionAdder above are:
* the signature classmethod
Because the original one assumes a simple environment.
For example, the observation spec is assumed to be a single Array
with shape and dtype attributes, whereas SC2 has multiple arrays in its observation spec.
* the _write method
Because we need to convert the elements of the transition to Tensor objects
in order to append them to the reverb table.
"""
def __init__(
self,
client: reverb.Client,
n_step: int,
discount: float,
priority_fns: Optional[base.PriorityFnMapping] = None,
):
"""Creates an N-step transition adder.
Args:
client: A `reverb.Client` to send the data to replay through.
n_step: The "N" in N-step transition. See the class docstring for the
precise definition of what an N-step transition is. `n_step` must be at
least 1, in which case we use the standard one-step transition, i.e.
(s_t, a_t, r_t, d_t, s_t+1, e_t).
discount: Discount factor to apply. This corresponds to the
agent's discount in the class docstring.
priority_fns: See docstring for BaseAdder.
Raises:
ValueError: If n_step is less than 1.
"""
# Makes the additional discount a float32, which means that it will be
# upcast if rewards/discounts are float64 and left alone otherwise.
self._discount = np.float32(discount)
super().__init__(
client=client,
buffer_size=n_step,
max_sequence_length=1,
priority_fns=priority_fns)
def _write(self):
# NOTE: we do not check that the buffer is of length N here. This means
# that at the beginning of an episode we will add the initial N-1
# transitions (of size 1, 2, ...) and at the end of an episode (when
# called from write_last) we will write the final transitions of size (N,
# N-1, ...). See the Note in the docstring.
# Form the n-step transition given the steps.
observation = self._buffer[0].observation
action = self._buffer[0].action
extras = self._buffer[0].extras
next_observation = self._next_observation
# Initialize the n-step return and the discount accumulators. We make a
# copy of the first reward/discount so that when we add/multiply in place
# it won't change the actual reward or discount.
n_step_return = copy.deepcopy(self._buffer[0].reward)
total_discount = copy.deepcopy(self._buffer[0].discount)
# NOTE: total_discount will contain one fewer discount factor than there are
# step.discounts. This is so that when the learner/update uses an additional
# discount we don't apply it twice. Inside the following loop we | |
complete isolation, followed by secondary contact with 2 migration rates
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to background selection and selective sweeps
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
Tsc: The scaled time between the secondary contact and the present.
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the spectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split and isolation to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes at nu1 and nu2 after the secondary contact and restore the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tsc, nu1, nu2, m12=m12, m21=m21)
###
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the locally reduced population sizes nu1*hrf and nu2*hrf after the split and keep the migration rates at zero during isolation
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
# We keep the locally reduced population sizes nu1*hrf and nu2*hrf after the secondary contact and set the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tsc, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
###
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
### Sum the spectra
fs = ((1-Q)*fsnr + Q*fslr)
return fs
def SCG(params, (n1,n2), pts):
nu1, nu2, b1, b2, m12, m21, Ts, Tsc = params
"""
Model with split, complete isolation, followed by secondary contact with exponential growth
nu1: Size of population 1 at split.
nu2: Size of population 2 at split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
Tsc: The scaled time between the secondary contact and the present.
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
# phi for the equilibrium ancestral population
phi = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phi = dadi.PhiManip.phi_1D_to_2D(xx, phi)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phi = dadi.Integration.two_pops(phi, xx, Ts, nu1, nu2, m12=0, m21=0)
# We start the exponential size change at the secondary contact and set the migration rates to m12 and m21
bnu1_func = lambda t: nu1 * b1**(t/Tsc)
bnu2_func = lambda t: nu2 * b2**(t/Tsc)
phi = dadi.Integration.two_pops(phi, xx, Tsc, bnu1_func, bnu2_func, m12=m12, m21=m21)
###
## Calculate the spectrum
fs = dadi.Spectrum.from_phi(phi, (n1,n2), (xx,xx))
return fs
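# --- Illustrative usage sketch (comments only) ---
# A typical way to evaluate one of these models against data with dadi's
# standard workflow; the function names below are dadi's public API, while the
# parameter values and the data spectrum are placeholders:
#
#   func_ex = dadi.Numerics.make_extrap_log_func(SCG)
#   model = func_ex(params, (n1, n2), [40, 50, 60])   # extrapolated expected SFS
#   ll = dadi.Inference.ll_multinom(model, data)      # data: an observed dadi.Spectrum
#   theta = dadi.Inference.optimal_sfs_scaling(model, data)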
def SC2N2m(params, (n1,n2), pts):
nu1, nu2, hrf, m12, m21, me12, me21, Ts, Tsc, P, Q = params
"""
Model of semi permeability with split, complete isolation, followed by secondary contact with 2 migration rates
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to background selection and selective sweeps
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
Tsc: The scaled time between the secondary contact and the present.
Q: The proportion of the genome with a reduced effective size due to selection at linked sites
P: The proportion of the genome evolving neutrally
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
#### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to m12 and m21
phiN = dadi.Integration.two_pops(phiN, xx, Tsc, nu1, nu2, m12=m12, m21=m21)
###
## calculate the spectrum.
fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
#### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to zero
phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to me12 and me21
phiI = dadi.Integration.two_pops(phiI, xx, Tsc, nu1, nu2, m12=me12, m21=me21)
###
## calculate the spectrum.
fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))
#### Calculate the spectrum in normally-recombining regions
# phi for the equilibrium ancestral population
phinr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
# We set the population sizes after the split and isolation to nu1 and nu2 and set the migration rates to zero
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=0, m21=0)
# We keep the population sizes at nu1 and nu2 after the secondary contact and restore the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Tsc, nu1, nu2, m12=m12, m21=m21)
###
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the locally reduced population sizes nu1*hrf and nu2*hrf after the split and keep the migration rates at zero during isolation
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=0, m21=0)
# We keep the locally reduced population sizes nu1*hrf and nu2*hrf after the secondary contact and set the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Tsc, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
###
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
### Sum the spectra
fs = (Q*fslr+(1-Q)*fsnr+P*fsN+(1-P)*fsI)
return fs
def SC2NG(params, (n1,n2), pts):
nu1, nu2, b1, b2, hrf, m12, m21, Ts, Tsc, Q = params
"""
Model with split, complete isolation, followed by secondary contact, with exponential growth and a Hill-Robertson factor reducing Ne in a fraction Q of the genome
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
b1: Population growth coefficient of population 1
b2: Population growth coefficient of population 2
hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to background selection and selective sweeps
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
Ts: The scaled time between the split and the secondary contact (in units of 2*Na generations).
Tsc: The scaled time between the secondary contact
6000 N !
PARAMETER G(TEST,HE,C,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,C,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,C,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,C,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,N,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,O,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,O,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,O,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,O,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,O,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,O,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,F,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,F,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,HE,F,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,B;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,B;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,B;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,C;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,C;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,C;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,N;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,N;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,N;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,BE,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,C;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,C;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,C;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,N;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,N;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,N;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,B,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,N;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,N;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,N;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,C,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,N,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,O,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,O,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,O,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,O,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,O,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,O,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,F,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,F,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,LI,F,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,C;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,C;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,C;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,N;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,N;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,N;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,B,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,N;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,N;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,N;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,C,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,N,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,O,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,O,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,O,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,O,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,O,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,O,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,F,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,F,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,BE,F,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,N;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,N;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,N;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,C,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,N,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,O,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,O,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,O,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,O,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,O,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,O,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,B,F,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,B,F,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,B,F,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,O;0) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,O;1) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,O;2) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,C,N,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,C,O,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,C,O,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,C,O,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,C,O,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,C,O,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,C,O,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,C,F,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,C,F,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,C,F,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,N,O,F;0) 300 STR#; 6000 N !
PARAMETER G(TEST,N,O,F;1) 300 STR#; 6000 N !
PARAMETER G(TEST,N,O,F;2) 300 STR#; 6000 N !
PARAMETER G(TEST,N,O,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,N,O,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,N,O,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,N,F,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,N,F,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,N,F,NE;2) 300 STR#; 6000 N !
PARAMETER G(TEST,O,F,NE;0) 300 STR#; 6000 N !
PARAMETER G(TEST,O,F,NE;1) 300 STR#; 6000 N !
PARAMETER G(TEST,O,F,NE;2) 300 STR#; 6000 N !
"""
ALFE_TDB = """
$ ALFE
$
$ -----------------------------------------------------------------------------
$ 2006.12.21
$ 2007.02.20 mod ( 386.15 --> 368.15 )
$
$ TDB file created by K.Hashimoto and T.Abe,
$
$ Particle Simulation and Thermodynamics Group, National Institute for
$ Materials Science. 1-2-1 Sengen, Tsukuba, Ibaraki 305-0047, Japan
$
$ e-mail: <EMAIL>
$
$ Copyright (C) NIMS 2007
$ -----------------------------------------------------------------------------
$
$ The parameter set is taken from
$ COST 507, Thermochemical database for light metal alloys, vol.2
$ Ed. I.Ansara, A.T.Dinsdale, M.H.Rand, (1998)
$ ISBN: 92-828-3902-8
$
$ -----------------------------------------------------------------------------
$
ELEMENT /- ELECTRON_GAS 0.0000E+00 0.0000E+00 0.0000E+00!
ELEMENT VA VACUUM 0.0000E+00 0.0000E+00 0.0000E+00!
ELEMENT AL FCC_A1 26.981539 4577.296 28.3215!
ELEMENT FE BCC_A2 55.847 4489 27.28 !
$
$--------1---------2---------3---------4---------5---------6---------7---------8
$
FUNCTION UN_ASS 298.15 0; 300.00 N !
FUNCTION GHSERAL 298.15 -7976.15+137.093038*T-24.3671976*T*LN(T)
-.001884662*T**2-8.77664E-07*T**3+74092*T**(-1); 700 Y
-11276.24+223.048446*T-38.5844296*T*LN(T)+.018531982*T**2
-5.764227E-06*T**3+74092*T**(-1); 933.47 Y
-11278.378+188.684153*T-31.748192*T*LN(T)-1.230524E+28*T**(-9); 2900 N !
FUNCTION GHSERFE 298.15 +1225.7+124.134*T-23.5143*T*LN(T)
-.00439752*T**2-5.8927E-08*T**3+77359*T**(-1); 1811 Y
-25383.581+299.31255*T-46*T*LN(T)+2.29603E+31*T**(-9); 6000 N !
$
FUNCTION GALBCC 298.15 +10083-4.813*T+GHSERAL#; 6000 N !
FUNCTION GBCCAL 298.15 +10083-4.813*T+GHSERAL#; 6000 N !
| |
class FuchsianSystem(object):
@staticmethod
def from_M(m, x, eps):
# singular points
xs = sorted(singularities(m, x).keys())
# residues in singular points
rs = {}
for xi in xs:
rs[xi] = SystemResidue(matrix_residue(m, x, xi), eps)
# TODO: check if system's residues have half-integer eigenvalues.
# If so, inform the user and only remove resonances.
# Although such a form does not allow an eps-form to be constructed, it can
# still be used to find solutions as a power series using semi-analytical
# methods.
return FuchsianSystem(rs, x, eps)
def __init__(self, rs, x, eps):
self.rs = rs
self.xs = sorted(rs.keys())
self.x = x
self.eps = eps
def __str__(self):
#s = "zeroes:\n %s\n" % (self.xs,)
s = ""
a = 0
for xi, ri in self.rs.items():
s += "a = %s (x = %s)\n%s\n" % (a, xi, ri)
a += 1
return s
def write(self, filename):
m = SR(0)
for xi, ri in self.rs.items():
if xi == oo:
continue
m += ri.m/(self.x-xi)
export_matrix_to_file(filename, m)
def _balance(self, i1, j1, i2, j2):
x1, x2 = self.xs[i1], self.xs[i2]
r1, r2 = self.rs[x1], self.rs[x2]
v1, v2 = r1.eigenvectors_right()[j1][1], r2.eigenvectors_left()[j2][1]
print("x1 = %s" % x1)
print("x2 = %s" % x2)
print("v1 = %s" % v1)
print("v2 = %s" % v2)
sp = simple(dot_product(v1, v2))
print("sp = %s" % sp)
P = simple(cross_product(v1, v2) / sp)
print("P =\n%s" % P)
m = balance_transform(self.m, P, x1, x2, self.x)
return FuchsianSystem.from_M(m, self.x, self.eps)
#@logcall
def balance(self, i1, j1, i2, j2):
x1, x2 = self.xs[i1], self.xs[i2]
r1, r2 = self.rs[x1], self.rs[x2]
v1, v2 = r1.eigenvectors_right()[j1][1], r2.eigenvectors_left()[j2][1]
sp = simple(dot_product(v1, v2))
print("sp = %s" % sp)
P = simple(cross_product(v1, v2) / sp)
print("P =\n%s" % P)
coP = 1-P
if x1 == oo:
Cmap = defaultdict(SR.zero)
for pi, Ci in self.rs.items():
if pi == oo:
continue
# coP Ci coP (x-pi)^-1 + P Ci P (x-pi)^-1
Cmap[pi, -1] += coP*Ci.m*coP + P*Ci.m*P
# coP Ci P -(x-x2) (x-pi)^-1
cmap_add_mul(Cmap, -coP*Ci.m*P, pi, -1, x2)
# P Ci coP -1/(x-x2) (x-pi)^-1
cmap_add_div(Cmap, -P*Ci.m*coP, pi, -1, x2)
# -P/(x-x2)
Cmap[x2, -1] -= P
elif x2 == oo:
Cmap = defaultdict(SR.zero)
for pi, Ci in self.rs.items():
if pi == oo:
continue
# coP Ci coP (x-pi)^-1 + P Ci P (x-pi)^-1
Cmap[pi, -1] += coP*Ci.m*coP + P*Ci.m*P
# P Ci coP -(x-x1) (x-pi)^-1
cmap_add_mul(Cmap, -P*Ci.m*coP, pi, -1, x1)
# coP Ci P -1/(x-x1) (x-pi)^-1
cmap_add_div(Cmap, -coP*Ci.m*P, pi, -1, x1)
# P/(x-x1)
Cmap[x1, -1] += P
else:
Cmap = defaultdict(SR.zero, [((pi,-1),ri.m) for pi,ri in self.rs.items()])
for pi, Ci in self.rs.items():
if pi == oo:
continue
# coP Ci P (x1-x2)/(x-x1) (x-pi)^-1
cmap_add_div(Cmap, coP*Ci.m*P*(x1-x2), pi, -1, x1)
# P Ci coP (x2-x1)/(x-x2) (x-pi)^-1
cmap_add_div(Cmap, P*Ci.m*coP*(x2-x1), pi, -1, x2)
# P/(x-x1) - P/(x-x2)
Cmap[x1, -1] += P
Cmap[x2, -1] -= P
rs = {}
Coo = SR.zero()
for key, C in Cmap.items():
pi, ki = key
if pi == oo:
continue
Cmap[key] = C = simple(C)
if C.is_zero():
del Cmap[key]
else:
assert(ki == -1)
rs[pi] = SystemResidue(C)
Coo -= C
Coo = simple(Coo)
rs[oo] = SystemResidue(Coo)
#T = fuchsia_simplify(self.T * balance(P, x1, x2, self.x))
#return RationalSystem(dict(Cmap), self.x, T, self.trace + [("balance", P, x1, x2)])
return FuchsianSystem(rs, self.x, self.eps)
#@logcall
@memoize
def balances(self):
res = []
for i1,x1 in enumerate(self.xs):
for i2,x2 in enumerate(self.xs):
if i1 == i2:
continue
for j1, j2, aux in self.balances2(x1, x2):
res.append((i1,j1, i2,j2, aux))
return res
#@logcall
def balances2(self, x1, x2):
#assert(not (x1-x2).is_zero())
res = []
r1, r2 = self.rs[x1], self.rs[x2]
for j1, (a1,v1) in enumerate(r1.eigenvectors_right()):
for j2, (a2,v2) in enumerate(r2.eigenvectors_left()):
sp = simple(dot_product(v1, v2))
if sp.is_zero():
continue
res.append((j1, j2, (a1,a2, sp)))
return res
#@logcall
@memoize
def eigenvalues(self):
res = []
for xi in self.xs:
res.append((xi, self.rs[xi].eigenvalues()))
return res
# @memoize
# @logcall
# def eigenvectors_left(self):
# return self._eigenvectors(left=True)
#
# @memoize
# @logcall
# def eigenvectors_right(self):
# return self._eigenvectors(left=False)
#
# def _eigenvectors(self, left=True):
# res = []
# for xi, ri in self.rs.items():
# eigenvectors = ri.eigenvectors_left
# if left is not True:
# eigenvectors = ri.eigenvectors_right
# res.append((xi, eigenvectors()))
# return res
def normalize(self):
pass
class SystemResidue(object):
def __init__(self, m, eps=None):
self.m = m
self.eps = eps
def __str__(self):
return str(self.m)
def __repr__(self):
return self.m.__repr__()
@logcall
@memoize
def eigenvalues(self):
return simple(mathematica.Eigenvalues(self.m).sage())
@logcall
@memoize
def eigenvectors_left(self):
return self._eigenvectors(left=True)
@logcall
@memoize
def eigenvectors_right(self):
return self._eigenvectors(left=False)
def _eigenvectors(self, left=True):
m = self.m.transpose() if left is True else self.m
s = simple(mathematica.Eigensystem(m).sage())
res = zip(s[0], s[1])
return res
# def _eigenvectors(self, left=True):
# res = []
# eigenvectors = self.m.eigenvectors_left
# if left is not True:
# eigenvectors = self.m.eigenvectors_right
# res = [vvm[0:2] for vvm in eigenvectors()]
# return res
# helpers
@logcall
def simple(obj, x=None):
def _simplify(ex):
return mathematica.Simplify(ex).sage()
if hasattr(obj, "apply_map"):
return obj.apply_map(_simplify)
else:
return _simplify(obj)
def ex_in(ex, lst):
#if any((ex-t).is_zero() for t in printed):
for t in lst:
if (ex-t).is_zero():
return True
return False
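# A small self-contained check (numpy only) of the projector used in
# FuchsianSystem.balance: assuming cross_product(v1, v2) is the outer product
# v1*v2^T and dot_product is the ordinary scalar product, the matrix
# P = v1*v2^T / (v2.v1) is an idempotent rank-1 projector (P*P == P), which is
# the property the balance transformation relies on. Illustrative sketch only.
def _check_balance_projector(v1, v2):
    import numpy as np
    v1 = np.asarray(v1, dtype=float)
    v2 = np.asarray(v2, dtype=float)
    P = np.outer(v1, v2) / np.dot(v2, v1)
    return np.allclose(P.dot(P), P)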
class NormalizeAssistant(object):
def __init__(self, m):
self.m = m # current matrix to balance
self.history = [] # matrices balanced so far
# Entry point
def start(self):
# cmd = self.cmd_main
# while cmd != None:
# cmd = cmd()
# return
try:
cmd = self.cmd_main
while cmd != None:
cmd = cmd()
except Exception as error:
print("UNEXPECTED ERROR:\n%s" % (error,))
return self.start()
# Helpers
def choose_balance(self, i1, i2):
x1, x2 = self.m.xs[i1], self.m.xs[i2]
bs = self.m.balances2(x1, x2)
for i, (j1, j2, aux) in enumerate(bs):
print("[%s] %s" % (i, aux))
n = raw_input("(choose balance) > ")
try:
n = int(n)
except ValueError:
return None
if n < 0 or n >= len(bs):
return None
b = bs[n]
return (i1, b[0], i2, b[1])
# Commands
def cmd_balance(self, b):
print("balancing with %s" % (b,))
mm = self.m.balance(*b)
self.history.append(mm)
self.m = mm
return self.cmd_main
def cmd_balance_res(self, args):
if len(args) != 2:
return self.cmd_error("needs exactly 2 arguments")
try:
i1, i2 = [int(n) for n in args]
except ValueError:
return self.cmd_error("arguments out of range (type e for valid values)")
b = self.choose_balance(i1, i2)
if b is None:
return self.cmd_error("unknown balance")
return self.cmd_balance(b[0:4])
def cmd_error(self, msg):
print("ERROR: %s" % msg)
return self.cmd_main
def cmd_main(self):
cmd = raw_input("> ").split(' ')
cmd, args = cmd[0], cmd[1:]
if cmd == 's':
return self.cmd_save(args)
elif cmd == 'b':
return self.cmd_balance_res(args)
elif cmd == 'e':
return self.cmd_print_eigenvalues
elif cmd == 'h':
return self.cmd_print_help
elif cmd == 'q':
return None
elif cmd == '':
return self.cmd_print_help
return self.cmd_unknown_command(cmd)
def cmd_print_eigenvalues(self):
for i, (xi, a) in enumerate(self.m.eigenvalues()):
print("[%s] x = %s\n %s" % (i, xi, a))
return self.cmd_main
def cmd_print_help(self):
"""
Available commands:
b <i> <j> balance xi, xj points
e show eigenvectors
h print this help
q quit
s <file> save matrix to file
"""
print(inspect.cleandoc(self.cmd_print_help.__doc__))
return self.cmd_main
def cmd_save(self, args):
if len(args) != 1:
return self.cmd_error("needs exactly 1 arguments")
name = args[0]
self.m.write(name)
return self.cmd_main
def cmd_unknown_balance(self, n):
print("Unknown balance: '%s'" % n)
return self.cmd_main
def cmd_unknown_command(self, cmd):
print("Unknown command: '%s'" % cmd)
return self.cmd_print_help
def normalize(m, x, eps, seed=0):
"""Given a Fuchsian system of differential equations of the
form dF/dx=m*F, find a transformation that will shift all
the eigenvalues of m's residues into [-1/2, 1/2) range (in
the limit eps->0). Return the transformed matrix m and the
transformation. Raise FuchsiaError if such transformation
is not found.
"""
class State(object):
def __init__(self, m, x, eps, seed):
# random
self.random = Random(seed)
# points
self.points = singularities(m, x).keys()
# ev_cum
self.ev_cum = {}
for x0 in self.points:
pos, neg = 0, 0
a0 = matrix_residue(m, x, x0)
for ev in a0.eigenvalues():
ev0 = limit_fixed(ev, eps, 0)
if ev0 > 0:
pos += ev0
if ev0 < 0:
neg += ev0
logger.debug("x = %s, ev = %s" % (x0, ev0))
self.ev_cum[x0] = [pos,neg]
# x0
logger.debug("state.ev_cum = %s" % self.ev_cum)
self.x0 = None
def is_normalized(self):
for ev in self.ev_cum.values():
if ev != [0,0]:
return False
return True
def pairs(self):
points = [x0 for x0 in self.points if self.ev_cum[x0] != [0,0]]
if len(points) == 1:
self.select_x0(points[0])
points.append(self.x0)
return permutations(points, 2)
def select_x0(self, x1):
if self.x0 is not None:
return
for x0 in self.points:
if x0 != x1:
| |
verbose=True)
glove.add_dictionary(corpus_model.dictionary)
time_elapsed = datetime.now() - start_time
gloveModelFile = "trained/glove_fashion_epochs" + str(epochs) + "_d" + str(dimensionality) + "_c" + str(
context) + "_vecs" + ".model"
glove.save(gloveModelFile)
notes = "Glove Fashion Data," + str(dimensionality) + " dim, " + str(context) + " context, " + str(
epochs) + " epochs \n" + "Training time: " + str(time_elapsed)
save_to_file(fileName, notes)
gloveVecFile = "trained/glove_fashion_epochs" + str(epochs) + "_d" + str(dimensionality) + "_c" + str(
context) + "_vecs" + ".vec"
save_glove_bin_to_vec(glove, gloveVecFile)
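# Note on the .vec output (assumption: save_glove_bin_to_vec writes the plain
# word2vec text format, which is what gensim's load_word2vec_format below
# expects): the first line holds "<vocab_size> <dimensionality>" and every
# following line holds a token followed by its vector components, e.g.
#   12345 300
#   dress 0.021 -0.103 ... 0.044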
def save_to_file(fileName, text):
"""Utility function for saving to string to file"""
with open(fileName, 'w+') as file:
file.write(text)
def append_to_file(fileName, text):
"""Utility function for appending string to file"""
with open(fileName, 'a') as file:
file.write(text)
def my_vector_getter(words, word, my_coordinates):
"""function that returns word vector as numpy array"""
index = words.index(word)
word_array = my_coordinates[index].ravel()
return (word_array)
def norm_word(word):
""" Computes normalized form of word for Retrofitting"""
if isNumber.search(word.lower()):
return '---num---'
elif re.sub(r'\W+', '', word) == '':
return '---punc---'
else:
return word.lower()
def read_lexicon(filename):
""" Reads lexicon file"""
lexicon = {}
for line in open(filename, 'r'):
words = line.lower().strip().split()
lexicon[norm_word(words[0])] = [norm_word(word) for word in words[1:]]
return lexicon
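# Expected lexicon file format (one relation per line, whitespace separated;
# the words below are made-up examples):
#   dress gown frock sundress
#   sneaker trainer plimsoll
# read_lexicon() normalizes every token and maps the first word of each line
# to the list of its neighbours.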
def retrofit(wordVecs, lexicon, numIters):
""" Retrofit word vectors """
newWordVecs = deepcopy(wordVecs)
wvVocab = set(newWordVecs.keys())
loopVocab = wvVocab.intersection(set(lexicon.keys()))
for it in range(numIters):
# loop through every node also in ontology (else just use data estimate)
for word in loopVocab:
wordNeighbours = set(lexicon[word]).intersection(wvVocab)
numNeighbours = len(wordNeighbours)
# no neighbours, pass - use data estimate
if numNeighbours == 0:
continue
            # the weight of the data estimate is the number of neighbours
newVec = numNeighbours * wordVecs[word]
# loop over neighbours and add to new vector (currently with weight 1)
for ppWord in wordNeighbours:
newVec += newWordVecs[ppWord]
newWordVecs[word] = newVec / (2 * numNeighbours)
return newWordVecs
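# A minimal, hedged usage sketch of retrofit() on toy data. The vocabulary and
# lexicon below are invented for illustration only; they are not part of the
# original pipeline.
def _retrofit_toy_example():
    import numpy as np
    toy_vecs = {"dress": np.array([1.0, 0.0]), "gown": np.array([0.0, 1.0])}
    toy_lexicon = {"dress": ["gown"], "gown": ["dress"]}
    # Each iteration moves linked words towards the average of their neighbours
    # while keeping a pull towards the original (data-estimated) vector.
    return retrofit(toy_vecs, toy_lexicon, 10)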
def gensimModelToDict(model):
""" Convert gensim model to dict to use in Retrofitting algorithm"""
wordVecs = {}
for word in model.wv.vocab.keys():
wordVecs[word] = model.wv.syn0[model.wv.vocab[word].index]
# Optional normalization
# wordVecs[word] /= math.sqrt((wordVecs[word]**2).sum() + 1e-6) #normalize
return wordVecs
def does_not_match():
""" Does-not-match evaluation of word vectors"""
# model = gensim.models.KeyedVectors.load_word2vec_format('pretrained/googlenews_negative_300d_100B.bin', binary=True)
model = gensim.models.KeyedVectors.load_word2vec_format("trained/fasttext_fashion_dim300_c3_skipgram.vec",
binary=False)
print("dress jacket shirt coat green, does not match: {0}".format(
model.doesnt_match("dress jacket shirt coat green".split())))
    print("sweater jersey hoodie pullover shoe, does not match: {0}".format(
        model.doesnt_match("sweater jersey hoodie pullover shoe".split())))
    print("shoe boot sneaker trainer sandal hat, does not match: {0}".format(
        model.doesnt_match("shoe boot sneaker trainer sandal hat".split())))
def retrofitting():
""" Orchestrates retrofitting of word vectors"""
# model = gensim.models.KeyedVectors.load_word2vec_format("trained/fasttext_fashion_dim300_c3_skipgram.vec", binary=False)
# model = gensim.models.KeyedVectors.load_word2vec_format("trained/word2vec_fashion_dim300_c3_1.vec", binary=False)
model = gensim.models.KeyedVectors.load_word2vec_format("trained/glove_fashion_dim300_c3_-.vec", binary=False)
wordVecs = gensimModelToDict(model)
lexicon = read_lexicon("./lexicon/framenet.txt")
numIter = int(10)
outFileName = "retrofitted/test.vec"
retrofittedVecs = retrofit(wordVecs, lexicon, numIter)
save_retrofitted_to_vec(retrofittedVecs, outFileName)
def test():
""" Evaluate All """
test_word2vec_google_news_300()
test_fasttext_wiki_300()
test_glove_wiki_300()
test_glove_twitter_200()
test_fashion(300, 1, "skipgram", "fasttext", False)
test_fashion(300, 2, "skipgram", "fasttext", False)
test_fashion(300, 3, "skipgram", "fasttext", False)
test_fashion(300, 4, "skipgram", "fasttext", False)
test_fashion(300, 5, "skipgram", "fasttext", False)
test_fashion(300, 6, "skipgram", "fasttext", False)
test_fashion(300, 7, "skipgram", "fasttext", False)
test_fashion(300, 8, "skipgram", "fasttext", False)
test_fashion(300, 9, "skipgram", "fasttext", False)
test_fashion(300, 10, "skipgram", "fasttext", False)
test_fashion(300, 11, "skipgram", "fasttext", False)
test_fashion(300, 12, "skipgram", "fasttext", False)
test_fashion(300, 13, "skipgram", "fasttext", False)
test_fashion(300, 14, "skipgram", "fasttext", False)
test_fashion(300, 1, "cbow", "fasttext", False)
test_fashion(300, 2, "cbow", "fasttext", False)
test_fashion(300, 3, "cbow", "fasttext", False)
test_fashion(300, 4, "cbow", "fasttext", False)
test_fashion(300, 5, "cbow", "fasttext", False)
test_fashion(300, 6, "cbow", "fasttext", False)
test_fashion(300, 7, "cbow", "fasttext", False)
test_fashion(300, 8, "cbow", "fasttext", False)
test_fashion(300, 9, "cbow", "fasttext", False)
test_fashion(300, 10, "cbow", "fasttext", False)
test_fashion(300, 11, "cbow", "fasttext", False)
test_fashion(300, 12, "cbow", "fasttext", False)
test_fashion(300, 13, "cbow", "fasttext", False)
test_fashion(300, 14, "cbow", "fasttext", False)
test_fashion(300, 1, "-", "glove", False)
test_fashion(300, 2, "-", "glove", False)
test_fashion(300, 3, "-", "glove", False)
test_fashion(300, 4, "-", "glove", False)
test_fashion(300, 5, "-", "glove", False)
test_fashion(300, 6, "-", "glove", False)
test_fashion(300, 7, "-", "glove", False)
test_fashion(300, 8, "-", "glove", False)
test_fashion(300, 9, "-", "glove", False)
test_fashion(300, 10, "-", "glove", False)
test_fashion(300, 11, "-", "glove", False)
test_fashion(300, 12, "-", "glove", False)
test_fashion(300, 13, "-", "glove", False)
test_fashion(300, 14, "-", "glove", False)
test_fashion(300, 1, "1", "word2vec", False)
test_fashion(300, 2, "1", "word2vec", False)
test_fashion(300, 3, "1", "word2vec", False)
test_fashion(300, 4, "1", "word2vec", False)
test_fashion(300, 5, "1", "word2vec", False)
test_fashion(300, 6, "1", "word2vec", False)
test_fashion(300, 7, "1", "word2vec", False)
test_fashion(300, 8, "1", "word2vec", False)
test_fashion(300, 9, "1", "word2vec", False)
test_fashion(300, 10, "1", "word2vec", False)
test_fashion(300, 11, "1", "word2vec", False)
test_fashion(300, 12, "1", "word2vec", False)
test_fashion(300, 13, "1", "word2vec", False)
test_fashion(300, 14, "1", "word2vec", False)
test_fashion(300, 1, "0", "word2vec", False)
test_fashion(300, 2, "0", "word2vec", False)
test_fashion(300, 3, "0", "word2vec", False)
test_fashion(300, 4, "0", "word2vec", False)
test_fashion(300, 5, "0", "word2vec", False)
test_fashion(300, 6, "0", "word2vec", False)
test_fashion(300, 7, "0", "word2vec", False)
test_fashion(300, 8, "0", "word2vec", False)
test_fashion(300, 9, "0", "word2vec", False)
test_fashion(300, 10, "0", "word2vec", False)
test_fashion(300, 11, "0", "word2vec", False)
test_fashion(300, 12, "0", "word2vec", False)
test_fashion(300, 13, "0", "word2vec", False)
test_fashion(300, 14, "0", "word2vec", False)
test_fashion(50, 2, "skipgram", "fasttext", False)
test_fashion(100, 2, "skipgram", "fasttext", False)
test_fashion(150, 2, "skipgram", "fasttext", False)
test_fashion(200, 2, "skipgram", "fasttext", False)
test_fashion(250, 2, "skipgram", "fasttext", False)
test_fashion(350, 2, "skipgram", "fasttext", False)
test_fashion(400, 2, "skipgram", "fasttext", False)
test_fashion(450, 2, "skipgram", "fasttext", False)
test_fashion(500, 2, "skipgram", "fasttext", False)
test_fashion(550, 2, "skipgram", "fasttext", False)
test_fashion(600, 2, "skipgram", "fasttext", False)
def train_all():
""" Train All"""
train_fasttext_fashionrec(300, 3, "skipgram", 15)
train_word2vec_fashionrec(300, 3, 1, 15)
train_glove_fashionrec(300, 3, 15)
train_fasttext_fashionrec(300, 3, "cbow", 15)
train_word2vec_fashionrec(300, 3, 0, 15)
train_fasttext_fashionrec(300, 4, "skipgram", 15)
train_word2vec_fashionrec(300, 4, 1, 15)
train_fasttext_fashionrec(300, 4, "cbow", 15)
train_word2vec_fashionrec(300, 4, 0, 15)
train_glove_fashionrec(300, 4, 15)
train_fasttext_fashionrec(300, 5, "skipgram", 15)
train_word2vec_fashionrec(300, 5, 1, 15)
train_fasttext_fashionrec(300, 5, "cbow", 15)
train_word2vec_fashionrec(300, 5, 0, 15)
train_glove_fashionrec(300, 5, 15)
train_fasttext_fashionrec(300, 6, "skipgram", 15)
train_word2vec_fashionrec(300, 6, 1, 15)
train_fasttext_fashionrec(300, 6, "cbow", 15)
train_word2vec_fashionrec(300, 6, 0, 15)
train_glove_fashionrec(300, 6, 15)
train_fasttext_fashionrec(300, 7, "skipgram", 15)
train_word2vec_fashionrec(300, 7, 1, 15)
train_fasttext_fashionrec(300, 7, "cbow", 15)
train_word2vec_fashionrec(300, 7, 0, 15)
train_glove_fashionrec(300, 7, 15)
train_fasttext_fashionrec(300, 8, "skipgram", 15)
train_word2vec_fashionrec(300, 8, 1, 15)
train_fasttext_fashionrec(300, 8, "cbow", 15)
train_word2vec_fashionrec(300, 8, 0, 15)
train_glove_fashionrec(300, 8, 15)
train_fasttext_fashionrec(300, 9, "skipgram", 15)
train_word2vec_fashionrec(300, 9, 1, 15)
train_fasttext_fashionrec(300, 9, "cbow", 15)
train_word2vec_fashionrec(300, 9, 0, 15)
train_glove_fashionrec(300, 9, 15)
train_fasttext_fashionrec(300, 10, "skipgram", 15)
train_word2vec_fashionrec(300, 10, 1, 15)
train_fasttext_fashionrec(300, 10, "cbow", 15)
train_word2vec_fashionrec(300, 10, 0, 15)
train_glove_fashionrec(300, 10, 15)
train_fasttext_fashionrec(50, 3, "skipgram", 15)
train_word2vec_fashionrec(50, 3, 1, 15)
train_glove_fashionrec(50, 3, 15)
train_fasttext_fashionrec(50, 3, "cbow", 15)
train_word2vec_fashionrec(50, 3, 0, 15)
train_fasttext_fashionrec(50, 4, "skipgram", 15)
train_word2vec_fashionrec(50, 4, 1, 15)
train_fasttext_fashionrec(50, 4, "cbow", 15)
train_word2vec_fashionrec(50, 4, 0, 15)
train_glove_fashionrec(50, 4, 15)
train_fasttext_fashionrec(50, 5, "skipgram", 15)
train_word2vec_fashionrec(50, 5, 1, 15)
train_fasttext_fashionrec(50, 5, "cbow", 15)
train_word2vec_fashionrec(50, 5, 0, 15)
train_glove_fashionrec(50, 5, 15)
train_fasttext_fashionrec(50, 6, "skipgram", 15)
train_word2vec_fashionrec(50, 6, 1, 15)
train_fasttext_fashionrec(50, 6, "cbow", 15)
train_word2vec_fashionrec(50, 6, 0, 15)
train_glove_fashionrec(50, 6, 15)
train_fasttext_fashionrec(50, 7, "skipgram", 15)
train_word2vec_fashionrec(50, 7, 1, 15)
train_fasttext_fashionrec(50, 7, "cbow", 15)
train_word2vec_fashionrec(50, 7, 0, 15)
train_glove_fashionrec(50, 7, 15)
train_fasttext_fashionrec(50, 8, "skipgram", 15)
train_word2vec_fashionrec(50, 8, 1, 15)
train_fasttext_fashionrec(50, 8, "cbow", 15)
train_word2vec_fashionrec(50, 8, 0, 15)
train_glove_fashionrec(50, 8, 15)
train_fasttext_fashionrec(50, 9, "skipgram", 15)
train_word2vec_fashionrec(50, 9, 1, 15)
train_fasttext_fashionrec(50, 9, "cbow", 15)
train_word2vec_fashionrec(50, 9, 0, 15)
train_glove_fashionrec(50, 9, 15)
train_fasttext_fashionrec(50, 10, "skipgram", 15)
train_word2vec_fashionrec(50, 10, 1, 15)
train_fasttext_fashionrec(50, 10, "cbow", 15)
train_word2vec_fashionrec(50, 10, 0, 15)
train_glove_fashionrec(50, 10, 15)
train_fasttext_fashionrec(100, 3, "skipgram", 15)
train_word2vec_fashionrec(100, 3, 1, 15)
train_glove_fashionrec(100, 3, 15)
train_fasttext_fashionrec(100, 3, "cbow", 15)
train_word2vec_fashionrec(100, 3, 0, 15)
train_fasttext_fashionrec(100, 4, "skipgram", 15)
train_word2vec_fashionrec(100, 4, 1, 15)
train_fasttext_fashionrec(100, 4, "cbow", 15)
train_word2vec_fashionrec(100, 4, 0, 15)
train_glove_fashionrec(100, 4, 15)
train_fasttext_fashionrec(100, 5, "skipgram", 15)
train_word2vec_fashionrec(100, 5, 1, 15)
train_fasttext_fashionrec(100, 5, "cbow", 15)
train_word2vec_fashionrec(100, 5, 0, 15)
train_glove_fashionrec(100, 5, 15)
train_fasttext_fashionrec(100, 6, "skipgram", 15)
train_word2vec_fashionrec(100, 6, 1, 15)
train_fasttext_fashionrec(100, 6, "cbow", 15)
train_word2vec_fashionrec(100, 6, 0, 15)
train_glove_fashionrec(100, 6, 15)
train_fasttext_fashionrec(100, 7, "skipgram", 15)
train_word2vec_fashionrec(100, 7, 1, 15)
train_fasttext_fashionrec(100, 7, "cbow", 15)
train_word2vec_fashionrec(100, 7, 0, 15)
train_glove_fashionrec(100, 7, 15)
train_fasttext_fashionrec(100, 8, "skipgram", 15)
train_word2vec_fashionrec(100, 8, 1, 15)
train_fasttext_fashionrec(100, 8, "cbow", 15)
train_word2vec_fashionrec(100, 8, 0, 15)
train_glove_fashionrec(100, 8, 15)
train_fasttext_fashionrec(100, 9, "skipgram", 15)
train_word2vec_fashionrec(100, 9, 1, 15)
train_fasttext_fashionrec(100, 9, "cbow", 15)
train_word2vec_fashionrec(100, 9, 0, 15)
train_glove_fashionrec(100, 9, 15)
train_fasttext_fashionrec(100, 10, "skipgram", 15)
train_word2vec_fashionrec(100, 10, 1, 15)
train_fasttext_fashionrec(100, 10, "cbow", 15)
train_word2vec_fashionrec(100, 10, 0, 15)
train_glove_fashionrec(100, 10, 15)
------------------------------------------------------------------------
Get a submatrix.
Returns the sparsity of the submatrix, with a mapping such that submatrix[k]
= originalmatrix[mapping[k]]
"""
return _casadi.Sparsity_sub(self, *args)
def transpose(self, *args):
"""
Transpose the matrix and get the reordering of the non-zero entries.
transpose(self, bool invert_mapping) -> (Sparsity , [int] OUTPUT)
Parameters:
-----------
mapping: the non-zeros of the original matrix for each non-zero of the new
matrix
"""
return _casadi.Sparsity_transpose(self, *args)
def is_transpose(self, *args):
"""
Check if the sparsity is the transpose of another.
is_transpose(self, Sparsity y) -> bool
"""
return _casadi.Sparsity_is_transpose(self, *args)
def is_reshape(self, *args):
"""
Check if the sparsity is a reshape of another.
is_reshape(self, Sparsity y) -> bool
"""
return _casadi.Sparsity_is_reshape(self, *args)
def combine(self, *args):
"""
Combine two sparsity patterns Returns the new sparsity pattern as well as a
combine(self, Sparsity y, bool f0x_is_zero, bool fx0_is_zero) -> Sparsity
mapping with the same length as the number of non-zero elements The mapping
matrix contains the arguments for each nonzero, the first bit indicates if
the first argument is nonzero, the second bit indicates if the second
argument is nonzero (note that none of, one of or both of the arguments can
be nonzero)
"""
return _casadi.Sparsity_combine(self, *args)
def unite(self, *args):
"""
Union of two sparsity patterns.
unite(self, Sparsity y) -> Sparsity
"""
return _casadi.Sparsity_unite(self, *args)
def __add__(self, *args):
"""
__add__(self, Sparsity b) -> Sparsity
"""
return _casadi.Sparsity___add__(self, *args)
def intersect(self, *args):
"""
Intersection of two sparsity patterns Returns the new sparsity pattern as
intersect(self, Sparsity y) -> Sparsity
well as a mapping with the same length as the number of non-zero elements
The value is 1 if the non-zero comes from the first (i.e. this) object, 2 if
it is from the second and 3 (i.e. 1 | 2) if from both.
"""
return _casadi.Sparsity_intersect(self, *args)
def __mul__(self, *args):
"""
__mul__(self, Sparsity b) -> Sparsity
"""
return _casadi.Sparsity___mul__(self, *args)
def pattern_inverse(self, *args):
"""
Take the inverse of a sparsity pattern; flip zeros and non-zeros.
pattern_inverse(self) -> Sparsity
"""
return _casadi.Sparsity_pattern_inverse(self, *args)
def enlarge(self, *args):
"""
Enlarge matrix Make the matrix larger by inserting empty rows and columns,
enlarge(self, int nrow, int ncol, [int] rr, [int] cc, bool ind1)
keeping the existing non-zeros.
For the matrices A to B A(m, n) length(jj)=m , length(ii)=n B(nrow, ncol)
A=enlarge(m, n, ii, jj) makes sure that
B[jj, ii] == A
"""
return _casadi.Sparsity_enlarge(self, *args)
def enlargeRows(self, *args):
"""
Enlarge the matrix along the first dimension (i.e. insert rows)
enlargeRows(self, int nrow, [int] rr, bool ind1)
"""
return _casadi.Sparsity_enlargeRows(self, *args)
def enlargeColumns(self, *args):
"""
Enlarge the matrix along the second dimension (i.e. insert columns)
enlargeColumns(self, int ncol, [int] cc, bool ind1)
"""
return _casadi.Sparsity_enlargeColumns(self, *args)
def makeDense(self, *args):
"""
        Make a pattern dense.
makeDense(self) -> (Sparsity , [int] OUTPUT)
"""
return _casadi.Sparsity_makeDense(self, *args)
def erase(self, *args):
"""
Erase elements of a matrix.
erase(self, [int] rr, bool ind1) -> [int]
erase(self, [int] rr, [int] cc, bool ind1) -> [int]
Erase rows and/or columns of a matrix.
> erase(self, [int] rr, bool ind1)
------------------------------------------------------------------------
Erase elements of a matrix.
> erase(self, [int] rr, [int] cc, bool ind1)
------------------------------------------------------------------------
Erase rows and/or columns of a matrix.
"""
return _casadi.Sparsity_erase(self, *args)
def append(self, *args):
"""
        Append another sparsity pattern vertically (NOTE: only efficient if vector)
append(self, Sparsity sp)
"""
return _casadi.Sparsity_append(self, *args)
def appendColumns(self, *args):
"""
        Append another sparsity pattern horizontally.
appendColumns(self, Sparsity sp)
"""
return _casadi.Sparsity_appendColumns(self, *args)
def is_scalar(self, *args):
"""
Is scalar?
is_scalar(self, bool scalar_and_dense) -> bool
"""
return _casadi.Sparsity_is_scalar(self, *args)
def is_dense(self, *args):
"""
Is dense?
is_dense(self) -> bool
"""
return _casadi.Sparsity_is_dense(self, *args)
def is_row(self, *args):
"""
Check if the pattern is a row vector (i.e. size1()==1)
is_row(self) -> bool
"""
return _casadi.Sparsity_is_row(self, *args)
def is_column(self, *args):
"""
Check if the pattern is a column vector (i.e. size2()==1)
is_column(self) -> bool
"""
return _casadi.Sparsity_is_column(self, *args)
def is_vector(self, *args):
"""
Check if the pattern is a row or column vector.
is_vector(self) -> bool
"""
return _casadi.Sparsity_is_vector(self, *args)
def is_diag(self, *args):
"""
Is diagonal?
is_diag(self) -> bool
"""
return _casadi.Sparsity_is_diag(self, *args)
def is_square(self, *args):
"""
Is square?
is_square(self) -> bool
"""
return _casadi.Sparsity_is_square(self, *args)
def is_symmetric(self, *args):
"""
Is symmetric?
is_symmetric(self) -> bool
"""
return _casadi.Sparsity_is_symmetric(self, *args)
def is_triu(self, *args):
"""
Is upper triangular?
is_triu(self) -> bool
"""
return _casadi.Sparsity_is_triu(self, *args)
def is_tril(self, *args):
"""
Is lower triangular?
is_tril(self) -> bool
"""
return _casadi.Sparsity_is_tril(self, *args)
def is_singular(self, *args):
"""
Check whether the sparsity-pattern indicates structural singularity.
is_singular(self) -> bool
"""
return _casadi.Sparsity_is_singular(self, *args)
def rowsSequential(self, *args):
"""
Do the rows appear sequentially on each column.
rowsSequential(self, bool strictly) -> bool
Parameters:
-----------
strictly: if true, then do not allow multiple entries
"""
return _casadi.Sparsity_rowsSequential(self, *args)
def removeDuplicates(self, *args):
"""
Remove duplicate entries.
removeDuplicates(self) -> [int]
The same indices will be removed from the mapping vector, which must have
the same length as the number of nonzeros
"""
return _casadi.Sparsity_removeDuplicates(self, *args)
def etree(self, *args):
"""
Calculate the elimination tree See Direct Methods for Sparse Linear Systems
etree(self, bool ata) -> [int]
by Davis (2006). If the parameter ata is false, the algorithm is equivalent
to MATLAB's etree(A), except that the indices are zero- based. If ata is
true, the algorithm is equivalent to MATLAB's etree(A, 'col').
The implementation is a modified version of cs_etree in CSparse Copyright(c)
<NAME>, 2006-2009 Licensed as a derivative work under the GNU LGPL
"""
return _casadi.Sparsity_etree(self, *args)
def ldl(self, *args):
"""
Symbolic LDL factorization Returns the sparsity pattern of L^T.
ldl(self, bool amd) -> (Sparsity , [int] OUTPUT)
The implementation is a modified version of LDL Copyright(c) <NAME>, 2005-2013 Licensed as a derivative work under the GNU LGPL
"""
return _casadi.Sparsity_ldl(self, *args)
def qr_sparse(self, *args):
"""
Symbolic QR factorization Returns the sparsity pattern of V (compact
qr_sparse(self, bool amd) -> (Sparsity OUTPUT, Sparsity OUTPUT, [int] OUTPUT, [int] OUTPUT)
representation of Q) and R as well as vectors needed for the numerical
factorization and solution. The implementation is a modified version of
CSparse Copyright(c) <NAME>, 2006-2009 Licensed as a derivative
work under the GNU LGPL.
"""
return _casadi.Sparsity_qr_sparse(self, *args)
def dfs(self, *args):
"""
Depth-first search on the adjacency graph of the sparsity See Direct Methods
dfs(self, int j, int top, [int] pinv) -> (int , [int] INOUT, [int] INOUT, [bool] INOUT)
for Sparse Linear Systems by Davis (2006).
"""
return _casadi.Sparsity_dfs(self, *args)
def scc(self, *args):
"""
Find the strongly connected components of the bigraph defined by the
scc(self) -> (int , [int] OUTPUT, [int] OUTPUT)
sparsity pattern of a square matrix.
See Direct Methods for Sparse Linear Systems by Davis (2006). Returns:
Number of components
Offset for each components (length: 1 + number of components)
Indices for each components, component i has indices index[offset[i]], ...,
index[offset[i+1]]
In the case that the matrix is symmetric, the result has a particular
interpretation: Given a symmetric matrix A and n = A.scc(p, r)
=> A[p, p] will appear block-diagonal with n blocks and with the indices of
the block boundaries to be found in r.
The implementation is a modified version of cs_scc in CSparse Copyright(c)
<NAME>, 2006-2009 Licensed as a derivative work under the GNU LGPL
"""
return _casadi.Sparsity_scc(self, *args)
def btf(self, *args):
"""
Calculate the block triangular form (BTF) See Direct Methods for Sparse
btf(self) -> (int , [int] OUTPUT, [int] OUTPUT, [int] OUTPUT, [int] OUTPUT, [int] OUTPUT, [int] OUTPUT)
Linear Systems by Davis (2006).
The function computes the Dulmage-Mendelsohn decomposition, which allows you
to reorder the rows and columns of a matrix to bring it into block
triangular form (BTF).
It will not consider the distance of off-diagonal elements to the diagonal:
there is no guarantee you will get a block-diagonal matrix if you supply a
randomly permuted block-diagonal matrix.
If your matrix is symmetrical, this method is of limited use; permutation
can make it non-symmetric.
See: scc The | |
# Source: a414351664/DuReader, file tensorflow/tfu.py
# coding:utf-8
import tensorflow as tf
import tensorflow.contrib as tfctr
import numpy as np
import re
INF = 1e20
class OptimizerWrapper(object):
def __init__(self, optimizer, grad_clip=None, decay=None, exclude=None):
self._opt = optimizer
self._grad_clip = grad_clip or 5
self._decay_param = 7e-7 if decay is None else decay
self._exclude = set(exclude) if exclude is not None else None
def compute_gradients(self, loss):
grads = self._opt.compute_gradients(loss)
gradients, variables = zip(*grads)
if self._grad_clip > 0.0:
gradients, _ = tf.clip_by_global_norm(gradients, self._grad_clip)
return zip(gradients, variables)
def _get_decay_var_list(self):
if self._exclude is None:
var_list = tf.trainable_variables()
else:
var_list = []
for var in tf.trainable_variables():
is_in = True
for kx in self._exclude:
if kx in var.name:
is_in = False
break
if is_in:
var_list.append(var)
return var_list
def apply_gradients(self, grads_and_vars, global_step):
train_op = self._opt.apply_gradients(grads_and_vars, global_step=global_step)
var_list = self._get_decay_var_list()
l2_loss = tf.add_n([tf.nn.l2_loss(ix) for ix in var_list]) * self._decay_param / 0.5
self._l2_loss = l2_loss
decay_opt = tf.train.GradientDescentOptimizer(1)
decay_op = decay_opt.minimize(l2_loss, global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(train_op, decay_op, update_ops)
return train_op
def minimize(self, loss, global_step):
a = self.compute_gradients(loss)
return self.apply_gradients(a, global_step)
@property
def l2_loss(self):
return self._l2_loss
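# Hedged usage sketch for OptimizerWrapper (the loss and global_step tensors are
# assumed to exist in the surrounding graph; they are not defined here):
#
#     base_opt = tf.train.AdamOptimizer(learning_rate=1e-3)
#     opt = OptimizerWrapper(base_opt, grad_clip=5.0, decay=7e-7, exclude=["bias"])
#     train_op = opt.minimize(loss, global_step)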
class AdamWeightDecayOptimizer(tf.train.Optimizer):
"""
A basic Adam optimizer that includes "correct" L2 weight decay.
Copy from the [bert](https://github.com/google-research/bert?from=timeline&isappinstalled=0)
"""
def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999,
epsilon=1e-6, exclude_from_weight_decay=None, name="AdamWeightDecayOptimizer"):
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(name=param_name + "/adam_m", shape=param.shape.as_list(),
dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
v = tf.get_variable(name=param_name + "/adam_v", shape=param.shape.as_list(),
dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
# Standard Adam update.
next_m = (tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
next_v = (tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad)))
update = next_m / (tf.sqrt(next_v) + self.epsilon)
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = self.learning_rate * update
next_param = param - update_with_lr
assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
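# Note on AdamWeightDecayOptimizer above: the weight decay term is added directly
# to the parameter update (update += weight_decay_rate * param) instead of being
# folded into the gradient, i.e. decoupled (AdamW-style) weight decay, and it is
# skipped for any parameter whose name matches exclude_from_weight_decay.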
class CudnnGRU(object):
def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope=None):
self.num_layers = num_layers
self.grus = []
self.inits = []
self.dropout_mask = []
self._scope = scope
with tf.variable_scope(self._scope or 'gru'):
for layer in range(num_layers):
input_size_ = input_size if layer == 0 else 2 * num_units
gru_fw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units)
gru_bw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units)
init_fw = tf.tile(tf.get_variable('init_fw_%d' % layer, dtype=tf.float32, shape=[1, 1, num_units],
initializer=tf.zeros_initializer), [1, batch_size, 1])
init_bw = tf.tile(tf.get_variable('init_bw_%d' % layer, dtype=tf.float32, shape=[1, 1, num_units],
initializer=tf.zeros_initializer), [1, batch_size, 1])
mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train)
mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train)
self.grus.append((gru_fw, gru_bw,))
self.inits.append((init_fw, init_bw,))
self.dropout_mask.append((mask_fw, mask_bw,))
def __call__(self, inputs, seq_len, concat_layers=True):
outputs = [tf.transpose(inputs, [1, 0, 2])]
output_states = []
with tf.variable_scope(self._scope or 'gru'):
for layer in range(self.num_layers):
gru_fw, gru_bw = self.grus[layer]
init_fw, init_bw = self.inits[layer]
mask_fw, mask_bw = self.dropout_mask[layer]
with tf.variable_scope("fw_{}".format(layer)):
out_fw, _ = gru_fw(outputs[-1] * mask_fw, initial_state=(init_fw,))
out_tt = tf.reverse_sequence(out_fw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
with tf.variable_scope("bw_{}".format(layer)):
inputs_bw = tf.reverse_sequence(outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw,))
out_bw = tf.reverse_sequence(out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
outputs.append(tf.concat([out_fw, out_bw], axis=2))
output_states.append(tf.concat([out_tt[0], out_bw[0]], axis=1))
if concat_layers:
res = tf.concat(outputs[1:], axis=2)
output_states = tf.concat(output_states, axis=1)
else:
res = outputs[-1]
output_states = output_states[-1]
res = tf.transpose(res, [1, 0, 2])
return res, output_states
class NativeGRU(object):
def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope="native_gru"):
self.num_layers = num_layers
self.grus = []
self.inits = []
self.dropout_mask = []
self.scope = scope
with tf.variable_scope(self.scope or 'native_gru'):
for layer in range(num_layers):
input_size_ = input_size if layer == 0 else 2 * num_units
gru_fw = tfctr.rnn.GRUBlockCellV2(num_units)
gru_bw = tfctr.rnn.GRUBlockCellV2(num_units)
init_fw = tf.get_variable('init_fw_%d' % layer, dtype=tf.float32, shape=[1, num_units],
initializer=tf.zeros_initializer)
init_bw = tf.get_variable('init_bw_%d' % layer, dtype=tf.float32, shape=[1, num_units],
initializer=tf.zeros_initializer)
init_fw = tf.tile(init_fw, [batch_size, 1])
init_bw = tf.tile(init_bw, [batch_size, 1])
mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train)
mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train)
self.grus.append((gru_fw, gru_bw,))
self.inits.append((init_fw, init_bw,))
self.dropout_mask.append((mask_fw, mask_bw,))
def __call__(self, inputs, seq_len, concat_layers=True):
outputs = [inputs]
output_states = []
with tf.variable_scope(self.scope):
for layer in range(self.num_layers):
gru_fw, gru_bw = self.grus[layer]
init_fw, init_bw = self.inits[layer]
mask_fw, mask_bw = self.dropout_mask[layer]
with tf.variable_scope("fw_{}".format(layer)):
out_fw, state_fw = tf.nn.dynamic_rnn(
gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=tf.float32)
with tf.variable_scope("bw_{}".format(layer)):
inputs_bw = tf.reverse_sequence(
outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
out_bw, state_bw = tf.nn.dynamic_rnn(
gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32)
out_bw = tf.reverse_sequence(
out_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
outputs.append(tf.concat([out_fw, out_bw], axis=2))
output_states.append(tf.concat([state_fw, state_bw], axis=1))
if concat_layers:
res = tf.concat(outputs[1:], axis=2)
output_states = tf.concat(output_states, axis=1)
else:
res = outputs[-1]
output_states = output_states[-1]
return res, output_states
class SimpleCNN(object):
def __init__(self, num_filters=10, filter_size=(2, 3, 4, 5), keep_prob=1.0, is_train=None, scope=None,
activation=None, bias=True, mode='SAME'):
self._is_bias = bias
self._mode = mode
self._filter_sizes = filter_size
self._kprob = keep_prob
self._is_train = is_train
self._scope = scope or 'simple_cnn'
self._activation = activation
if isinstance(num_filters, int):
self._num_filter = [num_filters] * len(filter_size)
elif isinstance(num_filters, (tuple, list, np.ndarray)):
self._num_filter = num_filters
assert len(self._num_filter) == len(self._filter_sizes)
def __call__(self, inputs, concat_layers=True, reuse=False):
outputs = []
with tf.variable_scope(self._scope, reuse=reuse):
for fil_size, num_fil in zip(self._filter_sizes, self._num_filter):
masked_inputs = dropout(inputs, self._kprob, self._is_train)
res = convolution(masked_inputs, num_fil, kernel_size=fil_size, scope='conv_%d' % fil_size,
bias=self._is_bias, mode=self._mode)
if self._activation is not None:
res = self._activation(res)
outputs.append(res)
            # Note: outputs starts empty here (unlike the RNN wrappers above), so concatenate all filter outputs.
            outputs = tf.concat(outputs, axis=-1) if concat_layers else outputs[-1]
return outputs
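# Hedged usage sketch for the recurrent wrappers above (shapes and names are
# assumptions, not taken from the original training code):
#
#     rnn = NativeGRU(num_layers=2, num_units=75, batch_size=32, input_size=300,
#                     keep_prob=0.8, is_train=is_train_ph)
#     outputs, final_states = rnn(inputs, seq_len)   # inputs: [batch, time, input_size]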
class _Transformer(object):
def __init__(self, hidden, layers, heads, ffd_hidden, ffd_fn=None, keep_prob=1.0,
is_train=None, scope='transformer'):
self._hidden = hidden
self._layer = layers
self._heads = heads
self._ffd_hidden = ffd_hidden
self._ffd_fn = ffd_fn or gelu
self._kprob = keep_prob
self._is_train = is_train
self._scope = scope
if hidden % heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention heads (%d)" % (hidden, heads))
self._att_hidden = hidden // heads
def __call__(self, *args, **kwargs):
raise NotImplementedError
class TransformerEncoder(_Transformer):
def __init__(self, hidden, layers, heads, ffd_hidden, ffd_fn=None, keep_prob=1.0,
is_train=None, scope='transformer_encoder'):
super(TransformerEncoder, self).__init__(hidden, layers, heads, ffd_hidden, ffd_fn, keep_prob,
is_train, scope)
def __call__(self, inputs, mask, all_layer=False, reuse=False, **kwargs):
with tf.variable_scope(self._scope, reuse=reuse):
hidden = inputs.shape.as_list()[-1]
if self._hidden != hidden:
# raise ValueError("The width of the input tensor (%d) != hidden size (%d) due to the residuals" %
# (hidden, self._hidden))
inputs = self._ffd_fn(dense(inputs, self._hidden, use_bias=False, scope='input_proj'))
outputs = [inputs]
mask = tf.expand_dims(tf.expand_dims(mask, 1), 2) # [batch, 1, 1, m_length]
for layer in range(self._layer):
with tf.variable_scope('layer_%d' % layer):
now_out = self._layer_call(outputs[-1], mask)
outputs.append(now_out)
return outputs[1:] if all_layer else outputs[-1]
def _layer_call(self, inputs, mask):
att_res = multi_head_attention(inputs, inputs, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=mask, keep_prob=self._kprob, scope='self_attention')
# att_res = dense(att_res, self._hidden, scope='compress')
att_res = dropout(att_res, self._kprob, self._is_train)
att_res = layer_norm(att_res + inputs, 'att')
res = self._ffd_fn(dense(att_res, self._ffd_hidden, scope='ffd_w0'))
res = dense(res, self._hidden, scope='ffd_w1')
res = dropout(res, self._kprob, self._is_train)
res = layer_norm(res + att_res, scope='ffd')
return res
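    # Each encoder layer above is a standard post-norm Transformer block:
    # multi-head self-attention -> dropout -> residual + layer norm, followed by
    # a two-layer position-wise feed-forward network -> dropout -> residual + layer norm.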
class TransformerDecoder(_Transformer):
def __init__(self, hidden, layers, heads, ffd_hidden, ffd_fn=None, keep_prob=1.0,
is_train=None, scope='transformer_decoder'):
super(TransformerDecoder, self).__init__(hidden, layers, heads, ffd_hidden, ffd_fn, keep_prob,
is_train, scope)
# for decoder step
self._step_memory = None
self._step_mem_mask = None
self._batch = None
self._att_prob = None
@property
def attention_prob(self):
return self._att_prob
@property
def before_input_shape(self):
before_shape = {'layer_{}'.format(ix): tf.TensorShape([None, None, None])
for ix in range(-1, self._layer)}
before_shape['is_start'] = tf.TensorShape([])
return before_shape
@property
def before_init(self):
before = {'layer_{}'.format(ix): tf.zeros((self._batch, 1, self._hidden), dtype=tf.float32)
for ix in range(-1, self._layer)}
before['is_start'] = tf.constant(True)
return before
def _train_self_att_block(self, inputs, input_mask):
with tf.variable_scope('self_att'):
att_res = multi_head_attention(inputs, inputs, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=input_mask, keep_prob=self._kprob, scope='self_attention')
# att_res = dense(att_res, self._hidden, scope='compress')
att_res = dropout(att_res, self._kprob, self._is_train)
att_res = layer_norm(att_res + inputs, 'att')
return att_res
def _train_memory_att_block(self, att_res, memory, mem_mask):
with tf.variable_scope('mem_att'):
enc_att, prob = multi_head_attention(
att_res, memory, self._heads, self._att_hidden, is_train=self._is_train, mem_mask=mem_mask,
keep_prob=self._kprob, scope='attention', is_prob=True)
self._att_prob = prob
# enc_att = dense(enc_att, self._hidden, scope='compress')
enc_att = dropout(enc_att, self._kprob, self._is_train)
enc_att = layer_norm(enc_att + att_res, 'enc_att')
return enc_att
def _train_ffd_block(self, enc_att):
with tf.variable_scope('ffd'):
res = self._ffd_fn(dense(enc_att, self._ffd_hidden, scope='ffd_w0'))
res = dense(res, self._hidden, scope='ffd_w1')
res = dropout(res, self._kprob, self._is_train)
res = layer_norm(res + enc_att, scope='ffd')
return res
# Source: radon-provenance/radon-web
# Copyright 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import requests
from django.http import (
StreamingHttpResponse,
Http404,
HttpResponse,
)
from django.core.exceptions import PermissionDenied
from django.shortcuts import render
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
#
from archive.forms import (
    CollectionForm,
    CollectionNewForm,
    ReferenceNewForm,  # used by new_reference() below; assumed to live alongside the other forms
    ResourceForm,
    ResourceNewForm,
)
from radon.model import (
    Collection,
    Group,
    Resource,
    SearchIndex,  # used by search() below; assumed to be exported by radon.model
)
from radon.model.errors import (
    CollectionConflictError,
    ResourceConflictError
)
from radon.util import merge
@login_required
def delete_collection(request, path):
"""Display the page to delete a collection"""
coll = Collection.find(path)
if not coll:
raise Http404
if not coll.user_can(request.user, "delete"):
raise PermissionDenied
if request.method == "POST":
parent_coll = Collection.find(coll.path)
if parent_coll:
parent_path = parent_coll.container
else:
# Just in case
parent_path = ""
coll.delete(username=request.user.name)
messages.add_message(
request,
messages.INFO,
u"The collection '{}' has been deleted".format(coll.name),
)
return redirect("archive:view", path=parent_path)
return render(request, "archive/delete.html", {"collection": coll})
@login_required
def delete_resource(request, path):
"""Display the page to delete a resource"""
resource = Resource.find(path)
if not resource:
raise Http404
if not resource.user_can(request.user, "delete"):
raise PermissionDenied
container = Collection.find(resource.container)
if request.method == "POST":
resource.delete(username=request.user.name)
messages.add_message(
request,
messages.INFO,
"The resource '{}' has been deleted".format(resource.name),
)
return redirect("archive:view", path=container.path)
# Requires delete on resource
ctx = {
"resource": resource,
"container": container,
}
return render(request, "archive/resource/delete.html", ctx)
@login_required
def new_reference(request, parent):
"""Manage the forms to create a new reference resource"""
parent_collection = Collection.find(parent)
# Inherits perms from container by default.
if not parent_collection:
raise Http404()
# User must be able to write to this collection
if not parent_collection.user_can(request.user, "write"):
raise PermissionDenied
read_access, write_access = parent_collection.get_acl_list()
initial = {
"metadata": {},
"read_access": read_access,
"write_access": write_access,
}
if request.method == "POST":
form = ReferenceNewForm(request.POST, initial=initial)
if form.is_valid():
data = form.cleaned_data
try:
url = data["url"]
name = data["name"]
metadata = {}
for k, v in json.loads(data["metadata"]):
if k in metadata:
if isinstance(metadata[k], list):
metadata[k].append(v)
else:
metadata[k] = [metadata[k], v]
else:
metadata[k] = v
resource = Resource.create(
container=parent_collection.path,
name=name,
metadata=metadata,
url=url,
username=request.user.name,
)
resource.create_acl_list(data["read_access"], data["write_access"])
messages.add_message(
request,
messages.INFO,
u"New resource '{}' created".format(resource.get_name()),
)
except ResourceConflictError:
messages.add_message(
request,
messages.ERROR,
"That name is in use within the current collection",
)
return redirect("archive:view", path=parent_collection.path)
else:
form = ReferenceNewForm(initial=initial)
ctx = {"form": form, "container": parent_collection, "groups": Group.objects.all()}
return render(request, "archive/resource/new_reference.html", ctx)
def download(request, path):
""" Download the content of a resource"""
resource = Resource.find(path)
if not resource:
raise Http404
if not resource.user_can(request.user, "read"):
raise PermissionDenied
if resource.is_reference():
r = requests.get(resource.url, stream=True)
resp = StreamingHttpResponse(
streaming_content=r, content_type=resource.get_mimetype()
)
else:
resp = StreamingHttpResponse(
streaming_content=resource.chunk_content(),
content_type=resource.get_mimetype(),
)
resp["Content-Disposition"] = u'attachment; filename="{}"'.format(resource.name)
return resp
@login_required
def edit_collection(request, path):
"""Display the form to edit an existing collection"""
coll = Collection.find(path)
if not coll:
raise Http404
if not coll.user_can(request.user, "edit"):
raise PermissionDenied
if request.method == "POST":
form = CollectionForm(request.POST)
if form.is_valid():
metadata = {}
for k, v in json.loads(form.cleaned_data["metadata"]):
if k in metadata:
if isinstance(metadata[k], list):
metadata[k].append(v)
else:
metadata[k] = [metadata[k], v]
else:
metadata[k] = v
try:
data = form.cleaned_data
coll.update(metadata=metadata, username=request.user.name)
coll.create_acl_list(data["read_access"], data["write_access"])
return redirect("archive:view", path=coll.path)
except CollectionConflictError:
messages.add_message(
request,
messages.ERROR,
"That name is in use in the current collection",
)
else:
md = coll.get_cdmi_user_meta()
metadata = json.dumps(md)
if not md:
metadata = '{"":""}'
read_access, write_access = coll.get_acl_list()
initial_data = {
"name": coll.name,
"metadata": metadata,
"read_access": read_access,
"write_access": write_access,
}
form = CollectionForm(initial=initial_data)
groups = Group.objects.all()
return render(
request,
"archive/edit.html",
{"form": form, "collection": coll, "groups": groups},
)
@login_required
def edit_resource(request, path):
"""Display the form to edit an existing resource"""
# Requires edit on resource
resource = Resource.find(path)
if not resource:
raise Http404()
container = Collection.find(resource.container)
if not container:
raise Http404()
if not resource.user_can(request.user, "edit"):
raise PermissionDenied
if request.method == "POST":
form = ResourceForm(request.POST)
if form.is_valid():
metadata = {}
for k, v in json.loads(form.cleaned_data["metadata"]):
if k in metadata:
if isinstance(metadata[k], list):
metadata[k].append(v)
else:
metadata[k] = [metadata[k], v]
else:
metadata[k] = v
try:
data = form.cleaned_data
resource.update(metadata=metadata, username=request.user.name)
resource.create_acl_list(data["read_access"], data["write_access"])
return redirect("archive:resource_view", path=resource.path)
except ResourceConflictError:
messages.add_message(
request,
messages.ERROR,
"That name is in use within the current collection",
)
else:
md = resource.get_cdmi_user_meta()
metadata = json.dumps(md)
if not md:
metadata = '{"":""}'
read_access, write_access = resource.get_acl_list()
initial_data = {
"name": resource.name,
"metadata": metadata,
"read_access": read_access,
"write_access": write_access,
}
form = ResourceForm(initial=initial_data)
ctx = {
"form": form,
"resource": resource,
"container": container,
"groups": Group.objects.all(),
}
return render(request, "archive/resource/edit.html", ctx)
@login_required()
def home(request):
"""Display the root of the archive"""
return redirect("archive:view")
@login_required
def new_collection(request, parent):
"""Display the form to create a new collection"""
parent_collection = Collection.find(parent)
if not parent_collection.user_can(request.user, "write"):
raise PermissionDenied
read_access, write_access = parent_collection.get_acl_list()
initial = {
"metadata": {},
"read_access": read_access,
"write_access": write_access,
}
form = CollectionNewForm(request.POST or None, initial=initial)
if request.method == "POST":
if form.is_valid():
data = form.cleaned_data
try:
name = data["name"]
parent = parent_collection.path
metadata = {}
for k, v in json.loads(data["metadata"]):
if k in metadata:
if isinstance(metadata[k], list):
metadata[k].append(v)
else:
metadata[k] = [metadata[k], v]
else:
metadata[k] = v
collection = Collection.create(
name=name,
container=parent,
metadata=metadata,
creator=request.user.name,
)
collection.create_acl_list(data["read_access"], data["write_access"])
messages.add_message(
request,
messages.INFO,
u"New collection '{}' created".format(collection.name),
)
return redirect("archive:view", path=collection.path)
except CollectionConflictError:
messages.add_message(
request,
messages.ERROR,
"That name is in use in the current collection",
)
except ResourceConflictError:
messages.add_message(
request,
messages.ERROR,
"That name is in use in the current collection",
)
groups = Group.objects.all()
return render(
request,
"archive/new.html",
{"form": form, "parent": parent_collection, "groups": groups},
)
@login_required
def new_resource(request, parent):
"""Manage the forms to create a new resource"""
parent_collection = Collection.find(parent)
# Inherits perms from container by default.
if not parent_collection:
raise Http404()
# User must be able to write to this collection
if not parent_collection.user_can(request.user, "write"):
raise PermissionDenied
read_access, write_access = parent_collection.get_acl_list()
initial = {
"metadata": {},
"read_access": read_access,
"write_access": write_access,
}
if request.method == "POST":
form = ResourceNewForm(request.POST, files=request.FILES, initial=initial)
if form.is_valid():
data = form.cleaned_data
try:
name = data["name"]
metadata = {}
for k, v in json.loads(data["metadata"]):
if k in metadata:
if isinstance(metadata[k], list):
metadata[k].append(v)
else:
metadata[k] = [metadata[k], v]
else:
metadata[k] = v
resource = Resource.create(
container=parent_collection.path,
name=name,
metadata=metadata,
mimetype=data["file"].content_type,
creator=request.user.name,
size=data["file"].size,
)
res = resource.put(data["file"])
resource.create_acl_list(data["read_access"], data["write_access"])
messages.add_message(
request,
messages.INFO,
u"New resource '{}' created".format(resource.get_name()),
)
except ResourceConflictError:
messages.add_message(
request,
messages.ERROR,
"That name is in use within the current collection",
)
return redirect("archive:view", path=parent_collection.path)
else:
form = ResourceNewForm(initial=initial)
ctx = {"form": form, "container": parent_collection, "groups": Group.objects.all()}
return render(request, "archive/resource/new.html", ctx)
@login_required
def preview(request, path):
"""
Find the preview of the resource with the given ID and deliver it. This will
be rendered in the iframe of the resource view page.
Deprecated for the moment as the iframe isn't working
"""
resource = Resource.find(path)
if not resource:
raise Http404
preview_info = {
# "type": "image",
# "url": "http://....."
}
return render(request, "archive/preview.html", {"preview": preview_info})
def search(request):
"""Display the search results page"""
query = request.GET.get("q")
collection = request.GET.get("collection")
ctx = {"q": query}
terms = [x.lower() for x in query.split(" ")]
results = SearchIndex.find(terms, request.user)
if collection:
results = [el for el in results if el["path"].startswith(collection)]
ctx["results"] = results
ctx["total"] = len(ctx["results"])
ctx["highlights"] = terms
return render(request, "archive/search.html", ctx)
@login_required()
def view_collection(request, path='/'):
"""Display the page which shows the subcollections/resources of a collection"""
if not path:
path = "/"
collection = Collection.find(path)
if not collection:
raise Http404()
if not collection.user_can(request.user, "read") and not collection.is_root:
# If the user can't read, then return 404 rather than 403 so that
# we don't leak information.
raise Http404()
paths = []
full = "/"
for p in collection.path.split("/"):
if not p:
continue
full = u"{}{}/".format(full, p)
paths.append((p, full))
children_c, children_r = collection.get_child(False)
children_c.sort(key=lambda x: x.lower())
children_r.sort(key=lambda x: x.lower())
ctx = {
"collection": collection.to_dict(request.user),
from numpy import ndarray
import numpy as np
import operator
from unit import dim as dims, conv, idim
from unitparse import (eval_units as unit_parse,
linadd as d_add,
linsubtract as d_sub,
linscale as d_scale)
from utils import prec_round
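# _short_str is referenced by fail_for_dimension_mismatch and Quantity.__rpow__
# below but is not defined in this module; the stand-in here is an assumption,
# kept deliberately minimal.
def _short_str(obj):
    """Return a compact string representation of an object for error messages."""
    text = str(obj)
    return text if len(text) <= 80 else text[:77] + '...'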
class UnknownUnitError(KeyError):
def __init__(self,msg):
KeyError.__init__(self, msg)
class DimensionMismatchError(Exception):
"""
Exception class for attempted operations with inconsistent dimensions.
For example, ``3*mvolt + 2*amp`` raises this exception. The purpose of this
class is to help catch errors based on incorrect units. The exception will
print a representation of the dimensions of the two inconsistent objects
that were operated on.
Parameters
----------
description : ``str``
A description of the type of operation being performed, e.g. Addition,
Multiplication, etc.
dims : `Dimension`
The physical dimensions of the objects involved in the operation, any
number of them is possible
"""
def __init__(self, description, *objs):
# Call the base class constructor to make Exception pickable, see:
# http://bugs.python.org/issue1692335
Exception.__init__(self, description, *objs)
self.objs = objs
self.desc = description
def __repr__(self):
dims_repr = [repr(obj.dim) for obj in self.objs]
return '%s(%r, %s)' % (self.__class__.__name__,
self.desc, ', '.join(dims_repr))
def __str__(self):
s = self.desc
if len(self.objs) == 0:
pass
elif len(self.objs) == 1:
s += ' (unit is ' + '*'.join(f'{k}^{v}' for k, v in self.objs[0].units.items() if abs(v) > 0)
        elif len(self.objs) == 2:
            s += ' (units are {} and {}'.format('*'.join(f'{k}^{v}' for k, v in self.objs[0].units.items() if abs(v) > 0),
                                                '*'.join(f'{k}^{v}' for k, v in self.objs[1].units.items() if abs(v) > 0))
# else: #all operations are binary
# s += (' (units are ' +
# ' '.join(['(' + get_unit_for_display(d) + ')'
# for d in self.dims]))
if len(self.objs):
s += ').'
return s
def is_scalar_type(obj):
"""
Tells you if the object is a 1d number type.
Parameters
----------
obj : `object`
The object to check.
Returns
-------
scalar : `bool`
``True`` if `obj` is a scalar that can be interpreted as a
dimensionless `Quantity`.
"""
try:
return obj.ndim == 0
except AttributeError:
return np.isscalar(obj) and not isinstance(obj, str)
def fail_for_dimension_mismatch(obj1, obj2=None, error_message=None,
**error_quantities):
'''
Compare the dimensions of two objects.
Parameters
----------
obj1, obj2 : {array-like, `Quantity`}
The object to compare. If `obj2` is ``None``, assume it to be
dimensionless
error_message : str, optional
An error message that is used in the DimensionMismatchError
error_quantities : dict mapping str to `Quantity`, optional
Quantities in this dictionary will be converted using the `_short_str`
helper method and inserted into the ``error_message`` (which should
have placeholders with the corresponding names). The reason for doing
this in a somewhat complicated way instead of directly including all the
        details in ``error_message`` is that converting large quantity arrays
        to strings can be rather costly and we don't want to do it if no error
        occurred.
Returns
-------
dim1, dim2 : `Dimension`, `Dimension`
The physical dimensions of the two arguments (so that later code does
not need to get the dimensions again).
Raises
------
DimensionMismatchError
If the dimensions of `obj1` and `obj2` do not match (or, if `obj2` is
        ``None``, in case `obj1` is not dimensionless).
Notes
-----
Implements special checking for ``0``, treating it as having "any
dimensions".
'''
o1hasdim = hasattr(obj1, 'dimension')
o2hasdim = hasattr(obj2, 'dimension')
    if (o1hasdim and o2hasdim) and (obj1.dim != obj2.dim).any():  # don't use None Type
dim1 = obj1.dim
dim2 = obj2.dim
# Special treatment for "0":
# if it is not a Quantity, it has "any dimension".
# This allows expressions like 3*mV + 0 to pass (useful in cases where
# zero is treated as the neutral element, e.g. in the Python sum
# builtin) or comparisons like 3 * mV == 0 to return False instead of
# failing # with a DimensionMismatchError. Note that 3*mV == 0*second
# is not allowed, though.
if (dim1.sum() == 0 and np.all(obj1 == 0) or
(dim2.sum() == 0 and np.all(obj2 == 0))):
return dim1, dim2
# We do another check here, this should allow Brian1 units to pass as
# having the same dimensions as a Brian2 unit
if (dim1 == dim2).all():
return dim1, dim2
if error_message is None:
error_message = 'Dimension mismatch'
else:
error_quantities = {name: _short_str(q)
for name, q in error_quantities.items()}
error_message = error_message.format(**error_quantities)
# If we are comparing an object to a specific unit, we don't want to
# restate this unit (it is probably mentioned in the text already)
if obj2 is None or isinstance(obj2, (Dimension, Unit)):
raise DimensionMismatchError(error_message, dim1)
else:
raise DimensionMismatchError(error_message, dim1, dim2)
else:
if o1hasdim and o2hasdim:
return obj1.dim, obj2.dim
elif o2hasdim and obj1 != 0:
raise DimensionMismatchError(error_message, obj2)
elif o1hasdim and obj2 != 0:
raise DimensionMismatchError(error_message, obj1)
else:
return None, None
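# Hedged examples of the intended behaviour (illustrative only):
#
#     a = Quantity(1.0, {'m': 1})
#     b = Quantity(2.0, {'s': 1})
#     fail_for_dimension_mismatch(a, b)   # raises DimensionMismatchError
#     fail_for_dimension_mismatch(a, 0)   # passes: a plain zero matches any dimension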
def eval_dimension(units):
dimension = np.zeros(7)
try:
for k, v in units.items():
dimension += np.array(dims[k])*v
return dimension
except UnknownUnitError as e:
raise UnknownUnitError(f'passed unit string {units} has unknown unit: {e}')
def eval_conversion_factor(units):
factor = 1
for k, v in units.items():
factor *= conv[k]**v
return factor
class Quantity(ndarray):
def __new__(cls, arr, units, copy=False):
subarr = np.array(arr, dtype=float, copy=copy).view(cls)
if type(units) is str:
units = unit_parse(units)
subarr.units = units
try:
eval_dimension(units)
except UnknownUnitError as e:
raise UnknownUnitError(f'passed unit string {units} has unknown unit: {e}')
return subarr
@property
def dimension(self):
return eval_dimension(self.units)
@property
def dim(self):
return self.dimension
@property
def conversion_factor(self):
return eval_conversion_factor(self.units)
def convert_to_SI(self):
self *= self.conversion_factor
d = self.dimension
self.units = {'kg':d[0],
'm':d[1],
's':d[2],
'A':d[3],
'K':d[4],
'cd':d[5],
'mol':d[6]}
return self
def convert_to_unit(self, other_units):
if type(other_units) is str:
other_units = unit_parse(other_units)
try:
assert (eval_dimension(other_units) == self.dimension).all()
except AssertionError:
raise DimensionMismatchError
self *= eval_conversion_factor(self.units)/eval_conversion_factor(other_units)
self.units = other_units
return self
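    # Hedged usage sketch (units are given as exponent dicts here because the exact
    # strings accepted by the external `unitparse` grammar are not shown in this file):
    #
    #     v = Quantity(3.0, {'m': 1, 's': -1})   # 3 m/s
    #     v.convert_to_SI()                      # already SI base units, factor 1
    #     d = Quantity(2.0, {'s': 1})
    #     (v * d).units                          # unit exponents combined via linadd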
#### ARITHMETIC #### (this is all copied from brian2)
def _binary_operation(self, other, operation,
unit_operation=lambda a, b: a, fail_for_mismatch=False,
operator_str=None, inplace=False):
if fail_for_mismatch:
if inplace:
message = ('Cannot calculate ... %s {value}, units do not '
'match') % operator_str
_, other_dim = fail_for_dimension_mismatch(self, other,
message, value=other)
else:
message = ('Cannot calculate {value1} %s {value2}, units do not '
'match') % operator_str
_, other_dim = fail_for_dimension_mismatch(self, other, message,
value1=self,
value2=other)
if hasattr(other, 'units'):
other_units = other.units
else:
other_units = {}
if inplace:
if self.shape == ():
                self_value = Quantity(self, self.units, copy=True)
else:
self_value = self
operation(self_value, other)
self_value.units = unit_operation(self.units, other_units)
return self_value
else:
newunits = unit_operation(self.units, other_units)
self_arr = np.array(self, copy=False)
other_arr = np.array(other, copy=False)
result = operation(self_arr, other_arr)
return Quantity(result, newunits)
def __mul__(self, other):
return self._binary_operation(other, operator.mul, d_add)
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
return self._binary_operation(other, np.ndarray.__imul__, d_add,
inplace=True)
def __div__(self, other):
return self._binary_operation(other, operator.truediv, d_sub)
def __truediv__(self, other):
return self.__div__(other)
def __rdiv__(self, other):
# division with swapped arguments
rdiv = lambda a, b: operator.truediv(b, a)
        rd_sub = lambda a, b: d_sub(b, a)  # unit exponents are subtracted in swapped order too
        return self._binary_operation(other, rdiv, rd_sub)
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __idiv__(self, other):
return self._binary_operation(other, np.ndarray.__itruediv__,
d_sub, inplace=True)
def __itruediv__(self, other):
return self._binary_operation(other, np.ndarray.__itruediv__,
d_sub, inplace=True)
def __mod__(self, other):
return self._binary_operation(other, operator.mod,
fail_for_mismatch=True,operator_str=r'%')
def __add__(self, other):
return self._binary_operation(other, operator.add,
fail_for_mismatch=True,
operator_str='+')
def __radd__(self, other):
#not sure why rsub has complicated logic
return self.__add__(other)
def __iadd__(self, other):
return self._binary_operation(other, np.ndarray.__iadd__,
fail_for_mismatch=True,
operator_str='+=',
inplace=True)
def __sub__(self, other):
return self._binary_operation(other, operator.sub,
fail_for_mismatch=True,
operator_str='-')
def __rsub__(self, other):
# We allow operations with 0 even for dimension mismatches, e.g.
# 0 - 3*mV is allowed. In this case, the 0 is not represented by a
# Quantity object so we cannot simply call Quantity.__sub__
        if ((not isinstance(other, Quantity) or not other.dimension.any()) and
                np.all(other == 0)):
            return self.__neg__()
        else:
            # Wrap the other operand as a dimensionless Quantity before subtracting.
            return Quantity(other, {}, copy=False).__sub__(self)
def __isub__(self, other):
return self._binary_operation(other, np.ndarray.__isub__,
fail_for_mismatch=True,
operator_str='-=',
inplace=True)
def __pow__(self, other):
if isinstance(other, np.ndarray) or is_scalar_type(other):
fail_for_dimension_mismatch(other,
error_message='Cannot calculate '
'{base} ** {exponent}, '
'the exponent has to be '
'dimensionless',
base=self, exponent=other)
other = np.array(other, copy=False)
return Quantity(np.array(self, copy=False)**other,
d_scale(self.units, other))
else:
return NotImplemented
def __rpow__(self, other):
        # "Dimensionless" here means all dimension exponents are zero.
        if not self.dimension.any():
            if isinstance(other, np.ndarray) or is_scalar_type(other):
                new_array = np.array(other, copy=False)**np.array(self,
                                                                  copy=False)
                return Quantity(new_array, {})
else:
return NotImplemented
else:
raise DimensionMismatchError(('Cannot calculate '
'{base} ** {exponent}, the '
'exponent has to be '
'dimensionless').format(base=_short_str(other),
exponent=_short_str(self)),
self.dim)
], [
'{0};'.format(_crc_init_function_def(self.opt, self.sym)),
]),
'', '',
Comment(self.opt, '', [
'Update the crc value with new data.',
'',
'\\param[in] crc The current crc value.',
Conditional(self.opt, '', not _use_cfg_in_crc_update(self.opt), [
'\\param[in] cfg A pointer to an initialised {cfg_t} structure.'.format(**self.sym),
]),
'\\param[in] data Pointer to a buffer of \\a data_len bytes.',
'\\param[in] data_len Number of bytes in the \\a data buffer.',
'\\return The updated crc value.',
]),
'{0};'.format(_crc_update_function_def(self.opt, self.sym)),
'', '',
Comment(self.opt, '', [
'Calculate the final crc value.',
'',
Conditional(self.opt, '', not _use_cfg_in_finalize(self.opt), [
'\\param[in] cfg A pointer to an initialised {cfg_t} structure.'.format(**self.sym),
]),
'\\param[in] crc The current crc value.',
'\\return The final crc value.',
]),
Conditional2(self.opt, '', _use_inline_crc_finalize(self.opt), [
Conditional2(self.opt, '', self.opt.c_std == 'C89', [
'#define {0}(crc) ({1})'.format(self.sym['crc_finalize_function'], _crc_final_value(self.opt, self.sym)),
], [
'static inline {0}'.format(_crc_finalize_function_def(self.opt, self.sym)),
'{',
' return {0};'.format(_crc_final_value(self.opt, self.sym)),
'}',
]),
], [
'{0};'.format(_crc_finalize_function_def(self.opt, self.sym)),
]),
'', '',
'#ifdef __cplusplus',
'} /* closing brace for extern "C" */',
'#endif',
'',
'#endif /* {header_protection} */'.format(**self.sym),
'',
]
return out
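# Sketch of how the generated header is consumed from C code (the exact names
# depend on the configured symbol prefix, and a `&cfg` argument is added to
# these calls when CRC parameters are left undefined, as in _main_file below):
#     crc_t crc = crc_init();
#     crc = crc_update(crc, (const void *)data, data_len);
#     crc = crc_finalize(crc);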
def _c_file(self):
"""
Add C file content.
"""
out = [
CodeGen(self.opt, '', _includes(self.opt)),
'#include "{header_filename}" /* include the header file generated with pycrc */'.format(**self.sym),
'#include <stdlib.h>',
Conditional(self.opt, '', self.opt.c_std != 'C89', [
'#include <stdint.h>',
Conditional(self.opt, '', self.opt.undefined_crc_parameters or \
self.opt.algorithm == self.opt.algo_bit_by_bit or \
self.opt.algorithm == self.opt.algo_bit_by_bit_fast, [
'#include <stdbool.h>',
]),
]),
Conditional(self.opt, '', self.opt.slice_by > 1, [
'#include <endian.h>',
]),
Conditional(self.opt, '', _use_reflect_func(self.opt) and _use_static_reflect_func(self.opt), [
'',
'static {crc_t} {crc_reflect_function}({crc_t} data, size_t data_len);'.format(**self.sym),
]),
'',
CodeGen(self.opt, '', _crc_table(self.opt, self.sym)),
CodeGen(self.opt, '', _crc_reflect_function_gen(self.opt, self.sym)),
CodeGen(self.opt, '', _crc_init_function_gen(self.opt, self.sym)),
CodeGen(self.opt, '', _crc_table_gen(self.opt, self.sym)),
CodeGen(self.opt, '', _crc_update_function_gen(self.opt, self.sym)),
CodeGen(self.opt, '', _crc_finalize_function_gen(self.opt, self.sym)),
'',
]
return out
def _main_file(self):
"""
Add main file content.
"""
out = [
'',
'',
CodeGen(self.opt, '', _includes(self.opt)),
'#include <stdio.h>',
'#include <getopt.h>',
Conditional(self.opt, '', self.opt.undefined_crc_parameters, [
'#include <stdlib.h>',
'#include <stdio.h>',
'#include <ctype.h>',
]),
Conditional(self.opt, '', self.opt.c_std != 'C89', [
'#include <stdbool.h>',
]),
'#include <string.h>',
'',
'static char str[256] = "123456789";',
'static {c_bool} verbose = {c_false};'.format(**self.sym),
self._getopt_template(),
'',
'',
Conditional2(self.opt, '', self.opt.undefined_crc_parameters, [
'static void print_params(const {cfg_t} *cfg)'.format(**self.sym),
], [
'static void print_params(void)',
]),
'{',
CodeGen(self.opt, 4*' ', [
'char format[20];',
'',
Conditional2(self.opt, '', self.opt.c_std == 'C89', [
'sprintf(format, "%%-16s = 0x%%0%dlx\\n", (unsigned int)({cfg_width} + 3) / 4);'.format(**self.sym),
'printf("%-16s = %d\\n", "width", (unsigned int){cfg_width});'.format(**self.sym),
'printf(format, "poly", (unsigned long int){cfg_poly});'.format(**self.sym),
'printf("%-16s = %s\\n", "reflect_in", {0});'.format(self.sym['cfg_reflect_in'] + ' ? "true": "false"' if self.opt.reflect_in is None else ('"true"' if self.opt.reflect_in else '"false"')),
'printf(format, "xor_in", (unsigned long int){cfg_xor_in});'.format(**self.sym),
'printf("%-16s = %s\\n", "reflect_out", {0});'.format(self.sym['cfg_reflect_out'] + ' ? "true": "false"' if self.opt.reflect_out is None else ('"true"' if self.opt.reflect_out else '"false"')),
'printf(format, "xor_out", (unsigned long int){cfg_xor_out});'.format(**self.sym),
'printf(format, "crc_mask", (unsigned long int){cfg_mask});'.format(**self.sym),
'printf(format, "msb_mask", (unsigned long int){cfg_msb_mask});'.format(**self.sym),
], [
'snprintf(format, sizeof(format), "%%-16s = 0x%%0%dllx\\n", (unsigned int)({cfg_width} + 3) / 4);'.format(**self.sym),
'printf("%-16s = %d\\n", "width", (unsigned int){cfg_width});'.format(**self.sym),
'printf(format, "poly", (unsigned long long int){cfg_poly});'.format(**self.sym),
'printf("%-16s = %s\\n", "reflect_in", {0});'.format(self.sym['cfg_reflect_in'] + ' ? "true": "false"' if self.opt.reflect_in is None else ('"true"' if self.opt.reflect_in else '"false"')),
'printf(format, "xor_in", (unsigned long long int){cfg_xor_in});'.format(**self.sym),
'printf("%-16s = %s\\n", "reflect_out", {0});'.format(self.sym['cfg_reflect_out'] + ' ? "true": "false"' if self.opt.reflect_out is None else ('"true"' if self.opt.reflect_out else '"false"')),
'printf(format, "xor_out", (unsigned long long int){cfg_xor_out});'.format(**self.sym),
'printf(format, "crc_mask", (unsigned long long int){cfg_mask});'.format(**self.sym),
'printf(format, "msb_mask", (unsigned long long int){cfg_msb_mask});'.format(**self.sym),
]),
]),
'}',
'',
'',
Comment(self.opt, '', [
'C main function.',
'\\param[in] argc the number of arguments in \\a argv.',
'\\param[in] argv a NULL-terminated array of pointers to the argument strings.',
'\\retval 0 on success.',
'\\retval >0 on error.',
]),
'int main(int argc, char *argv[])',
'{',
CodeGen(self.opt, 4*' ', [
Conditional(self.opt, '', self.opt.undefined_crc_parameters, [
'{cfg_t} cfg = '.format(**self.sym) + '{',
Conditional(self.opt, 4*' ', self.opt.width is None, [
'0, /* width */',
]),
Conditional(self.opt, 4*' ', self.opt.poly is None, [
'0, /* poly */',
]),
Conditional(self.opt, 4*' ', self.opt.reflect_in is None, [
'0, /* reflect_in */',
]),
Conditional(self.opt, 4*' ', self.opt.xor_in is None, [
'0, /* xor_in */',
]),
Conditional(self.opt, 4*' ', self.opt.reflect_out is None, [
'0, /* reflect_out */',
]),
Conditional(self.opt, 4*' ', self.opt.xor_out is None, [
'0, /* xor_out */',
]),
Conditional(self.opt, 4*' ', self.opt.width is None, [
'',
'0, /* crc_mask */',
'0, /* msb_mask */',
'0, /* crc_shift */',
]),
'};',
]),
'{crc_t} crc;'.format(**self.sym),
'',
Conditional2(self.opt, '', self.opt.undefined_crc_parameters, [
'get_config(argc, argv, &cfg);',
], [
'get_config(argc, argv);',
]),
Conditional(self.opt, '', _use_crc_table_gen(self.opt), [
'{crc_table_gen_function}(&cfg);'.format(**self.sym),
]),
'crc = {0}({1});'.format(self.sym['crc_init_function'], '' if _use_constant_crc_init(self.sym) else '&cfg'),
'crc = {0}({1}crc, (void *)str, strlen(str));'.format(self.sym['crc_update_function'], '' if _use_cfg_in_crc_update(self.opt) else '&cfg, '),
'crc = {0}({1}crc);'.format(self.sym['crc_finalize_function'], '' if _use_cfg_in_finalize(self.opt) else '&cfg, '),
'',
'if (verbose) {',
CodeGen(self.opt, 4*' ', [
'print_params({0});'.format('&cfg' if self.opt.undefined_crc_parameters else ''),
]),
'}',
Conditional2(self.opt, '', self.opt.c_std == 'C89', [
'printf("0x%lx\\n", (unsigned long int)crc);',
], [
'printf("0x%llx\\n", (unsigned long long int)crc);',
]),
'return 0;',
]),
'}',
]
return out
def _getopt_template(self):
"""
Add getopt functions.
"""
out = [
Conditional(self.opt, '', self.opt.reflect_in is None or self.opt.reflect_out is None, [
'',
'',
'static {c_bool} atob(const char *str)'.format(**self.sym),
'{',
CodeGen(self.opt, 4*' ', [
'if (!str) {',
CodeGen(self.opt, 4*' ', [
'return 0;',
]),
'}',
'if (isdigit(str[0])) {',
CodeGen(self.opt, 4*' ', [
'return ({c_bool})atoi(str);'.format(**self.sym),
]),
'}',
'if (tolower(str[0]) == \'t\') {',
CodeGen(self.opt, 4*' ', [
'return {c_true};'.format(**self.sym),
]),
'}',
'return {c_false};'.format(**self.sym),
]),
'}',
]),
Conditional(self.opt, '', self.opt.poly is None or self.opt.xor_in is None or self.opt.xor_out is None, [
'',
'',
'static crc_t xtoi(const char *str)',
'{',
CodeGen(self.opt, 4*' ', [
'crc_t ret = 0;',
'',
'if (!str) {',
CodeGen(self.opt, 4*' ', [
'return 0;',
]),
'}',
'if (str[0] == \'0\' && tolower(str[1]) == \'x\') {',
CodeGen(self.opt, 4*' ', [
'str += 2;',
'while (*str) {',
CodeGen(self.opt, 4*' ', [
'if (isdigit(*str))',
CodeGen(self.opt, 4*' ', [
'ret = 16 * ret + *str - \'0\';',
]),
'else if (isxdigit(*str))',
CodeGen(self.opt, 4*' ', [
'ret = 16 * ret + tolower(*str) - \'a\' + 10;',
]),
'else',
CodeGen(self.opt, 4*' ', [
'return ret;',
]),
'str++;',
]),
'}',
]),
'} else if (isdigit(*str)) {',
CodeGen(self.opt, 4*' ', [
'while (*str) {',
CodeGen(self.opt, 4*' ', [
'if (isdigit(*str))',
CodeGen(self.opt, 4*' ', [
'ret = 10 * ret + *str - \'0\';',
]),
'else',
CodeGen(self.opt, 4*' ', [
'return ret;',
]),
'str++;',
]),
'}',
]),
'}',
'return ret;',
]),
'}',
]),
'',
'',
Conditional2(self.opt, '', self.opt.undefined_crc_parameters, [
'static int get_config(int argc, char *argv[], {cfg_t} *cfg)'.format(**self.sym),
], [
'static int get_config(int argc, char *argv[])',
]),
'{',
CodeGen(self.opt, 4*' ', [
'int c;',
'int option_index;',
'static struct option long_options[] = {',
CodeGen(self.opt, 4*' ', [
Conditional(self.opt, '', self.opt.width is None, [
'{"width", 1, 0, \'w\'},',
]),
Conditional(self.opt, '', self.opt.poly is None, [
'{"poly", 1, 0, \'p\'},',
]),
Conditional(self.opt, '', self.opt.reflect_in is None, [
'{"reflect-in", 1, 0, \'n\'},',
]),
Conditional(self.opt, '', self.opt.xor_in is None, [
'{"xor-in", 1, 0, \'i\'},',
]),
Conditional(self.opt, '', self.opt.reflect_out is None, [
'{"reflect-out", 1, 0, \'u\'},',
]),
Conditional(self.opt, '', self.opt.xor_out is None, [
'{"xor-out", 1, 0, \'o\'},',
]),
'{"verbose", 0, 0, \'v\'},',
'{"check-string", 1, 0, \'s\'},',
Conditional(self.opt, '', self.opt.width is None, [
'{"table-idx-width", 1, 0, \'t\'},',
]),
'{0, 0, 0, 0}',
]),
'};',
'',
'while (1) {',
CodeGen(self.opt, 4*' ', [
'option_index = 0;',
'',
'c = getopt_long(argc, argv, "w:p:n:i:u:o:s:vt", long_options, &option_index);',
'if (c == -1)',
CodeGen(self.opt, 4*' ', [
'break;',
]),
'',
'switch (c) {',
CodeGen(self.opt, 4*' ', [
'case 0:',
CodeGen(self.opt, 4*' ', [
'printf("option %s", long_options[option_index].name);',
'if (optarg)',
CodeGen(self.opt, 4*' ', [
'printf(" with arg %s", optarg);',
]),
'printf("\\n");',
'break;',
]),
Conditional(self.opt, '', self.opt.width is None, [
'case \'w\':',
CodeGen(self.opt, 4*' ', [
'cfg->width = atoi(optarg);',
'break;',
]),
]),
Conditional(self.opt, '', self.opt.poly is None, [
'case \'p\':',
CodeGen(self.opt, 4*' ', [
'cfg->poly = xtoi(optarg);',
'break;',
]),
]),
Conditional(self.opt, '', self.opt.reflect_in is None, [
'case \'n\':',
CodeGen(self.opt, 4*' ', [
'cfg->reflect_in = atob(optarg);',
'break;',
]),
]),
Conditional(self.opt, '', self.opt.xor_in is None, [
'case \'i\':',
CodeGen(self.opt, 4*' ', [
'cfg->xor_in = xtoi(optarg);',
'break;',
]),
]),
Conditional(self.opt, '', self.opt.reflect_out is None, [
'case \'u\':',
CodeGen(self.opt, 4*' ', [
'cfg->reflect_out = atob(optarg);',
| |
index of the view to return (default ``0``).
prefer_lut : bool
When the VOI LUT Module contains both *Window Width*/*Window Center*
and *VOI LUT Sequence*, if ``True`` (default) then apply the VOI LUT,
otherwise apply the windowing operation.
Returns
-------
numpy.ndarray
An array with applied VOI LUT or windowing operation.
Notes
-----
When the dataset requires a modality LUT or rescale operation as part of
the Modality LUT Module, that operation must be applied before any
windowing operation.
See Also
--------
:func:`~pydicom.pixel_data_handlers.util.apply_modality_lut`
:func:`~pydicom.pixel_data_handlers.util.apply_voi`
:func:`~pydicom.pixel_data_handlers.util.apply_windowing`
References
----------
* DICOM Standard, Part 3, :dcm:`Annex C.11.2
<part03/sect_C.11.html#sect_C.11.2>`
* DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5
<part03/sect_C.8.11.3.html#sect_C.8.11.3.1.5>`
* DICOM Standard, Part 4, :dcm:`Annex N.2.1.1
<part04/sect_N.2.html#sect_N.2.1.1>`
"""
valid_voi = False
if 'VOILUTSequence' in ds:
ds.VOILUTSequence = cast(List["Dataset"], ds.VOILUTSequence)
valid_voi = None not in [
ds.VOILUTSequence[0].get('LUTDescriptor', None),
ds.VOILUTSequence[0].get('LUTData', None)
]
valid_windowing = None not in [
ds.get('WindowCenter', None),
ds.get('WindowWidth', None)
]
if valid_voi and valid_windowing:
if prefer_lut:
return apply_voi(arr, ds, index)
return apply_windowing(arr, ds, index)
if valid_voi:
return apply_voi(arr, ds, index)
if valid_windowing:
return apply_windowing(arr, ds, index)
return arr
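# Usage sketch (the file name is hypothetical; dcmread, Dataset.pixel_array and
# this function are part of pydicom's public API):
#     from pydicom import dcmread
#     from pydicom.pixel_data_handlers.util import apply_voi_lut
#     ds = dcmread("ct_slice.dcm")
#     out = apply_voi_lut(ds.pixel_array, ds, index=0)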
def apply_voi(
arr: "np.ndarray", ds: "Dataset", index: int = 0
) -> "np.ndarray":
"""Apply a VOI lookup table to `arr`.
.. versionadded:: 2.1
Parameters
----------
arr : numpy.ndarray
The :class:`~numpy.ndarray` to apply the VOI LUT to.
ds : dataset.Dataset
A dataset containing a :dcm:`VOI LUT Module<part03/sect_C.11.2.html>`.
If (0028,3010) *VOI LUT Sequence* is present then returns an array
of ``np.uint8`` or ``np.uint16``, depending on the 3rd value of
(0028,3002) *LUT Descriptor*, otherwise `arr` will be returned
unchanged.
index : int, optional
When the VOI LUT Module contains multiple alternative views, this is
the index of the view to return (default ``0``).
Returns
-------
numpy.ndarray
An array with applied VOI LUT.
See Also
--------
:func:`~pydicom.pixel_data_handlers.util.apply_modality_lut`
:func:`~pydicom.pixel_data_handlers.util.apply_windowing`
References
----------
* DICOM Standard, Part 3, :dcm:`Annex C.11.2
<part03/sect_C.11.html#sect_C.11.2>`
* DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5
<part03/sect_C.8.11.3.html#sect_C.8.11.3.1.5>`
* DICOM Standard, Part 4, :dcm:`Annex N.2.1.1
<part04/sect_N.2.html#sect_N.2.1.1>`
"""
if "VOILUTSequence" not in ds:
return arr
if not np.issubdtype(arr.dtype, np.integer):
warnings.warn(
"Applying a VOI LUT on a float input array may give "
"incorrect results"
)
# VOI LUT Sequence contains one or more items
item = cast(List["Dataset"], ds.VOILUTSequence)[index]
lut_descriptor = cast(List[int], item.LUTDescriptor)
nr_entries = lut_descriptor[0] or 2**16
first_map = lut_descriptor[1]
# PS3.3 C.8.11.3.1.5: may be 8, 10-16
nominal_depth = lut_descriptor[2]
if nominal_depth in range(10, 17):
dtype = 'uint16'
elif nominal_depth == 8:
dtype = 'uint8'
else:
raise NotImplementedError(
f"'{nominal_depth}' bits per LUT entry is not supported"
)
# Ambiguous VR, US or OW
unc_data: Iterable[int]
if item['LUTData'].VR == 'OW':
endianness = '<' if ds.is_little_endian else '>'
unpack_fmt = f'{endianness}{nr_entries}H'
unc_data = unpack(unpack_fmt, cast(bytes, item.LUTData))
else:
unc_data = cast(List[int], item.LUTData)
lut_data: "np.ndarray" = np.asarray(unc_data, dtype=dtype)
# IVs < `first_map` get set to first LUT entry (i.e. index 0)
clipped_iv = np.zeros(arr.shape, dtype=dtype)
# IVs >= `first_map` are mapped by the VOI LUT
# `first_map` may be negative, positive or 0
mapped_pixels = arr >= first_map
clipped_iv[mapped_pixels] = arr[mapped_pixels] - first_map
# IVs > number of entries get set to last entry
np.clip(clipped_iv, 0, nr_entries - 1, out=clipped_iv)
return cast("np.ndarray", lut_data[clipped_iv])
def apply_windowing(
arr: "np.ndarray", ds: "Dataset", index: int = 0
) -> "np.ndarray":
"""Apply a windowing operation to `arr`.
.. versionadded:: 2.1
Parameters
----------
arr : numpy.ndarray
The :class:`~numpy.ndarray` to apply the windowing operation to.
ds : dataset.Dataset
A dataset containing a :dcm:`VOI LUT Module<part03/sect_C.11.2.html>`.
If (0028,1050) *Window Center* and (0028,1051) *Window Width* are
present then returns an array of ``np.float64``, otherwise `arr` will
be returned unchanged.
index : int, optional
When the VOI LUT Module contains multiple alternative views, this is
the index of the view to return (default ``0``).
Returns
-------
numpy.ndarray
An array with applied windowing operation.
Notes
-----
When the dataset requires a modality LUT or rescale operation as part of
the Modality LUT Module, that operation must be applied before any
windowing operation.
See Also
--------
:func:`~pydicom.pixel_data_handlers.util.apply_modality_lut`
:func:`~pydicom.pixel_data_handlers.util.apply_voi`
References
----------
* DICOM Standard, Part 3, :dcm:`Annex C.11.2
<part03/sect_C.11.html#sect_C.11.2>`
* DICOM Standard, Part 3, :dcm:`Annex C.8.11.3.1.5
<part03/sect_C.8.11.3.html#sect_C.8.11.3.1.5>`
* DICOM Standard, Part 4, :dcm:`Annex N.2.1.1
<part04/sect_N.2.html#sect_N.2.1.1>`
"""
if "WindowWidth" not in ds and "WindowCenter" not in ds:
return arr
if ds.PhotometricInterpretation not in ['MONOCHROME1', 'MONOCHROME2']:
raise ValueError(
"When performing a windowing operation only 'MONOCHROME1' and "
"'MONOCHROME2' are allowed for (0028,0004) Photometric "
"Interpretation"
)
# May be LINEAR (default), LINEAR_EXACT, SIGMOID or not present, VM 1
voi_func = cast(str, getattr(ds, 'VOILUTFunction', 'LINEAR')).upper()
# VR DS, VM 1-n
elem = ds['WindowCenter']
center = (
cast(List[float], elem.value)[index] if elem.VM > 1 else elem.value
)
center = cast(float, center)
elem = ds['WindowWidth']
width = cast(List[float], elem.value)[index] if elem.VM > 1 else elem.value
width = cast(float, width)
# The output range depends on whether or not a modality LUT or rescale
# operation has been applied
ds.BitsStored = cast(int, ds.BitsStored)
y_min: float
y_max: float
if 'ModalityLUTSequence' in ds:
# Unsigned - see PS3.3 C.11.1.1.1
y_min = 0
item = cast(List["Dataset"], ds.ModalityLUTSequence)[0]
bit_depth = cast(List[int], item.LUTDescriptor)[2]
y_max = 2**bit_depth - 1
elif ds.PixelRepresentation == 0:
# Unsigned
y_min = 0
y_max = 2**ds.BitsStored - 1
else:
# Signed
y_min = -2**(ds.BitsStored - 1)
y_max = 2**(ds.BitsStored - 1) - 1
slope = ds.get('RescaleSlope', None)
intercept = ds.get('RescaleIntercept', None)
if slope is not None and intercept is not None:
ds.RescaleSlope = cast(float, ds.RescaleSlope)
ds.RescaleIntercept = cast(float, ds.RescaleIntercept)
# Otherwise it's the actual data range
y_min = y_min * ds.RescaleSlope + ds.RescaleIntercept
y_max = y_max * ds.RescaleSlope + ds.RescaleIntercept
y_range = y_max - y_min
arr = arr.astype('float64')
if voi_func in ['LINEAR', 'LINEAR_EXACT']:
# PS3.3 C.11.2.1.2.1 and C.11.2.1.3.2
if voi_func == 'LINEAR':
if width < 1:
raise ValueError(
"The (0028,1051) Window Width must be greater than or "
"equal to 1 for a 'LINEAR' windowing operation"
)
center -= 0.5
width -= 1
elif width <= 0:
raise ValueError(
"The (0028,1051) Window Width must be greater than 0 "
"for a 'LINEAR_EXACT' windowing operation"
)
below = arr <= (center - width / 2)
above = arr > (center + width / 2)
between = np.logical_and(~below, ~above)
arr[below] = y_min
arr[above] = y_max
if between.any():
arr[between] = (
((arr[between] - center) / width + 0.5) * y_range + y_min
)
elif voi_func == 'SIGMOID':
# PS3.3 C.11.2.1.3.1
if width <= 0:
raise ValueError(
"The (0028,1051) Window Width must be greater than 0 "
"for a 'SIGMOID' windowing operation"
)
arr = y_range / (1 + np.exp(-4 * (arr - center) / width)) + y_min
else:
raise ValueError(
f"Unsupported (0028,1056) VOI LUT Function value '{voi_func}'"
)
return arr
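# Worked sketch of the default 'LINEAR' mapping above, with illustrative values:
# for center=40, width=400 and an unsigned 12-bit output range (y_min=0,
# y_max=4095), an input value of 40 maps to
# ((40 - 39.5) / 399 + 0.5) * 4095 + 0 ~= 2052.6, i.e. roughly mid-range.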
def convert_color_space(
arr: "np.ndarray", current: str, desired: str, per_frame: bool = False
) -> "np.ndarray":
"""Convert the image(s) in `arr` from one color space to another.
.. versionchanged:: 1.4
Added support for ``YBR_FULL_422``
.. versionchanged:: 2.2
Added `per_frame` keyword parameter.
Parameters
----------
arr : numpy.ndarray
The image(s) as a :class:`numpy.ndarray` with
:attr:`~numpy.ndarray.shape` (frames, rows, columns, 3)
or (rows, columns, 3).
current : str
The current color space, should be a valid value for (0028,0004)
*Photometric Interpretation*. One of ``'RGB'``, ``'YBR_FULL'``,
``'YBR_FULL_422'``.
desired : str
The desired color space, should be a valid value for (0028,0004)
*Photometric Interpretation*. One of ``'RGB'``, ``'YBR_FULL'``,
``'YBR_FULL_422'``.
per_frame : bool, optional
If ``True`` and the input array contains multiple frames then process
each frame individually to reduce memory usage. Default ``False``.
Returns
-------
numpy.ndarray
The image(s) converted to the desired color space.
References
----------
* DICOM Standard, Part 3,
:dcm:`Annex C.7.6.3.1.2<part03/sect_C.7.6.3.html#sect_C.7.6.3.1.2>`
* ISO/IEC 10918-5:2012 (`ITU T.871
<https://www.ijg.org/files/T-REC-T.871-201105-I!!PDF-E.pdf>`_),
Section 7
"""
def _no_change(arr: "np.ndarray") -> "np.ndarray":
return arr
_converters = {
'YBR_FULL_422': {
'YBR_FULL_422': _no_change,
'YBR_FULL': _no_change,
'RGB': _convert_YBR_FULL_to_RGB,
},
'YBR_FULL': {
'YBR_FULL': _no_change,
'YBR_FULL_422': _no_change,
'RGB': _convert_YBR_FULL_to_RGB,
},
'RGB': {
'RGB': _no_change,
'YBR_FULL': _convert_RGB_to_YBR_FULL,
'YBR_FULL_422': _convert_RGB_to_YBR_FULL,
}
}
try:
converter = _converters[current][desired]
except KeyError:
raise NotImplementedError(
f"Conversion from {current} to {desired} is not supported."
)
if len(arr.shape) == 4 and per_frame:
for idx, frame in enumerate(arr):
arr[idx] = converter(frame)
return arr
return converter(arr)
(prefix + "p58291433-p58454173.7z"),
page_ids=range(58291433, 58454174),
darus_id=93849,
sha1="28ec6ece5639810ac14817d452f0f089faabb4ed",
size=390949888,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58454174-p58655598.7z"),
page_ids=range(58454174, 58655599),
darus_id=93850,
sha1="dfd8dd8137e0f32bd71495c0722c9c424088b71d",
size=491834880,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58655599-p58788270.7z"),
page_ids=range(58655599, 58788271),
darus_id=93852,
sha1="b650571a33707719bea30214c38685109967ac56",
size=359981969,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58788271-p58944638.7z"),
page_ids=range(58788271, 58944639),
darus_id=93854,
sha1="2224cfe3ea44232084477b0bf535f86fc894c25a",
size=384995178,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p58944639-p59108291.7z"),
page_ids=range(58944639, 59108292),
darus_id=93855,
sha1="a792b3867179201879705b5c1319859a68855798",
size=398886497,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59108292-p59273992.7z"),
page_ids=range(59108292, 59273993),
darus_id=93857,
sha1="70e96ede1c0a566d69bc07292840d1f8daabc82c",
size=393477571,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59273993-p59405079.7z"),
page_ids=range(59273993, 59405080),
darus_id=93858,
sha1="e2378a47ba803a4026cb58ebfcee45c430a0c1c7",
size=319587998,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59405080-p59505406.7z"),
page_ids=range(59405080, 59505407),
darus_id=93860,
sha1="e8599190c0a76188ddaf93a1bbeb602e7e0959bb",
size=242521800,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59505407-p59649436.7z"),
page_ids=range(59505407, 59649437),
darus_id=93861,
sha1="3b2d9f280401c8385b19282b06c9fccd4271caf9",
size=335977003,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59649437-p59781420.7z"),
page_ids=range(59649437, 59781421),
darus_id=93864,
sha1="6dc6460c2ae3f0b13c211b899c80869d94168bc7",
size=335740858,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59781421-p59918839.7z"),
page_ids=range(59781421, 59918840),
darus_id=93865,
sha1="6c702a6d79ad7e5caaba2caf12f055bd4c70a4d4",
size=355031560,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p59918840-p60065594.7z"),
page_ids=range(59918840, 60065595),
darus_id=93866,
sha1="f391729353dcdd61fee0d9158a13579584e00d1b",
size=341425717,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60065595-p60192698.7z"),
page_ids=range(60065595, 60192699),
darus_id=93868,
sha1="b70fcb722523571badf6c52873c15f223fc8ea6a",
size=321021870,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60192699-p60322125.7z"),
page_ids=range(60192699, 60322126),
darus_id=93869,
sha1="2f4c5c3b7882ae29f28af8e89f42444e9ebba476",
size=318669180,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60322126-p60459703.7z"),
page_ids=range(60322126, 60459704),
darus_id=93870,
sha1="d137974069f3148444b8f7277835e361e556e39d",
size=327385112,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60459704-p60587338.7z"),
page_ids=range(60459704, 60587339),
darus_id=93872,
sha1="15b7fa620dba7e299321ec825fe36ce2e2b611b4",
size=323288580,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60587339-p60701562.7z"),
page_ids=range(60587339, 60701563),
darus_id=93874,
sha1="869438df935473c776d3f3a047a3cde32a425b84",
size=291455455,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60701563-p60854585.7z"),
page_ids=range(60701563, 60854586),
darus_id=93875,
sha1="b0adc4377fea6c478a43fec130e20ba829a5cbbc",
size=366710391,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p60854586-p61032550.7z"),
page_ids=range(60854586, 61032551),
darus_id=93876,
sha1="19039604d179ced3ff82609c75f119b96ee4080f",
size=351933426,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61032551-p61246796.7z"),
page_ids=range(61032551, 61246797),
darus_id=93878,
sha1="916f1b1098c546af13a49562cadd13fc630615b6",
size=386259928,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61246797-p61363915.7z"),
page_ids=range(61246797, 61363916),
darus_id=93879,
sha1="843253de8172ab8f80f18edb106eed90c6a7a2c4",
size=258187087,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61363916-p61461961.7z"),
page_ids=range(61363916, 61461962),
darus_id=93881,
sha1="b536d6f6e2a73c633f2b6b5b0b57be0053a3127d",
size=253511683,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61461962-p61563343.7z"),
page_ids=range(61461962, 61563344),
darus_id=93882,
sha1="7d79993c1c0fb491676d385cec55423218738c26",
size=248348354,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61563344-p61691958.7z"),
page_ids=range(61563344, 61691959),
darus_id=93884,
sha1="96baeb0f1af7dc267742e70cf633075193ad82c8",
size=313748513,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61691959-p61827438.7z"),
page_ids=range(61691959, 61827439),
darus_id=93885,
sha1="50df67d3a39c42a94059bb82c2ae043671ce4164",
size=355764491,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61827439-p61940925.7z"),
page_ids=range(61827439, 61940926),
darus_id=93886,
sha1="edb3d10cafde0b573b470d8e2ef78be93887ad06",
size=301309821,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61940926-p61951134.7z"),
page_ids=range(61940926, 61951135),
darus_id=93887,
sha1="f67142a3c922285046d948f53fac141e6982eed7",
size=36308000,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61951135-p61999598.7z"),
page_ids=range(61951135, 61999599),
darus_id=93888,
sha1="ef141590d9f88671e9eafc5b145445de9766fdee",
size=145070715,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p61999599-p62009330.7z"),
page_ids=range(61999599, 62009331),
darus_id=93889,
sha1="24375fe0e830e57db832a4ec083dfd21d542bc2d",
size=37997985,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62009331-p62015421.7z"),
page_ids=range(62009331, 62015422),
darus_id=93890,
sha1="d34367247d255e8468817b785123833e9066c1cd",
size=30438912,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62015422-p62021053.7z"),
page_ids=range(62015422, 62021054),
darus_id=93891,
sha1="1b41da0e9e220d94326ec3f55095ac79fc587ca9",
size=32241481,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62021054-p62038584.7z"),
page_ids=range(62021054, 62038585),
darus_id=93892,
sha1="e823cd1b7b031738a37c13f8468b6b319cbb0965",
size=75265420,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62038585-p62066422.7z"),
page_ids=range(62038585, 62066423),
darus_id=93893,
sha1="98ec6b8f116508888e802d7e7f89a72b509d9509",
size=93973393,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62066423-p62077450.7z"),
page_ids=range(62066423, 62077451),
darus_id=93895,
sha1="edab474d9ac28a6f28c29211ea281351db8dd4d0",
size=44654083,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62077451-p62087506.7z"),
page_ids=range(62077451, 62087507),
darus_id=93896,
sha1="049f89300d80b34dcfeb8d8d13c6d650507351d4",
size=69447980,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62087507-p62253005.7z"),
page_ids=range(62087507, 62253006),
darus_id=93897,
sha1="cd1a65572e9870d9a93aaecf88d0d092dcd54ebf",
size=327535342,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62253006-p62413174.7z"),
page_ids=range(62253006, 62413175),
darus_id=93898,
sha1="32d5d03385ae2e549b0063c40f80b67f33397fbe",
size=459205462,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62413175-p62632019.7z"),
page_ids=range(62413175, 62632020),
darus_id=93899,
sha1="4034378b8c4de87449efc98596c281a806772a9a",
size=394245177,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62632020-p62799095.7z"),
page_ids=range(62632020, 62799096),
darus_id=93901,
sha1="3612829d4bb6566e43cab3e883ede665fa84184c",
size=367246891,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62799096-p62938309.7z"),
page_ids=range(62799096, 62938310),
darus_id=93902,
sha1="84cd357999764c803987a8b2933c15c2563ab304",
size=420741785,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p62938310-p63030244.7z"),
page_ids=range(62938310, 63030245),
darus_id=93904,
sha1="0ab45f74479487ef45e7d43b959143e4187dbcfb",
size=210007285,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63030245-p63114211.7z"),
page_ids=range(63030245, 63114212),
darus_id=93905,
sha1="7910017f674550834ec59dc61a2c18d75a69403d",
size=194456326,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63114212-p63278403.7z"),
page_ids=range(63114212, 63278404),
darus_id=93906,
sha1="3681c3b4f22272e8c9bf91c2edf83604de132ffb",
size=363905129,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63278404-p63479808.7z"),
page_ids=range(63278404, 63479809),
darus_id=93908,
sha1="3ea0f89147f21d7674b5a0489d44aa4e95d059ab",
size=432617094,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63479809-p63664031.7z"),
page_ids=range(63479809, 63664032),
darus_id=93910,
sha1="29208ecc5cd54423c9d9713fda6ee7d9d434fe5e",
size=399726060,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63664032-p63828840.7z"),
page_ids=range(63664032, 63828841),
darus_id=93911,
sha1="54ace373ea5b7ab0c7863e64bc57c4f4e340913b",
size=394301213,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p63828841-p64022670.7z"),
page_ids=range(63828841, 64022671),
darus_id=93913,
sha1="2ef639ebb5146d9e5b35e5aac17737d840218a5e",
size=410690263,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64022671-p64258411.7z"),
page_ids=range(64022671, 64258412),
darus_id=93914,
sha1="7b86b947d9248295609566a4d0f2a538af708435",
size=454315946,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64258412-p64417768.7z"),
page_ids=range(64258412, 64417769),
darus_id=93915,
sha1="41a637f89fc9bf2809a7457821ccc625893588b7",
size=340815044,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64417769-p64591960.7z"),
page_ids=range(64417769, 64591961),
darus_id=93917,
sha1="0283c0c40c208ffc8aac7120625a31fc8b5c91a1",
size=351788739,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64591961-p64767773.7z"),
page_ids=range(64591961, 64767774),
darus_id=93918,
sha1="293aeb8c63d07fd8cc83988504cbecf5289b7d05",
size=373854099,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p64767774-p65063475.7z"),
page_ids=range(64767774, 65063476),
darus_id=93919,
sha1="bb8f71a1b3d087a54ed57ea264de19ba82c232ed",
size=336294730,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65063476-p65195512.7z"),
page_ids=range(65063476, 65195513),
darus_id=93922,
sha1="dee337cf188f173ee9c09b1e76b9407ab7e7ff5e",
size=227657765,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65195513-p65286578.7z"),
page_ids=range(65195513, 65286579),
darus_id=93924,
sha1="10de79fc6f20ceb4d565931a6d8978c6490b332e",
size=214250281,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65286579-p65393993.7z"),
page_ids=range(65286579, 65393994),
darus_id=93925,
sha1="c4d1356ccf7eb4d39d8679195f87a671a97a9464",
size=196404385,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65393994-p65557534.7z"),
page_ids=range(65393994, 65557535),
darus_id=93926,
sha1="d5248202d519faf31a998f395395b8ea0089308b",
size=236593221,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65557535-p65585258.7z"),
page_ids=range(65557535, 65585259),
darus_id=93927,
sha1="1822269a6c1cc0ff14fd85b25e0372a3dcd06143",
size=84539163,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65585259-p65757268.7z"),
page_ids=range(65585259, 65757269),
darus_id=93929,
sha1="d15fd37498dff72e2b77bec222eaa0530a7a7d9a",
size=341618562,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p65757269-p66077482.7z"),
page_ids=range(65757269, 66077483),
darus_id=93930,
sha1="d8be7a87edbe8f9255528279e921effc470802e1",
size=550538341,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66077483-p66255364.7z"),
page_ids=range(66077483, 66255365),
darus_id=93932,
sha1="8ee1214ef55636c0b9a8ef3fa089ebac3e0d417a",
size=400353691,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66255365-p66509805.7z"),
page_ids=range(66255365, 66509806),
darus_id=93934,
sha1="1e4bf2dbedb3153d29c6ccd7ca25e0152977701a",
size=453658776,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66509806-p66781694.7z"),
page_ids=range(66509806, 66781695),
darus_id=93935,
sha1="e287b11bf04c3ce5e57cc4cda69efa1d9a98bdb3",
size=436658360,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p66781695-p67076296.7z"),
page_ids=range(66781695, 67076297),
darus_id=93938,
sha1="6430f13221566eb60f563a87f24c1b789dcb382d",
size=470151252,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p67076297-p67448269.7z"),
page_ids=range(67076297, 67448270),
darus_id=93940,
sha1="c587686dbf2490c7a31e6495ce4f90b26331e548",
size=522105960,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p67448270-p67746260.7z"),
page_ids=range(67448270, 67746261),
darus_id=93941,
sha1="fd254f9771a71ca1fa7969254ed330fccc3b853f",
size=537545010,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p67746261-p68099469.7z"),
page_ids=range(67746261, 68099470),
darus_id=93942,
sha1="1a9e7b4d2a04feb0f634f6fee90cb69648935031",
size=662478781,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68099470-p68432080.7z"),
page_ids=range(68099470, 68432081),
darus_id=93944,
sha1="7146513aa5a49d954cfb0fb4a37a0bc72c03e93d",
size=670877121,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68432081-p68740980.7z"),
page_ids=range(68432081, 68740981),
darus_id=93946,
sha1="bbd1e39dda38dd06fc039e9e2ccc3cd38846f000",
size=629039543,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68740981-p68962162.7z"),
page_ids=range(68740981, 68962163),
darus_id=93948,
sha1="9d3420f9e70947c3e779e35644513655b9600775",
size=463759879,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p68962163-p69247397.7z"),
page_ids=range(68962163, 69247398),
darus_id=93949,
sha1="c01cec3dcabc68db4a0a0dec5382662768ffb3ae",
size=527607930,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p69247398-p69576596.7z"),
page_ids=range(69247398, 69576597),
darus_id=93951,
sha1="96de5262a43bbbb4b7c65a770989310b7e6d40e9",
size=577802667,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p69576597-p69963244.7z"),
page_ids=range(69576597, 69963245),
darus_id=93952,
sha1="7d23f5465bf66aa2640c9080a6ac6dd629d8c97d",
size=681139320,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p69963245-p70352985.7z"),
page_ids=range(69963245, 70352986),
darus_id=93955,
sha1="c8da29e8a7f921b6956996f50d46e3d32f4f12a9",
size=666253147,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70352986-p70755365.7z"),
page_ids=range(70352986, 70755366),
darus_id=93957,
sha1="e005965a08a4d64668436b43ea6e4f52cc4de4bd",
size=694472993,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70755366-p70952447.7z"),
page_ids=range(70755366, 70952448),
darus_id=93959,
sha1="238bcde4bcae17e89e387082fae0e1dba7c0aed6",
size=363387161,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70952448-p70957232.7z"),
page_ids=range(70952448, 70957233),
darus_id=93960,
sha1="4706a1578bb0e4ceae8d16fcf40ccf487e8d6809",
size=25114920,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70957233-p70961768.7z"),
page_ids=range(70957233, 70961769),
darus_id=93961,
sha1="5e1715cea49a87bb31027f777dd98d0f90c4867f",
size=25116810,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70961769-p70966625.7z"),
page_ids=range(70961769, 70966626),
darus_id=93962,
sha1="710c8dea6822a2cf16f39d113c32a57bf2279b1e",
size=26120757,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70966626-p70969258.7z"),
page_ids=range(70966626, 70969259),
darus_id=93963,
sha1="eb5e8eecc182ed4e82cb491f0d45fabbcd764e08",
size=22754125,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70969259-p70971691.7z"),
page_ids=range(70969259, 70971692),
darus_id=93964,
sha1="35644a9f691b9c265b08af7c9906b2b4655fd4b5",
size=23047090,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70971692-p70974507.7z"),
page_ids=range(70971692, 70974508),
darus_id=93965,
sha1="9eedde4147cba05a24208bb54b9032889c8a2634",
size=25361923,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p70974508-p71048851.7z"),
page_ids=range(70974508, 71048852),
darus_id=93967,
sha1="6fb13994afd1d2a9b7093c5c1e3ad56521e5badc",
size=150700328,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p71048852-p71445591.7z"),
page_ids=range(71048852, 71445592),
darus_id=93968,
sha1="2f2742ea3742cd6e73c7558e35e74076da47e615",
size=686827945,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p71445592-p71805048.7z"),
page_ids=range(71445592, 71805049),
darus_id=93969,
sha1="86a83ceafe208c8428b75138e886ee3ffd68e428",
size=649759538,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p71805049-p72094286.7z"),
page_ids=range(71805049, 72094287),
darus_id=93972,
sha1="cb8f2996b700e18e4a443109a9f4611643a16a23",
size=501260586,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p72094287-p72339340.7z"),
page_ids=range(72094287, 72339341),
darus_id=93974,
sha1="e7ea4a9a811b1d06c54e12abe58b40359ecbe3e5",
size=455159826,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p72339341-p72722930.7z"),
page_ids=range(72339341, 72722931),
darus_id=93975,
sha1="7817b6bd70b040bb3d6647584d001e4bf71e90e4",
size=679131033,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p72722931-p73077030.7z"),
page_ids=range(72722931, 73077031),
darus_id=93977,
sha1="a99d418ca887a8b8984da57a594ef08a1aba940d",
size=666838702,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p73077031-p73455249.7z"),
page_ids=range(73077031, 73455250),
darus_id=93979,
sha1="614652f2d4e47ba87fcaaa8e236c1bacd6a7da8a",
size=677695737,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p73455250-p73825768.7z"),
page_ids=range(73455250, 73825769),
darus_id=93981,
sha1="f02809cf1f8aad741a27e065dcb37e4518d44165",
size=783719465,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p73825769-p74197527.7z"),
page_ids=range(73825769, 74197528),
darus_id=93984,
sha1="1f79804ea7f45f76dfc68c27fb9d8c5e68567056",
size=719936868,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74197528-p74596141.7z"),
page_ids=range(74197528, 74596142),
darus_id=93986,
sha1="c23ea0aefdeec0a7d740f9701709b1d4ac17c216",
size=766113603,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74596142-p74803927.7z"),
page_ids=range(74596142, 74803928),
darus_id=93988,
sha1="f592caaf678ad44003aaf5ab3d5647766e5cc012",
size=444072573,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74803928-p74933695.7z"),
page_ids=range(74803928, 74933696),
darus_id=93989,
sha1="8f93d31ac08ee48a0e2f7613830cc627f152500f",
size=299597329,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p74933696-p75091810.7z"),
page_ids=range(74933696, 75091811),
darus_id=93990,
sha1="6c87fba6626278a1067d652118b5c1860f743884",
size=320486961,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75091811-p75281950.7z"),
page_ids=range(75091811, 75281951),
darus_id=93991,
sha1="ad72748f000aa81ada245d01fb81fe262641920c",
size=361785075,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75281951-p75472873.7z"),
page_ids=range(75281951, 75472874),
darus_id=93993,
sha1="7a2b4857a03cf7416e699dd459f8ec58c972f18d",
size=363788801,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75472874-p75649065.7z"),
page_ids=range(75472874, 75649066),
darus_id=93994,
sha1="6ccd8d8f9a3cc23537e9081b3b2c1c93fe0ab6a6",
size=324980520,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75649066-p75798893.7z"),
page_ids=range(75649066, 75798894),
darus_id=93995,
| |
#!/usr/bin/python3
# repository: zyksir/NIKE
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import logging
import numpy as np
from torch.nn.init import xavier_normal_
import torch
import torch.nn as nn
import torch.nn.functional as F
from IPython import embed
from sklearn.metrics import average_precision_score
from torch.autograd import Variable
from torch.utils.data import DataLoader
from dataloader import TestDataset
import heapq
class TopKHeap(object):
def __init__(self, k):
self.k = k
self.data = []
def push(self, elem):
if len(self.data) < self.k:
heapq.heappush(self.data, elem)
else:
topk_small = self.data[0]
if elem > topk_small:
heapq.heapreplace(self.data, elem)
def topk(self):
return [x for x in reversed([heapq.heappop(self.data) for x in range(len(self.data))])]
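# Example sketch: h = TopKHeap(3); pushing 5, 1, 9 and then 7 keeps {5, 7, 9},
# and h.topk() returns [9, 7, 5]. Note that topk() pops every element, so it
# empties the heap as a side effect.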
class KGEModel(nn.Module):
def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma,
double_entity_embedding=False, double_relation_embedding=False):
super(KGEModel, self).__init__()
self.model_name = model_name
self.nentity = nentity
self.nrelation = nrelation
self.hidden_dim = hidden_dim
self.epsilon = 2.0
self.gamma = nn.Parameter(
torch.Tensor([gamma]),
requires_grad=False
)
self.embedding_range = nn.Parameter(
torch.Tensor([(self.gamma.item() + self.epsilon) / hidden_dim]),
requires_grad=False
)
self.entity_dim = hidden_dim * 2 if double_entity_embedding else hidden_dim
self.relation_dim = hidden_dim * 2 if double_relation_embedding else hidden_dim
self.entity_embedding = nn.Parameter(torch.zeros(nentity, self.entity_dim))
nn.init.uniform_(
tensor=self.entity_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
self.relation_embedding = nn.Parameter(torch.zeros(nrelation, self.relation_dim))
nn.init.uniform_(
tensor=self.relation_embedding,
a=-self.embedding_range.item(),
b=self.embedding_range.item()
)
if model_name == 'pRotatE':
self.modulus = nn.Parameter(torch.Tensor([[0.5 * self.embedding_range.item()]]))
# Do not forget to modify this line when you add a new model in the "forward" function
if model_name not in ['TransE', 'DistMult', 'ComplEx', 'RotatE', 'pRotatE']:
raise ValueError('model %s not supported' % model_name)
if model_name == 'RotatE' and (not double_entity_embedding or double_relation_embedding):
raise ValueError('RotatE should use --double_entity_embedding')
if model_name == 'ComplEx' and (not double_entity_embedding or not double_relation_embedding):
raise ValueError('ComplEx should use --double_entity_embedding and --double_relation_embedding')
self.init()
def init(self):
logging.info("xavier_normal_ the parameters")
xavier_normal_(self.entity_embedding)
xavier_normal_(self.relation_embedding)
def forward(self, sample, mode='single'):
'''
Forward function that calculates the score of a batch of triples.
In 'single' mode, sample is a batch of triples.
In 'head-batch' or 'tail-batch' mode, sample consists of two parts:
the first part is the positive sample and the second part holds the
candidate entities for the negative samples. Only the varying entity is
stored, because negative and positive samples share the other two elements
of the triple ((head, relation) or (relation, tail)).
'''
if mode == 'single':
batch_size, negative_sample_size = sample.size(0), 1
head = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=sample[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=sample[:, 2]
).unsqueeze(1)
elif mode == 'head-batch':
tail_part, head_part = sample
batch_size, negative_sample_size = head_part.size(0), head_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part.view(-1)
).view(batch_size, negative_sample_size, -1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=tail_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=tail_part[:, 2]
).unsqueeze(1)
elif mode == 'tail-batch':
head_part, tail_part = sample
batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)
head = torch.index_select(
self.entity_embedding,
dim=0,
index=head_part[:, 0]
).unsqueeze(1)
relation = torch.index_select(
self.relation_embedding,
dim=0,
index=head_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
self.entity_embedding,
dim=0,
index=tail_part.view(-1)
).view(batch_size, negative_sample_size, -1)
else:
raise ValueError('mode %s not supported' % mode)
model_func = {
'TransE': self.TransE,
'DistMult': self.DistMult,
'ComplEx': self.ComplEx,
'RotatE': self.RotatE,
'pRotatE': self.pRotatE
}
if self.model_name in model_func:
score = model_func[self.model_name](head, relation, tail, mode)
else:
raise ValueError('model %s not supported' % self.model_name)
return score
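# Usage sketch (entity/relation counts, batch size and the id tensors below are
# illustrative assumptions, not taken from the original code):
#     model = KGEModel('TransE', nentity=1000, nrelation=20, hidden_dim=200, gamma=12.0)
#     pos = torch.randint(0, 1000, (8, 3))
#     pos[:, 1] = torch.randint(0, 20, (8,))
#     pos_score = model(pos)                                    # shape (8, 1), 'single' mode
#     neg_tails = torch.randint(0, 1000, (8, 64))
#     neg_score = model((pos, neg_tails), mode='tail-batch')    # shape (8, 64)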
def TransE(self, head, relation, tail, mode):
if mode == 'head-batch':
score = head + (relation - tail)
else:
score = (head + relation) - tail
score = self.gamma.item() - torch.norm(score, p=1, dim=2)
return score
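# TransE scores a triple as gamma - ||h + r - t||_1; the 'head-batch' grouping
# only reorders the additions so broadcasting over candidate heads stays cheap.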
def DistMult(self, head, relation, tail, mode):
if mode == 'head-batch':
score = head * (relation * tail)
else:
score = (head * relation) * tail
score = score.sum(dim=2)
return score
def ComplEx(self, head, relation, tail, mode):
re_head, im_head = torch.chunk(head, 2, dim=2)
re_relation, im_relation = torch.chunk(relation, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
score = re_head * re_score + im_head * im_score
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = re_score * re_tail + im_score * im_tail
score = score.sum(dim=2)
return score
def RotatE(self, head, relation, tail, mode):
pi = 3.14159265358979323846
re_head, im_head = torch.chunk(head, 2, dim=2)
re_tail, im_tail = torch.chunk(tail, 2, dim=2)
# Make phases of relations uniformly distributed in [-pi, pi]
phase_relation = relation / (self.embedding_range.item() / pi)
re_relation = torch.cos(phase_relation)
im_relation = torch.sin(phase_relation)
if mode == 'head-batch':
re_score = re_relation * re_tail + im_relation * im_tail
im_score = re_relation * im_tail - im_relation * re_tail
re_score = re_score - re_head
im_score = im_score - im_head
else:
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
re_score = re_score - re_tail
im_score = im_score - im_tail
score = torch.stack([re_score, im_score], dim=0)
score = score.norm(dim=0)
score = self.gamma.item() - score.sum(dim=2)
return score
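# RotatE treats each embedding as a complex vector and scores a triple as
# gamma - ||h o r - t|| with r a unit-modulus rotation; phase_relation maps the
# stored real parameters onto angles in [-pi, pi] before taking cos/sin.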
def pRotatE(self, head, relation, tail, mode):
pi = 3.14159265358979323846
# Make phases of entities and relations uniformly distributed in [-pi, pi]
phase_head = head / (self.embedding_range.item() / pi)
phase_relation = relation / (self.embedding_range.item() / pi)
phase_tail = tail / (self.embedding_range.item() / pi)
if mode == 'head-batch':
score = phase_head + (phase_relation - phase_tail)
else:
score = (phase_head + phase_relation) - phase_tail
score = torch.sin(score)
score = torch.abs(score)
score = self.gamma.item() - score.sum(dim=2) * self.modulus
return score
def get_embedding(self, model, sample, mode="single"):
if mode == 'single':
batch_size, negative_sample_size = sample.size(0), 1
head = torch.index_select(
model.entity_embedding,
dim=0,
index=sample[:, 0]
).unsqueeze(1)
relation = torch.index_select(
model.relation_embedding,
dim=0,
index=sample[:, 1]
).unsqueeze(1)
tail = torch.index_select(
model.entity_embedding,
dim=0,
index=sample[:, 2]
).unsqueeze(1)
elif mode == 'head-batch':
tail_part, head_part = sample
batch_size, negative_sample_size = head_part.size(0), head_part.size(1)
head = torch.index_select(
model.entity_embedding,
dim=0,
index=head_part.view(-1)
).view(batch_size, negative_sample_size, -1)
relation = torch.index_select(
model.relation_embedding,
dim=0,
index=tail_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
model.entity_embedding,
dim=0,
index=tail_part[:, 2]
).unsqueeze(1)
elif mode == 'tail-batch':
head_part, tail_part = sample
batch_size, negative_sample_size = tail_part.size(0), tail_part.size(1)
head = torch.index_select(
model.entity_embedding,
dim=0,
index=head_part[:, 0]
).unsqueeze(1)
relation = torch.index_select(
model.relation_embedding,
dim=0,
index=head_part[:, 1]
).unsqueeze(1)
tail = torch.index_select(
model.entity_embedding,
dim=0,
index=tail_part.view(-1)
).view(batch_size, negative_sample_size, -1)
return head, relation, tail
def generate(self, embed_model, pos, neg, mode, n_sample=1, temperature=1.0, train=True, model_name="TransE"):
batch_size, negative_sample_size = neg.size(0), neg.size(1)
scores = self.forward((pos, neg), mode=mode)
probs = torch.softmax(scores, dim=1)
row_idx = torch.arange(0, batch_size).type(torch.LongTensor).unsqueeze(1).expand(batch_size, n_sample)
sample_idx = torch.multinomial(probs, n_sample, replacement=True)
sample_neg = neg[row_idx, sample_idx.data.cpu()].view(batch_size, n_sample)
if train:
return pos, sample_neg, scores, sample_idx, row_idx
else:
return pos, sample_neg
def discriminate_step(self, embed_model, pos, neg, mode, clf_opt, model_name="TransE", args=None):
self.train()
clf_opt.zero_grad()
negative_score = self.forward((pos, neg), mode=mode)
positive_score = self.forward(pos)
if args.negative_adversarial_sampling:
# In self-adversarial sampling, we do not apply back-propagation on the sampling weight
negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim=1).detach()
* F.logsigmoid(-negative_score)).sum(dim=1)
else:
negative_score = F.logsigmoid(-negative_score).mean(dim=1)
positive_score = F.logsigmoid(positive_score).squeeze(dim=1)
positive_sample_loss = - positive_score.mean()
negative_sample_loss = - negative_score.mean()
loss = (positive_sample_loss + negative_sample_loss) / 2
self.zero_grad()
loss.backward()
clf_opt.step()
return loss, negative_sample_loss
@staticmethod
def train_GAN_step(generator, discriminator, opt_gen, opt_dis, train_iterator, epoch_reward, epoch_loss, avg_reward, args):
generator.train()
discriminator.train()
positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)
if args.cuda:
positive_sample = positive_sample.cuda()
negative_sample = negative_sample.cuda()
pos, neg, scores, sample_idx, row_idx = generator.generate(generator, positive_sample, negative_sample, mode)
loss, rewards = discriminator.discriminate_step(discriminator, pos, neg, mode, opt_dis, args=args)
epoch_reward += torch.sum(rewards)
epoch_loss += loss
rewards = rewards - avg_reward
generator.zero_grad()
log_probs = F.log_softmax(scores, dim=1)
reinforce_loss = torch.sum(Variable(rewards) * log_probs[row_idx.cuda(), sample_idx.data])
reinforce_loss.backward()
opt_gen.step()
return epoch_reward, epoch_loss, pos.size(0)
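# The generator update above is a REINFORCE-style policy gradient: the
# discriminator's negative-sample loss acts as the reward, the running average
# reward is subtracted as a baseline, and the log-probabilities of the sampled
# negatives are weighted by the baseline-subtracted reward.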
@staticmethod
def train_step(model, optimizer, train_iterator, args, generator=None):
'''
A single train step. Apply back-propagation and return the loss.
'''
model.train()
optimizer.zero_grad()
positive_sample, negative_sample, subsampling_weight, mode = next(train_iterator)
# embed()
if args.cuda:
positive_sample = positive_sample.cuda()
negative_sample = negative_sample.cuda()
subsampling_weight = subsampling_weight.cuda()
if generator is not None:
positive_sample, negative_sample = generator.generate(model, positive_sample, negative_sample, mode,
train=False, n_sample=args.negative_sample_size//2,
model_name=args.model)
negative_score = model((positive_sample, negative_sample), mode=mode)
positive_score = model(positive_sample)
if args.method == "LT":
tmp = (negative_score.mean(dim=1) - positive_score.squeeze(dim=1) + 1.0).tolist()
# train_iterator.dataloader_tail.dataset.update(tmp, positive_sample.tolist())
train_iterator.dataloader_head.dataset.update(tmp, positive_sample.tolist())
# embed()
if args.negative_adversarial_sampling:
# In self-adversarial sampling, we do not apply back-propagation on the sampling weight
negative_score = (F.softmax(negative_score * args.adversarial_temperature, dim=1).detach()
* F.logsigmoid(-negative_score)).sum(dim=1)
else:
negative_score = F.logsigmoid(-negative_score).mean(dim=1)
positive_score = F.logsigmoid(positive_score).squeeze(dim=1)
if args.uni_weight:
positive_sample_loss = - positive_score.mean()
negative_sample_loss = - negative_score.mean()
else:
positive_sample_loss = - (subsampling_weight * positive_score).sum() / subsampling_weight.sum()
negative_sample_loss = - (subsampling_weight * negative_score).sum() / subsampling_weight.sum()
loss = (positive_sample_loss + negative_sample_loss) / 2
if args.regularization != 0.0:
"""webapp om teksten in ReST formaat om te zetten naar HTML documenten
presentation layer
"""
# import sys
# import os
import pathlib
import datetime
import cherrypy
## sys.path.append('.')
import rst2html_functions as rhfn
HERE = pathlib.Path(__file__).parent
TEMPLATE = HERE / "rst2html.html"
previewbutton = ('<div style="border: 3px ridge #3a5fcd; border-radius:20px; '
'background-color: #C6E2FF; text-align: center; position: fixed">'
'<a href={}><button accesskey="b">'
'<span style="text-decoration:underline">B</span>ack to editor'
'</button></a></div>')
codemirror_stuff = ['<script src="/static/codemirror/lib/codemirror.js"></script>',
'<link rel="stylesheet" href="/static/codemirror/lib/codemirror.css"/>']
scriptspec = '<script src="/static/codemirror/mode/{}.js"></script>'
scriptdict = {'yaml': ('yaml/yaml',),
'html': ('xml/xml', 'javascript/javascript', 'css/css',
'htmlmixed/htmlmixed'),
'py': ('python/python', '../addon/edit/matchbrackets'),
'rst': ('rst/rst', '../addon/mode/overlay')}
copybuttontext = """\
<a href="/copysearch"><button accesskey="c">
<span style="text-decoration:underline">C</span>opy to file</button></a>"""
def load_template(name):
"load a template file from the base directory"
template = HERE / name
output = ''
with template.open() as _in:
output = _in.read()
return output
def apply_lang(lines, state):
"pas eigengebakken language support toe op tekstregels"
output = []
for line in lines:
while '_(' in line:
start, rest = line.split('_(', 1)
keyword, end = rest.split(')', 1)
line = rhfn.get_text(keyword, state.get_lang()).join((start, end))
output.append(line)
return '\n'.join(output)
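# Illustrative example (the keyword below is made up): apply_lang replaces every
# `_(keyword)` marker in a template line with the translation returned by
# rhfn.get_text for the current language, e.g.
#
#   lines = ['<button>_(save_button)</button>']   # 'save_button' is a hypothetical key
#   html = apply_lang(lines, state)               # -> e.g. '<button>Save</button>'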
def format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, state):
"""build page html out of various parameters and a template file
"""
if state.newfile:
all_source, all_html = [], []
else:
all_source = rhfn.list_files(state.sitename, state.current, rstfile, 'src')
all_html = rhfn.list_files(state.sitename, state.current, htmlfile, 'dest')
lines = load_template("rst2html.html").split('\n')
output = apply_lang(lines, state)
conflist = rhfn.list_confs(settings)
format_stuff = ''
if state.conf.get('highlight', False):
format_stuff = ''.join(codemirror_stuff)
if state.loaded:
format_stuff += ''.join(scriptspec.format(x) for x in scriptdict[state.loaded])
return output.format(all_source, all_html, newfile, mld, rstdata, state.conf['wid'],
state.conf['hig'], conflist, state.loaded, format_stuff)
def format_progress_list(timelist):
"""output the site inventory to html, accentuating the most recently updated
items
parts of this logic belong in the template, but since I'm not using a
template engine I'm implementing it here
"""
output = load_template('stand.html')
first_part, rest = output.split('{% if data %}')
data_part, last_part = rest.split('{% endif %}')
repeat_part, no_data = data_part.split('{% else %}')
thead, rest = repeat_part.split('{% for row in data %}')
repeat_line, tfoot = rest.split('{% endfor %}')
output = [first_part]
if timelist:
output.append(thead)
for docinfo in timelist:
line = repeat_line
items = rhfn.get_progress_line_values(docinfo)
line = line.replace('{row.0}', items[0])
for idx, timestring in enumerate(items[1:]):
timestring = timestring.replace('--> ', '<strong>').replace(' <--', '</strong>')
line = line.replace('{row.%s}' % str(idx + 1), timestring)
output.append(line)
output.append(tfoot)
else:
output.append(no_data)
output.append(last_part)
return ''.join(output)
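# Note (illustrative sketch): the template is treated as plain text and carved up on
# its own `{% ... %}` markers instead of being rendered by a template engine, e.g.
#
#   tpl = 'head {% if data %}rows{% else %}empty{% endif %} foot'   # toy template
#   first, rest = tpl.split('{% if data %}')
#   data_part, last = rest.split('{% endif %}')
#   # data_part is then split further on '{% else %}' / '{% for ... %}' as done above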
def resolve_images(rstdata, url, loc, use_sef=False, fname=''):
"""fix the urls in image links so that preview html points to the right place
"""
    # TODO: this does not yet take into account that hrefs starting with / are meant to be
    # loaded absolutely from the site root? that is exactly in
data = []
pos = rstdata.find('<img')
if pos == -1:
return rstdata
while pos >= 0:
pos2 = rstdata.find('src="', pos) + 5
if rstdata[pos2:].startswith('http'):
pos = pos2
else:
begin = rstdata[:pos2]
# if begin.startswith('/'):
# begin = begin[1:]
data.append(begin)
rstdata = rstdata[pos2:]
from_root = False
if rstdata.startswith('/'):
rstdata = rstdata[1:]
from_root = True
pos = 0
pos = rstdata.find('<img', pos)
data.append(rstdata)
if url:
url = url.rstrip('/') + '/' # make sure url ends with one and only one /
if loc:
url += loc.strip('/') + '/' # if present add loc with no double //'s
if use_sef and fname and fname != 'index' and not from_root:
url += fname + '/'
return url.join(data)
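# Illustrative example (all values are hypothetical): with url='http://example.test',
# loc='docs', use_sef=True and fname='page', a relative link such as
#   <img src="images/pic.png">
# is rewritten to
#   <img src="http://example.test/docs/page/images/pic.png">
# while links that already start with http(s) are left untouched.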
def format_previewdata(state, previewdata, fname, ftype, settings):
"""
Insert a "back to source" button into the HTML to show
arg1 = previewdata: the html to show (text string)
arg2 = filename parameter for the screen to return to
arg3 = type of this file: `rst` or `html`
"""
previewdata = resolve_images(previewdata, state.conf['url'], state.current,
state.conf.get('seflinks', False),
# fname.replace('.rst', '').replace('.html', ''))
fname.replace('.' + ftype, ''))
try:
pos = previewdata.index('>', previewdata.index('<body')) + 1
except ValueError:
start, end = '', previewdata
else:
start, end = previewdata[:pos], previewdata[pos:]
loadrst = '/load{0}/?{0}file={1}&settings={2}'.format(ftype, fname, settings)
previewdata = previewbutton.format(loadrst).join((start, end))
return previewdata
def format_search(results=None):
"build search screen data"
output = load_template('search.html')
first_part, rest = output.split('{% if results %}')
start, rest = rest.split('{% for row in data %}')
line, rest = rest.split('{% endfor %}')
end, last_part = rest.split('{% endif %}')
output = [first_part]
if results:
output.append(start)
for page, lineno, text in results:
out = line.replace('{row.0}', page).replace('{row.1}', str(lineno)).replace('{row.2}',
text)
output.append(out)
output.append(end)
output.append(last_part)
return ''.join(output).replace(' **', '<strong>').replace('** ', '</strong>')
class Rst2Html:
"the actual webapp"
def __init__(self):
"""initialize using imported settings; read template; register directives"""
rhfn.register_directives()
self.state = rhfn.R2hState()
@cherrypy.expose
def index(self):
"""show page with empty fields (and selectable filenames)
"""
rstfile, htmlfile, newfile, mld, rstdata, settings = self.state.index()
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def loadconf(self, settings="", rstfile="", htmlfile="", newfile="", rstdata='', **kwargs):
"""load settings of indicated site
"""
mld, rstdata, settings, newfile = self.state.loadconf(settings, newfile)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def saveconf(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""(re)save settings for selected site name
if new name specified, use that"""
mld, rstdata, settings, newfile = self.state.saveconf(settings, newfile, rstdata)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
# @cherrypy.expose # still needs testing
def loadxtra(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""load directives file for editing
if non-existent, create from template
"""
mld, rstdata = self.state.loadxtra()
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
# @cherrypy.expose # still needs testing
def savextra(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""(re)save directives file
"""
mld, rstdata = self.state.savextra(rstdata)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def loadrst(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""load indicated .rst file
pre-builds save-filename by changing extension from rst to html"""
mld, rstdata, htmlfile, newfile = self.state.loadrst(rstfile)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def saverst(self, settings="", rstfile="", newfile="", rstdata="", action='', **kwargs):
"""(re)save rst file using selected name
if new name specified, use that (extension must be .rst)
`action` has a value when rename or delete is checked
"""
action = rhfn.translate_action(action)
if action == 'rename':
mld, rstfile, htmlfile, newfile, rstdata = self.state.rename(rstfile, newfile, rstdata)
elif action == 'revert':
mld, rstfile, htmlfile, newfile, rstdata = self.state.revert(rstfile, rstdata)
elif action == 'delete':
mld, rstfile, htmlfile, newfile, rstdata = self.state.delete(rstfile, rstdata)
else:
mld, rstfile, htmlfile, newfile, rstdata = self.state.saverst(rstfile, newfile, rstdata)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def convert(self, settings="", rstfile="", htmlfile='', newfile="", rstdata="", **kwargs):
"""convert rst to html and show result
"""
mld, previewdata, fname = self.state.convert(rstfile, newfile, rstdata)
if mld == '':
return format_previewdata(self.state, previewdata, fname, 'rst', settings)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def saveall(self, settings="", rstfile="", newfile="", rstdata="", **kwargs):
"""(re)save rst file, (re)convert to html and (re)save html file using selected names
"""
mld, rstfile, htmlfile, newfile = self.state.saveall(rstfile, newfile, rstdata)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def loadhtml(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""load html file and show code
"""
mld, rstdata, rstfile, htmlfile = self.state.loadhtml(htmlfile)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def showhtml(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""preview the loaded HTML
"""
mld, previewdata, fname = self.state.showhtml(rstdata)
if mld == '':
return format_previewdata(self.state, previewdata, fname, 'html', settings)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def savehtml(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""save displayed (edited) html
"""
mld, rstdata, newfile = self.state.savehtml(htmlfile, newfile, rstdata)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def copytoroot(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""copy html to mirror site
"""
mld = self.state.copytoroot(htmlfile, rstdata)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def status(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"get status for current document"
mld = self.state.status(rstfile)
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def makerefdoc(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""build references document
"""
savestuff = (rstfile, htmlfile, rstdata)
result = self.state.makerefdoc()
mld = result[0]
if len(result) == 1:
rstfile, htmlfile, rstdata = savestuff
else:
rstfile, htmlfile, rstdata = result[1:]
return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
@cherrypy.expose
def convert_all(self, settings="", rstfile="", htmlfile="", newfile="", rstdata="", **kwargs):
"""regenerate all html files
"""
regsubj = kwargs.get('regsubj', '')
mld, rstdata = self.state.convert_all(option=regsubj)
outdata = format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)
selector_text = 'select | |
import numpy as np
import tensorflow as tf
import datetime
import ctr_funcs as func
import config_gme as cfg
from time import time
import os
import shutil
# whether to perform warm up
warm_up_bool = cfg.warm_up_bool
test_batch_size = cfg.test_batch_size
meta_mode = cfg.meta_mode
alpha = cfg.alpha
gamma = cfg.gamma
train_file_name_warm = cfg.train_file_name_warm
train_file_name_warm_2 = cfg.train_file_name_warm_2
n_epoch = cfg.n_epoch
label_col_idx = 0
num_csv_col_warm = cfg.num_csv_col_warm
total_num_ft_col_warm = num_csv_col_warm - 1
num_csv_col_w_ngb = cfg.num_csv_col_w_ngb
total_num_ft_col_cold = num_csv_col_w_ngb - 1
# config
# must be from small to large
tar_idx = cfg.tar_idx
attr_idx = cfg.attr_idx
str_txt = cfg.output_file_name
base_path = './tmp'
model_loading_addr = cfg.model_loading_addr
model_saving_addr = base_path + '/meta_' + str_txt + '/'
output_file_name = base_path + '/meta_' + str_txt + '.txt'
save_model_ind = cfg.save_model_ind
train_file_name_a = cfg.train_file_name_a
train_file_name_b = cfg.train_file_name_b
test_file_name = cfg.test_file_name
batch_size = cfg.batch_size
n_ft = cfg.n_ft
k = cfg.k
kp_prob = cfg.kp_prob
n_epoch_meta = cfg.n_epoch_meta
record_step_size = cfg.record_step_size
layer_dim = cfg.layer_dim
att_dim = cfg.att_dim
opt_alg = cfg.opt_alg
n_one_hot_slot = cfg.n_one_hot_slot
n_mul_hot_slot = cfg.n_mul_hot_slot
max_len_per_slot = cfg.max_len_per_slot
input_format = cfg.input_format
n_slot = n_one_hot_slot + n_mul_hot_slot
n_one_hot_slot_ngb = cfg.n_one_hot_slot_ngb
n_mul_hot_slot_ngb = cfg.n_mul_hot_slot_ngb
max_len_per_slot_ngb = cfg.max_len_per_slot_ngb
max_n_ngb_ori = cfg.max_n_ngb_ori
max_n_ngb = cfg.max_n_ngb
n_slot_ngb = n_one_hot_slot_ngb + n_mul_hot_slot_ngb
eta_range = cfg.eta_range
meta_batch_size_range = cfg.meta_batch_size_range
cold_eta_range = cfg.cold_eta_range
meta_eta_range = cfg.meta_eta_range
# key: slot_idx in ori data, val: col_idx in pred_emb
tar_slot_map = {}
for i in range(len(tar_idx)):
tar_slot_map[tar_idx[i]] = i
## create para list
para_list = []
for i in range(len(eta_range)):
for ii in range(len(meta_batch_size_range)):
for iii in range(len(cold_eta_range)):
for iv in range(len(meta_eta_range)):
para_list.append([eta_range[i], meta_batch_size_range[ii], cold_eta_range[iii], \
meta_eta_range[iv]])
## record results
result_list = []
# loop over para_list
for item in para_list:
eta = item[0]
meta_batch_size = item[1]
cold_eta = item[2]
meta_eta = item[3]
tf.reset_default_graph()
# create dir
if not os.path.exists(base_path):
os.mkdir(base_path)
# # remove dir
# if os.path.isdir(model_saving_addr):
# shutil.rmtree(model_saving_addr)
###########################################################
###########################################################
print('Loading data start!')
tf.set_random_seed(123)
if input_format == 'tfrecord':
if warm_up_bool:
train_ft_warm, train_label_warm = func.tfrecord_input_pipeline_test(train_file_name_warm, num_csv_col_warm, batch_size, n_epoch)
train_ft_meta_warm, train_label_meta_warm = func.tfrecord_input_pipeline_test(train_file_name_warm, num_csv_col_warm, batch_size, n_epoch)
test_ft_warm, test_label_warm = func.tfrecord_input_pipeline_test(test_file_name, num_csv_col_w_ngb, test_batch_size, 1)
test_ft_meta_warm, test_label_meta_warm = func.tfrecord_input_pipeline_test(test_file_name, num_csv_col_w_ngb, test_batch_size, 1)
test_ft_copy, test_label_copy = func.tfrecord_input_pipeline_test(test_file_name, num_csv_col_w_ngb, test_batch_size, 1)
# warm up 2
train_ft_warm_2, train_label_warm_2 = func.tfrecord_input_pipeline_test(train_file_name_warm_2, num_csv_col_warm, batch_size, n_epoch)
train_ft_meta_warm_2, train_label_meta_warm_2 = func.tfrecord_input_pipeline_test(train_file_name_warm_2, num_csv_col_warm, batch_size, n_epoch)
test_ft_warm_2, test_label_warm_2 = func.tfrecord_input_pipeline_test(test_file_name, num_csv_col_w_ngb, test_batch_size, 1)
test_ft_meta_warm_2, test_label_meta_warm_2 = func.tfrecord_input_pipeline_test(test_file_name, num_csv_col_w_ngb, test_batch_size, 1)
train_ft_a, train_label_a = func.tfrecord_input_pipeline_test(train_file_name_a, num_csv_col_w_ngb, meta_batch_size, n_epoch_meta)
train_ft_b, train_label_b = func.tfrecord_input_pipeline_test(train_file_name_b, num_csv_col_w_ngb, meta_batch_size, n_epoch_meta)
test_ft, test_label = func.tfrecord_input_pipeline_test(test_file_name, num_csv_col_w_ngb, test_batch_size, 1)
test_ft_meta, test_label_meta = func.tfrecord_input_pipeline_test(test_file_name, num_csv_col_w_ngb, test_batch_size, 1)
########################################################################
########################################################################
def partition_input(x_input):
idx_1 = n_one_hot_slot
idx_2 = idx_1 + n_mul_hot_slot*max_len_per_slot
x_input_one_hot = x_input[:, 0:idx_1]
x_input_mul_hot = x_input[:, idx_1:idx_2]
# shape=[None, n_mul_hot_slot, max_len_per_slot]
x_input_mul_hot = tf.reshape(x_input_mul_hot, (-1, n_mul_hot_slot, max_len_per_slot))
return x_input_one_hot, x_input_mul_hot
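# Illustrative shape example (values are assumed, the real ones come from config_gme):
# with n_one_hot_slot=20, n_mul_hot_slot=5 and max_len_per_slot=10, the first
# 20 + 5*10 = 70 columns of each feature row are split into
#   x_input_one_hot: (None, 20)
#   x_input_mul_hot: (None, 5, 10)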
# data format (label is removed from x_input)
# tar, ngb (w diff n_fts)
def partition_input_w_ngb(x_input):
# generate idx_list
len_list = []
# tar
len_list.append(n_one_hot_slot)
len_list.append(n_mul_hot_slot*max_len_per_slot)
# ngb
for _ in range(max_n_ngb_ori):
len_list.append(n_one_hot_slot_ngb)
len_list.append(n_mul_hot_slot_ngb*max_len_per_slot_ngb)
len_list = np.array(len_list)
idx_list = np.cumsum(len_list)
x_input_one_hot = x_input[:, 0:idx_list[0]]
x_input_mul_hot = x_input[:, idx_list[0]:idx_list[1]]
# shape=[None, n_mul_hot_slot, max_len_per_slot]
x_input_mul_hot = tf.reshape(x_input_mul_hot, [-1, n_mul_hot_slot, max_len_per_slot])
#######################
# ngb
concat_one_hot_ngb = x_input[:, idx_list[1]:idx_list[2]]
concat_mul_hot_ngb = x_input[:, idx_list[2]:idx_list[3]]
for i in range(1, max_n_ngb_ori):
# one_hot
temp_1 = x_input[:, idx_list[2*i+1]:idx_list[2*i+2]]
concat_one_hot_ngb = tf.concat([concat_one_hot_ngb, temp_1], 1)
# mul_hot
temp_2 = x_input[:, idx_list[2*i+2]:idx_list[2*i+3]]
concat_mul_hot_ngb = tf.concat([concat_mul_hot_ngb, temp_2], 1)
# shape=[None, max_n_ngb, n_one_hot_slot_ngb]
concat_one_hot_ngb = tf.reshape(concat_one_hot_ngb, [-1, max_n_ngb_ori, n_one_hot_slot_ngb])
# shape=[None, max_n_ngb, n_mul_hot_slot_ngb, max_len_per_slot_ngb]
concat_mul_hot_ngb = tf.reshape(concat_mul_hot_ngb, [-1, max_n_ngb_ori, n_mul_hot_slot_ngb, \
max_len_per_slot_ngb])
x_input_one_hot_ngb = concat_one_hot_ngb[:, 0:max_n_ngb, :]
x_input_mul_hot_ngb = concat_mul_hot_ngb[:, 0:max_n_ngb, :, :]
return x_input_one_hot, x_input_mul_hot, x_input_one_hot_ngb, x_input_mul_hot_ngb
# add mask
def get_masked_one_hot(x_input_one_hot):
data_mask = tf.cast(tf.greater(x_input_one_hot, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 2)
data_mask = tf.tile(data_mask, (1,1,k))
# output: (?, n_one_hot_slot, k)
data_embed_one_hot = tf.nn.embedding_lookup(emb_mat, x_input_one_hot)
data_embed_one_hot_masked = tf.multiply(data_embed_one_hot, data_mask)
return data_embed_one_hot_masked
def get_masked_mul_hot(x_input_mul_hot):
data_mask = tf.cast(tf.greater(x_input_mul_hot, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 3)
data_mask = tf.tile(data_mask, (1,1,1,k))
# output: (?, n_mul_hot_slot, max_len_per_slot, k)
data_embed_mul_hot = tf.nn.embedding_lookup(emb_mat, x_input_mul_hot)
data_embed_mul_hot_masked = tf.multiply(data_embed_mul_hot, data_mask)
# move reduce_sum here
data_embed_mul_hot_masked = tf.reduce_sum(data_embed_mul_hot_masked, 2)
return data_embed_mul_hot_masked
# output: (?, n_one_hot_slot + n_mul_hot_slot, k)
def get_concate_embed(x_input_one_hot, x_input_mul_hot):
data_embed_one_hot = get_masked_one_hot(x_input_one_hot)
data_embed_mul_hot = get_masked_mul_hot(x_input_mul_hot)
data_embed_concat = tf.concat([data_embed_one_hot, data_embed_mul_hot], 1)
return data_embed_concat
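# Illustrative shapes (assumed values): with n_one_hot_slot=20, n_mul_hot_slot=5 and
# embedding size k=10, get_concate_embed produces
#   data_embed_one_hot: (None, 20, 10)
#   data_embed_mul_hot: (None, 5, 10)    # max_len_per_slot already summed out above
#   data_embed_concat:  (None, 25, 10)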
def get_masked_one_hot_ngb(x_input_one_hot_ngb):
data_mask = tf.cast(tf.greater(x_input_one_hot_ngb, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 3)
data_mask = tf.tile(data_mask, (1,1,1,k))
# output: (?, max_n_clk, n_one_hot_slot, k)
data_embed_one_hot = tf.nn.embedding_lookup(emb_mat, x_input_one_hot_ngb)
data_embed_one_hot_masked = tf.multiply(data_embed_one_hot, data_mask)
return data_embed_one_hot_masked
def get_masked_mul_hot_ngb(x_input_mul_hot_ngb):
data_mask = tf.cast(tf.greater(x_input_mul_hot_ngb, 0), tf.float32)
data_mask = tf.expand_dims(data_mask, axis = 4)
data_mask = tf.tile(data_mask, (1,1,1,1,k))
# output: (?, max_n_clk, n_mul_hot_slot, max_len_per_slot, k)
data_embed_mul_hot = tf.nn.embedding_lookup(emb_mat, x_input_mul_hot_ngb)
data_embed_mul_hot_masked = tf.multiply(data_embed_mul_hot, data_mask)
# output: (?, max_n_clk, n_mul_hot_slot, k)
data_embed_mul_hot_masked = tf.reduce_sum(data_embed_mul_hot_masked, 3)
return data_embed_mul_hot_masked
# output: (?, max_n_ngb, n_slot, k)
def get_concate_embed_ngb(x_input_one_hot_ngb, x_input_mul_hot_ngb):
data_embed_one_hot = get_masked_one_hot_ngb(x_input_one_hot_ngb)
data_embed_mul_hot = get_masked_mul_hot_ngb(x_input_mul_hot_ngb)
data_embed_concat = tf.concat([data_embed_one_hot, data_embed_mul_hot], 2)
return data_embed_concat
# col_idx starts from 0, wrt data_embed_concat
def get_sel_col(data_embed_concat, col_idx):
cur_col_idx = col_idx[0]
# none * len(col_idx) * k
ft_emb = data_embed_concat[:, cur_col_idx:cur_col_idx+1, :]
for i in range(1, len(col_idx)):
cur_col_idx = col_idx[i]
cur_x = data_embed_concat[:, cur_col_idx:cur_col_idx+1, :]
ft_emb = tf.concat([ft_emb, cur_x], 1)
# reshape -> 2D none * total_dim
ft_emb = tf.reshape(ft_emb, [-1, len(col_idx)*k])
return ft_emb
def get_sel_col_ngb(data_embed_concat_ngb, col_idx):
cur_col_idx = col_idx[0]
# none * max_n_ngb * len(col_idx) * k
ngb_emb = data_embed_concat_ngb[:, :, cur_col_idx:cur_col_idx+1, :]
for i in range(1, len(col_idx)):
cur_col_idx = col_idx[i]
cur_x = data_embed_concat_ngb[:, :, cur_col_idx:cur_col_idx+1, :]
ngb_emb = tf.concat([ngb_emb, cur_x], 2)
# reshape -> 3D none * max_n_ngb * total_dim
ngb_emb = tf.reshape(ngb_emb, [-1, max_n_ngb, len(col_idx)*k])
return ngb_emb
# count number of valid (i.e., not padded with all 0) ngbs
# output: none*1
def count_n_valid_ngb(x_input_one_hot_ngb):
# none * max_n_ngb * n_one_hot_slot_ngb
data_mask_a = tf.cast(tf.greater(x_input_one_hot_ngb, 0), tf.float32)
# none * max_n_ngb
data_mask_a_reduce_sum = tf.reduce_sum(data_mask_a, 2)
data_mask_b = tf.cast(tf.greater(data_mask_a_reduce_sum, 0), tf.float32)
# none * 1
n_valid = 1.0*tf.reduce_sum(data_mask_b, 1, keep_dims=True)
return n_valid
def gen_emb_from_self(data_embed_concat):
######### self attr #########
# none * (len(attr_idx)*k)
attr_emb = get_sel_col(data_embed_concat, attr_idx)
# none * (len(tar_idx)*k)
pred_emb_self = gamma * tf.nn.tanh(tf.matmul(attr_emb, W_meta))
pred_emb_self = tf.reshape(pred_emb_self, [-1,len(tar_idx),k])
return pred_emb_self
def get_emb_from_ngb(data_embed_concat_ngb, x_input_one_hot_ngb):
# none * max_n_ngb * (len(tar_idx) * k)
ngb_emb = get_sel_col_ngb(data_embed_concat_ngb, tar_idx)
n_valid_ngb = count_n_valid_ngb(x_input_one_hot_ngb)
# must flatten first, otherwise [*,a,b] / [*,c] will result in err
avg_ngb_emb = tf.layers.flatten(tf.reduce_sum(ngb_emb, 1)) / (n_valid_ngb + 1e-5)
pred_emb = gamma * tf.nn.tanh(tf.matmul(avg_ngb_emb, W_meta))
pred_emb = tf.reshape(pred_emb, [-1,len(tar_idx),k])
return pred_emb
# GME-P
def gen_emb_from_self_and_ngb_pre(data_embed_concat, data_embed_concat_ngb):
######### self attr #########
# none * (len(attr_idx)*k)
attr_emb = get_sel_col(data_embed_concat, attr_idx)
# none * (len(tar_idx)*k)
pred_emb_self = gamma * tf.nn.tanh(tf.matmul(attr_emb, W_meta))
# none * 1 * (len(tar_idx)*k)
pred_emb_self_exp = tf.expand_dims(pred_emb_self, 1)
# none * (max_n_ngb+1) * (len(tar_idx)*k)
pred_emb_self_tile = tf.tile(pred_emb_self_exp, [1, max_n_ngb+1, 1])
# convert to 2D [fold the first 2 dims]
# (none*(max_n_ngb+1)) * (len(tar_idx)*k)
pred_emb_self_2d = tf.reshape(pred_emb_self_tile, [-1, len(tar_idx)*k])
######### ngb #########
# none * max_n_ngb * (len(tar_idx) * k)
tar_emb_ngb_ori = get_sel_col_ngb(data_embed_concat_ngb, tar_idx)
# none * 1 * (len(attr_idx)*k)
pred_emb_exp = tf.expand_dims(pred_emb_self, 1)
# none * (max_n_ngb + 1) * (len(attr_idx)*k)
tar_emb_ngb = tf.concat([tar_emb_ngb_ori, pred_emb_exp], 1)
# convert to 2D [fold the first 2 dims]
tar_emb_ngb_2d = tf.reshape(tar_emb_ngb, [-1, len(tar_idx)*k])
######### GAT #########
# (none*(max_ngb+1)) * (len(tar_idx)*k)
temp_self = tf.matmul(pred_emb_self_2d, W_gat)
temp_ngb = tf.matmul(tar_emb_ngb_2d, W_gat)
# (none*(max_ngb+1)) * 1
wgt = tf.nn.leaky_relu(tf.matmul(tf.concat([temp_self, temp_ngb], 1), a_gat))
wgt = tf.reshape(wgt, [-1, max_n_ngb+1, 1])
nlz_wgt = tf.nn.softmax(wgt, dim=1)
temp_ngb_re = tf.reshape(temp_ngb, [-1, max_n_ngb+1, len(tar_idx)*k])
# none * (len(tar_idx)*k)
pred_emb_self_new = tf.nn.elu(tf.reduce_sum(temp_ngb_re * nlz_wgt, 1))
# none * len(tar_idx) * k
pred_emb_self_new = tf.reshape(pred_emb_self_new, [-1,len(tar_idx),k])
return pred_emb_self_new, wgt, nlz_wgt
# GME-G
def gen_emb_from_self_and_ngb_gen(data_embed_concat, data_embed_concat_ngb):
######### self attr #########
# none * (len(attr_idx)*k)
attr_emb = get_sel_col(data_embed_concat, attr_idx)
# none * (len(tar_idx)*k)
pred_emb_self = gamma * tf.nn.tanh(tf.matmul(attr_emb, W_meta))
# none * 1 * (len(tar_idx)*k)
pred_emb_self_exp = tf.expand_dims(pred_emb_self, 1)
# none * (max_n_ngb+1) * (len(tar_idx)*k)
pred_emb_self_tile = tf.tile(pred_emb_self_exp, [1, max_n_ngb+1, 1])
# convert to 2D [fold the first 2 dims]
# (none*(max_n_ngb+1)) * (len(tar_idx)*k)
pred_emb_self_2d = tf.reshape(pred_emb_self_tile, [-1, len(tar_idx)*k])
######### ngb #########
# none * max_n_ngb * (len(attr_idx) * k)
attr_emb_ngb_ori = get_sel_col_ngb(data_embed_concat_ngb, attr_idx)
# none * 1 * (len(attr_idx)*k)
attr_emb_exp = tf.expand_dims(attr_emb, 1)
# none * (max_n_ngb + 1) * (len(attr_idx)*k)
attr_emb_ngb = tf.concat([attr_emb_ngb_ori, attr_emb_exp], 1)
# convert to 2D [fold the first 2 dims]
attr_emb_ngb_2d = tf.reshape(attr_emb_ngb, [-1, len(attr_idx)*k])
# (none*(max_n_ngb+1)) * (len(tar_idx)*k)
pred_emb_ngb_2d = gamma * tf.nn.tanh(tf.matmul(attr_emb_ngb_2d, W_meta))
######### GAT #########
# (none*(max_ngb+1)) * (len(tar_idx)*k)
temp_self = tf.matmul(pred_emb_self_2d, W_gat)
temp_ngb = tf.matmul(pred_emb_ngb_2d, W_gat)
# (none*(max_ngb+1)) * 1
wgt = tf.nn.leaky_relu(tf.matmul(tf.concat([temp_self, temp_ngb], 1), a_gat))
+ c * x ** S(4)) ** (p + S(-1)) / (d + e * x ** S(2)), x),
x,
)
def With1255(a, b, c, d, e, x):
q = Rt(-S(4) * a * c + b ** S(2), S(2))
return Dist(
S(2) * sqrt(-c),
Int(
S(1)
/ (
(d + e * x ** S(2))
* sqrt(-b - S(2) * c * x ** S(2) + q)
* sqrt(b + S(2) * c * x ** S(2) + q)
),
x,
),
x,
)
def With1256(a, c, d, e, x):
q = Rt(-a * c, S(2))
return Dist(
sqrt(-c),
Int(
S(1)
/ (
(d + e * x ** S(2)) * sqrt(-c * x ** S(2) + q) * sqrt(c * x ** S(2) + q)
),
x,
),
x,
)
def With1257(a, b, c, d, e, x):
q = Rt(-S(4) * a * c + b ** S(2), S(2))
return Dist(
S(2) * c / (S(2) * c * d - e * (b - q)),
Int(S(1) / sqrt(a + b * x ** S(2) + c * x ** S(4)), x),
x,
) - Dist(
e / (S(2) * c * d - e * (b - q)),
Int(
(b + S(2) * c * x ** S(2) - q)
/ ((d + e * x ** S(2)) * sqrt(a + b * x ** S(2) + c * x ** S(4))),
x,
),
x,
)
def With1258(a, c, d, e, x):
q = Rt(-a * c, S(2))
return Dist(c / (c * d + e * q), Int(S(1) / sqrt(a + c * x ** S(4)), x), x) + Dist(
e / (c * d + e * q),
Int((-c * x ** S(2) + q) / (sqrt(a + c * x ** S(4)) * (d + e * x ** S(2))), x),
x,
)
def With1259(a, b, c, d, e, x):
if isinstance(x, (int, Integer, float, Float)):
return False
q = Rt(c / a, S(4))
if NonzeroQ(-d * q ** S(2) + e):
return True
return False
def replacement1259(a, b, c, d, e, x):
q = Rt(c / a, S(4))
return (
-Dist(
q ** S(2) / (-d * q ** S(2) + e),
Int(S(1) / sqrt(a + b * x ** S(2) + c * x ** S(4)), x),
x,
)
+ Simp(
ArcTan(
x
* sqrt((a * e ** S(2) - b * d * e + c * d ** S(2)) / (d * e))
/ sqrt(a + b * x ** S(2) + c * x ** S(4))
)
/ (S(2) * d * sqrt((a * e ** S(2) - b * d * e + c * d ** S(2)) / (d * e))),
x,
)
+ Simp(
sqrt(
(a + b * x ** S(2) + c * x ** S(4))
/ (a * (q ** S(2) * x ** S(2) + S(1)) ** S(2))
)
* (d * q ** S(2) + e)
* (q ** S(2) * x ** S(2) + S(1))
* EllipticPi(
-((-d * q ** S(2) + e) ** S(2)) / (S(4) * d * e * q ** S(2)),
S(2) * ArcTan(q * x),
-b * q ** S(2) / (S(4) * c) + S(1) / 2,
)
/ (
S(4)
* d
* q
* (-d * q ** S(2) + e)
* sqrt(a + b * x ** S(2) + c * x ** S(4))
),
x,
)
)
def With1260(a, c, d, e, x):
if isinstance(x, (int, Integer, float, Float)):
return False
q = Rt(c / a, S(4))
if NonzeroQ(-d * q ** S(2) + e):
return True
return False
def replacement1260(a, c, d, e, x):
q = Rt(c / a, S(4))
return (
-Dist(
q ** S(2) / (-d * q ** S(2) + e), Int(S(1) / sqrt(a + c * x ** S(4)), x), x
)
+ Simp(
ArcTan(
x
* sqrt((a * e ** S(2) + c * d ** S(2)) / (d * e))
/ sqrt(a + c * x ** S(4))
)
/ (S(2) * d * sqrt((a * e ** S(2) + c * d ** S(2)) / (d * e))),
x,
)
+ Simp(
sqrt((a + c * x ** S(4)) / (a * (q ** S(2) * x ** S(2) + S(1)) ** S(2)))
* (d * q ** S(2) + e)
* (q ** S(2) * x ** S(2) + S(1))
* EllipticPi(
-((-d * q ** S(2) + e) ** S(2)) / (S(4) * d * e * q ** S(2)),
S(2) * ArcTan(q * x),
S(1) / 2,
)
/ (S(4) * d * q * sqrt(a + c * x ** S(4)) * (-d * q ** S(2) + e)),
x,
)
)
def With1261(a, c, d, e, x):
q = Rt(-c / a, S(4))
return Simp(
EllipticPi(-e / (d * q ** S(2)), asin(q * x), S(-1)) / (sqrt(a) * d * q), x
)
def replacement1262(a, c, d, e, x):
return Dist(
sqrt(S(1) + c * x ** S(4) / a) / sqrt(a + c * x ** S(4)),
Int(S(1) / (sqrt(S(1) + c * x ** S(4) / a) * (d + e * x ** S(2))), x),
x,
)
def With1263(a, b, c, d, e, x):
q = Rt(-S(4) * a * c + b ** S(2), S(2))
return Dist(
sqrt(S(2) * c * x ** S(2) / (b - q) + S(1))
* sqrt(S(2) * c * x ** S(2) / (b + q) + S(1))
/ sqrt(a + b * x ** S(2) + c * x ** S(4)),
Int(
S(1)
/ (
(d + e * x ** S(2))
* sqrt(S(2) * c * x ** S(2) / (b - q) + S(1))
* sqrt(S(2) * c * x ** S(2) / (b + q) + S(1))
),
x,
),
x,
)
def replacement1264(a, b, c, d, e, p, x):
return -Dist(
S(1)
/ (
S(2)
* a
* (p + S(1))
* (-S(4) * a * c + b ** S(2))
* (a * e ** S(2) - b * d * e + c * d ** S(2))
),
Int(
(a + b * x ** S(2) + c * x ** S(4)) ** (p + S(1))
* Simp(
-a * b * c * d * e * (S(8) * p + S(11))
+ S(2)
* a
* c
* (
S(4) * a * e ** S(2) * (p + S(1))
+ c * d ** S(2) * (S(4) * p + S(5))
)
+ b ** S(3) * d * e * (S(2) * p + S(3))
- b ** S(2)
* (
S(2) * a * e ** S(2) * (p + S(1))
+ c * d ** S(2) * (S(2) * p + S(3))
)
- c
* e
* x ** S(4)
* (S(4) * p + S(7))
* (S(2) * a * c * e - b ** S(2) * e + b * c * d)
- x ** S(2)
* (
S(4) * a * c ** S(2) * d * e
- b ** S(3) * e ** S(2) * (S(2) * p + S(3))
- S(2) * b ** S(2) * c * d * e * (p + S(2))
+ b
* c
* (
a * e ** S(2) * (S(8) * p + S(11))
+ c * d ** S(2) * (S(4) * p + S(7))
)
),
x,
)
/ (d + e * x ** S(2)),
x,
),
x,
) - Simp(
x
* (a + b * x ** S(2) + c * x ** S(4)) ** (p + S(1))
* (
S(3) * | |
# Smoothed-particle hydrodynamics (SPH) is a computational method used for simulating the mechanics of continuum media, such as solid mechanics and fluid flows.
# Here we utilize SPH to simulate a fountain, which tries to hit a target given by the user.
# The SPH simulator implemented here using Taichi is differentiable.
# Therefore, it can be easily embedded into the training pipeline of a neural-network-modelled controller.
import taichi as ti
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle as pkl
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--train',
action='store_true',
help='whether train model, default false')
parser.add_argument('place_holder', nargs='*')
args = parser.parse_args()
TRAIN = args.train
TRAIN_OUTPUT_IMG = False
TRAIN_VISUAL = False
TRAIN_VISUAL_SHOW = False
INFER_OUTPUT_IMG = False
ti.init(arch=ti.gpu, device_memory_fraction=0.5, random_seed=5)
screen_res = (1000, 1000)
real = ti.f32
scalar = lambda: ti.field(dtype=real)
@ti.data_oriented
class SGD:
def __init__(self, params, lr):
self.params = params
self.lr = lr
def step(self):
for w in self.params:
self._step(w)
@ti.kernel
def _step(self, w: ti.template()):
for I in ti.grouped(w):
w[I] -= min(max(w.grad[I], -20.0), 20.0) * self.lr
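    # Clipped-SGD update sketch (illustrative, scalar case): with lr=1e-3 (the
    # controller learning rate configured below) and a raw gradient of 50.0, the
    # gradient is first clipped to 20.0, so the applied step is
    #   w <- w - 20.0 * 1e-3 = w - 0.02
    # which keeps single outlier gradients from destabilising the controller weights.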
def zero_grad(self):
for w in self.params:
w.grad.fill(0.0)
@ti.data_oriented
class Linear:
def __init__(self, n_models, batch_size, n_steps, n_input, n_hidden, n_output, needs_grad=False, activation=False):
self.n_models = n_models
self.batch_size = batch_size
self.n_steps = n_steps
self.n_input = n_input
self.n_hidden = n_hidden
self.n_output = n_output
self.activation = activation
self.hidden = scalar()
self.output = scalar()
# array of structs
self.batch_node = ti.root.dense(ti.i, self.n_models)
self.n_hidden_node = self.batch_node.dense(ti.j, self.n_hidden)
self.weights1_node = self.n_hidden_node.dense(ti.k, self.n_input)
self.batch_node.dense(ti.axes(1, 2, 3), (self.n_steps, self.batch_size, self.n_hidden)).place(self.hidden)
self.batch_node.dense(ti.axes(1, 2, 3), (self.n_steps, self.batch_size, self.n_output)).place(self.output)
self.weights1 = scalar()
self.bias1 = scalar()
self.weights1_node.place(self.weights1)
self.n_hidden_node.place(self.bias1)
if needs_grad:
ti.root.lazy_grad()
def parameters(self):
return [self.weights1, self.bias1]
@ti.kernel
def weights_init(self):
q1 = ti.sqrt(6 / self.n_input) * 0.01
for model_id, i, j in ti.ndrange(self.n_models, self.n_hidden, self.n_input):
self.weights1[model_id, i, j] = (ti.random() * 2 - 1) * q1
@ti.kernel
def _forward(self, t: ti.i32, nn_input: ti.template()):
for model_id, k, i, j in ti.ndrange(self.n_models, self.batch_size, self.n_hidden, self.n_input):
self.hidden[model_id, t, k, i] += self.weights1[model_id, i, j] * nn_input[model_id, t, k, j]
if ti.static(self.activation):
for model_id, k, i in ti.ndrange(self.n_models, self.batch_size, self.n_hidden):
self.output[model_id, t, k, i] = ti.tanh(self.hidden[model_id, t, k, i] + self.bias1[model_id, i])
else:
for model_id, k, i in ti.ndrange(self.n_models, self.batch_size, self.n_hidden):
self.output[model_id, t, k, i] = self.hidden[model_id, t, k, i] + self.bias1[model_id, i]
@ti.kernel
def clear(self):
for I in ti.grouped(self.hidden):
self.hidden[I] = 0.
for I in ti.grouped(self.output):
self.output[I] = 0.
def forward(self, t, nn_input):
self._forward(t, nn_input)
def dump_weights(self, name="save.pkl"):
w_val = []
for w in self.parameters():
w = w.to_numpy()
w_val.append(w[0])
pkl.dump(w_val, open(name, "wb"))
def load_weights(self, name="save.pkl", model_id=0):
w_val = pkl.load(open(name, 'rb'))
self.load_weights_from_value(w_val, model_id)
def load_weights_from_value(self, w_val, model_id=0):
for w, val in zip(self.parameters(), w_val):
if val.shape[0] == 1:
val = val[0]
self.copy_from_numpy(w, val, model_id)
@ti.kernel
def copy_from_numpy(self, dst: ti.template(), src: ti.ext_arr(), model_id: ti.i32):
for I in ti.grouped(src):
dst[model_id, I] = src[I]
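# Rough usage sketch for the Linear layer above (illustrative; the sizes are made up,
# the actual layers below are built from the cfg-driven values):
#
#   layer = Linear(n_models=1, batch_size=4, n_steps=8,
#                  n_input=3, n_hidden=16, n_output=16, needs_grad=False)
#   layer.weights_init()
#   layer.clear()
#   layer.forward(t=0, nn_input=some_input_field)   # some_input_field: (1, 8, 4, 3) ti.field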
# NN model
model_num = 1
steps = 128
n_input = 3
n_hidden = 32
n_output = 16
n_output_act = 3
learning_rate = 1e-3
loss = ti.field(float, shape=(), needs_grad=True)
if TRAIN:
batch_size = 16
input_states = ti.field(float, shape=(model_num, steps, batch_size, n_input), needs_grad=True)
fc1 = Linear(n_models=model_num, batch_size=batch_size, n_steps=steps, n_input=n_input, n_hidden=n_hidden,
n_output=n_output, needs_grad=True, activation=False)
fc2 = Linear(n_models=model_num, batch_size=batch_size, n_steps=steps, n_input=n_output, n_hidden=n_hidden,
n_output=n_output_act, needs_grad=True, activation=True)
fc1.weights_init()
fc2.weights_init()
NNs = [fc1, fc2]
parameters = []
for layer in NNs:
parameters.extend(layer.parameters())
optimizer = SGD(params=parameters, lr=learning_rate)
# Training data generation
sample_num = batch_size * 25
x_range = (0.05, 0.45)
y_range = (0.4, 1.0)
z_range = (0.05, 0.45)
def targets_generation(num, x_range, y_range, z_range):
low = np.array([x_range[0], y_range[0], z_range[0]])
high = np.array([x_range[1], y_range[1], z_range[1]])
return np.array([np.random.uniform(low=low, high=high) for _ in range(num)])
np.random.seed(0)
all_data = targets_generation(sample_num, x_range, y_range, z_range)
training_sample_num = batch_size * 4
training_data = all_data[:training_sample_num, :]
test_data = all_data[training_sample_num:, :]
print("training data ", training_data.shape, "test data ", test_data.shape)
else:
batch_size = 1
input_states = ti.field(float, shape=(model_num, steps, batch_size, n_input), needs_grad=False)
fc1 = Linear(n_models=model_num, batch_size=batch_size, n_steps=steps, n_input=n_input, n_hidden=n_hidden,
n_output=n_output, needs_grad=False, activation=False)
fc2 = Linear(n_models=model_num, batch_size=batch_size, n_steps=steps, n_input=n_output, n_hidden=n_hidden,
n_output=n_output_act, needs_grad=False, activation=True)
file_dir_path = os.path.dirname(os.path.realpath(__file__))
fc1.load_weights(f"{file_dir_path}/fc1_pretrained.pkl", model_id=0)
fc2.load_weights(f"{file_dir_path}/fc2_pretrained.pkl", model_id=0)
print(f"Model at {file_dir_path} loaded. ")
# Simulation configuration
boundary_box_np = np.array([[0, 0, 0], [0.5, 1.5, 0.5]])
spawn_box_np = np.array([[0.0, 0.0, 0.0], [0.5, 0.05, 0.5]])
target_box_np = np.array([[0.15, 0.90, 0.15], [0.2, 0.95, 0.2]])
target_centers = ti.Vector.field(3, float, shape=batch_size, needs_grad=True)
min_dist = ti.field(float, shape=batch_size, needs_grad=True)
max_dist = ti.field(float, shape=batch_size, needs_grad=True)
max_height = ti.field(float, shape=batch_size, needs_grad=True)
max_left = ti.field(float, shape=batch_size, needs_grad=True)
max_right = ti.field(float, shape=batch_size, needs_grad=True)
jet_force_max = ti.Vector([9.81*3, 9.81*10, 9.81*3])
# Simulation parameters
particle_radius = 0.01
particle_diameter = particle_radius * 2
N_np = ((spawn_box_np[1] - spawn_box_np[0]) / particle_diameter + 1).astype(int)
N_target_np = ((target_box_np[1] - target_box_np[0]) / particle_diameter + 1).astype(int)
h = 4.0 * particle_radius
fluid_particle_num = N_np[0] * N_np[1] * N_np[2]
target_particle_num = N_target_np[0] * N_target_np[1] * N_target_np[2]
particle_num = fluid_particle_num + target_particle_num
print(f"Particle num: {particle_num}")
pos = ti.Vector.field(3, float)
vel = ti.Vector.field(3, float)
acc = ti.Vector.field(3, float)
jet_force = ti.Vector.field(3, float, shape=(steps, batch_size), needs_grad=True)
col = ti.Vector.field(3, float)
material = ti.field(int)
den = ti.field(float)
pre = ti.field(float)
pos_vis_buffer = ti.Vector.field(3, float, shape=particle_num)
pos_output_buffer = ti.Vector.field(3, float, shape=(steps, particle_num))
ti.root.dense(ti.ijk, (batch_size, steps, int(particle_num))).place(pos, vel, acc, den, pre)
ti.root.dense(ti.i, int(particle_num)).place(material, col)
ti.root.lazy_grad()
boundary_box = ti.Vector.field(3, float, shape=2)
spawn_box = ti.Vector.field(3, float, shape=2)
target_box = ti.Vector.field(3, float, shape=2)
N_fluid = ti.Vector([N_np[0], N_np[1], N_np[2]])
N_target = ti.Vector([N_target_np[0], N_target_np[1], N_target_np[2]])
gravity = ti.Vector([0.0, -9.8, 0.0])
boundary_box.from_numpy(boundary_box_np)
spawn_box.from_numpy(spawn_box_np)
target_box.from_numpy(target_box_np)
rest_density = 1000.0
mass = rest_density * particle_diameter * particle_diameter * particle_diameter * 0.8
pressure_scale = 10000.0
viscosity_scale = 0.1 * 3
tension_scale = 0.005
gamma = 1.0
substeps = 5
dt = 0.016 / substeps
eps = 1e-6
damping = 0.5
pi = 3.1415926535
@ti.func
def W_poly6(R, h):
r = R.norm(eps)
res = 0.0
if r <= h:
h2 = h * h
h4 = h2 * h2
h9 = h4 * h4 * h
h2_r2 = h2 - r * r
res = 315.0 / (64 * pi * h9) * h2_r2 * h2_r2 * h2_r2
else:
res = 0.0
return res
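# Worked example (illustrative): at r = 0 the poly6 kernel reduces to
#   W(0, h) = 315 / (64 * pi * h**9) * (h**2)**3 = 315 / (64 * pi * h**3)
# so with h = 4 * particle_radius = 0.04 as configured below, W(0, h) is roughly 2.4e4;
# this large value is scaled back down by the small particle mass in update_density.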
@ti.func
def W_spiky_gradient(R, h):
r = R.norm(eps)
res = ti.Vector([0.0, 0.0, 0.0])
if r == 0.0:
res = ti.Vector([0.0, 0.0, 0.0])
elif r <= h:
h3 = h * h * h
h6 = h3 * h3
h_r = h - r
res = -45.0 / (pi * h6) * h_r * h_r * (R / r)
else:
res = ti.Vector([0.0, 0.0, 0.0])
return res
W = W_poly6
W_gradient = W_spiky_gradient
@ti.kernel
def initialize_fluid_particle(t: ti.int32, pos: ti.template(), N_fluid: ti.template()):
# Allocate fluid
for bs, i in ti.ndrange(batch_size, fluid_particle_num):
pos[bs, t, i] = (
ti.Vector(
[int(i % N_fluid[0]), int(i / N_fluid[0]) % N_fluid[1], int(i / N_fluid[0] / N_fluid[1] % N_fluid[2])]
)
* particle_diameter
+ spawn_box[0]
)
vel[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
material[i] = 0
col[i] = ti.Vector([0.4, 0.7, 1.0])
acc.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
pos.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
vel.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
@ti.kernel
def initialize_dists():
for bs in range(batch_size):
min_dist[bs] = 1000.0
max_height[bs] = 0.0
max_left[bs] = 0.0
max_right[bs] = 0.0
@ti.kernel
def initialize_target_particle(t: ti.int32, pos: ti.template(), N_target:ti.template(), current_pos: ti.int32):
# Allocate target cube
for bs, i in ti.ndrange(batch_size, (fluid_particle_num, fluid_particle_num + target_particle_num)):
pos[bs, t, i] = (
ti.Vector(
[int(i % N_target[0]), int(i / N_target[0]) % N_target[1], int(i / N_target[0] / N_target[1] % N_target[2])]
)
* particle_diameter
+ target_centers[current_pos]
)
vel[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
material[i] = 1
col[i] = ti.Vector([1.0, 0.65, 0.0])
acc.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
pos.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
vel.grad[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
@ti.kernel
def initialize_density(t: ti.int32):
for bs, i in ti.ndrange(batch_size, particle_num):
den[bs, t, i] = 0.0
@ti.kernel
def update_density(t: ti.int32):
for bs, i in ti.ndrange(batch_size, particle_num):
for j in range(particle_num):
R = pos[bs, t, i] - pos[bs, t, j]
den[bs, t, i] += mass * W(R, h)
@ti.kernel
def update_pressure(t: ti.int32):
for bs, i in ti.ndrange(batch_size, particle_num):
pre[bs, t, i] = pressure_scale * max(pow(den[bs, t, i] / rest_density, gamma) - 1, 0)
@ti.kernel
def controller_output(t: ti.int32):
for bs in range(batch_size):
for j in ti.static(range(3)):
jet_force[t, bs][j] = fc2.output[0, t, bs, j] * jet_force_max[j]
@ti.kernel
def apply_force(t: ti.int32):
for bs, i in ti.ndrange(batch_size, particle_num):
if material[i] == 1:
acc[bs, t, i] = ti.Vector([0.0, 0.0, 0.0])
else:
if pos[bs, t, i][0] > 0.2 and pos[bs, t, i][0] < 0.3 and pos[bs, t, i][1] < 0.2 and pos[bs, t, i][2] > 0.2 and pos[bs, t, i][2] < 0.3:
indicator = (steps - t) // (steps // 2)
acc[bs, t, i] = jet_force[t, bs] + gravity + indicator * (- gravity) * 0.1
else:
acc[bs, t, i] = gravity
@ti.kernel
def update_force(t: ti.int32):
for bs, i in | |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## pb.py
##
## Created on: Mar 13, 2019
## Author: <NAME>
## E-mail: <EMAIL>
##
"""
===============
List of classes
===============
.. autosummary::
:nosignatures:
EncType
PBEnc
==================
Module description
==================
.. note::
Functionality of this module is available only if the `PyPBLib`
package is installed, e.g. from PyPI:
.. code-block::
$ pip install pypblib
This module provides access to the basic functionality of the `PyPBLib
library <https://pypi.org/project/pypblib/>`__ developed by the `Logic
Optimization Group <http://ulog.udl.cat/>`__ of the University of Lleida.
PyPBLib provides a user with an extensive Python API to the well-known
`PBLib library <http://tools.computational-logic.org/content/pblib.php>`__
[1]_. Note the PyPBLib has a number of `additional features
<http://hardlog.udl.cat/static/doc/pypblib/html/index.html>`__ that cannot
be accessed through PySAT *at this point*. (One concrete example is a
range of cardinality encodings, which clash with the internal
:mod:`pysat.card` module.) If a user needs some functionality of PyPBLib
missing in this module, he/she may apply PyPBLib as a standalone library,
when working with PySAT.
.. [1] <NAME>, <NAME>. *PBLib - A Library for Encoding
Pseudo-Boolean Constraints into CNF*. SAT 2015. pp. 9-16
A *pseudo-Boolean constraint* is a constraint of the form:
    :math:`\\left(\\sum_{i=1}^n{a_i\\cdot x_i}\\right)\\circ k`, where
    :math:`a_i\\in\\mathbb{N}`, :math:`x_i\\in\\{y_i,\\neg{y_i}\\}`,
    :math:`y_i\\in\\mathbb{B}`, and :math:`\\circ\\in\\{\\leq,=,\\geq\\}`.
Pseudo-Boolean constraints arise in a number of important practical
applications. Thus, several *encodings* of pseudo-Boolean constraints into
CNF formulas are known [2]_. The list of pseudo-Boolean encodings
supported by this module include BDD [3]_ [4]_, sequential weight counters
[5]_, sorting networks [3]_, adder networks [3]_, and binary merge [6]_.
Access to all cardinality encodings can be made through the main class of
this module, which is :class:`.PBEnc`.
.. [2] <NAME>, <NAME>. *Pseudo-Boolean and
Cardinality Constraints*. Handbook of Satisfiability. 2009.
pp. 695-733
.. [3] <NAME>, <NAME>. *Translating Pseudo-Boolean
Constraints into SAT*. JSAT. vol. 2(1-4). 2006. pp. 1-26
.. [4] <NAME>, <NAME>, <NAME>,
<NAME>. *BDDs for Pseudo-Boolean Constraints -
Revisited*. SAT. 2011. pp. 61-75
.. [5] <NAME>, <NAME>, <NAME>. *A Compact
Encoding of Pseudo-Boolean Constraints into SAT*. KI. 2012.
pp. 107-118
.. [6] <NAME>, <NAME>, <NAME>. *A More Compact
Translation of Pseudo-Boolean Constraints into CNF Such That
Generalized Arc Consistency Is Maintained*. KI. 2014. pp. 123-134
==============
Module details
==============
"""
#
#==============================================================================
import math
from pysat.formula import CNF
# checking whether or not pypblib is available and working as expected
pblib_present = True
try:
from pypblib import pblib
except ImportError:
pblib_present = False
#
#==============================================================================
class NoSuchEncodingError(Exception):
"""
This exception is raised when creating an unknown LEQ, GEQ, or Equals
constraint encoding.
"""
pass
#
#==============================================================================
class EncType(object):
"""
This class represents a C-like ``enum`` type for choosing the
pseudo-Boolean encoding to use. The values denoting the encodings are:
::
best = 0
bdd = 1
seqcounter = 2
sortnetwrk = 3
adder = 4
binmerge = 5
The desired encoding can be selected either directly by its integer
identifier, e.g. ``2``, or by its alphabetical name, e.g.
``EncType.seqcounter``.
All the encodings are produced and returned as a list of clauses in
the :class:`pysat.formula.CNF` format.
Note that the encoding type can be set to ``best``, in which case the
encoder selects one of the other encodings from the list (in most
cases, this invokes the ``bdd`` encoder).
"""
best = 0
bdd = 1
seqcounter = 2
sortnetwrk = 3
adder = 4
binmerge = 5
# mapping from internal encoding identifiers to the ones of PyPBLib
_to_pbenc = {
best: pblib.PB_BEST,
bdd: pblib.PB_BDD,
seqcounter: pblib.PB_SWC,
sortnetwrk: pblib.PB_SORTINGNETWORKS,
adder: pblib.PB_ADDER,
binmerge: pblib.PB_BINARY_MERGE
}
# mapping from internal comparator identifiers to the ones of PyPBLib
_to_pbcmp = {
'<': pblib.LEQ,
'>': pblib.GEQ,
'=': pblib.BOTH
}
#
#==============================================================================
class PBEnc(object):
"""
Abstract class responsible for the creation of pseudo-Boolean
constraints encoded to a CNF formula. The class has three main *class
methods* for creating LEQ, GEQ, and Equals constraints. Given (1)
either a list of weighted literals or a list of unweighted literals
followed by a list of weights, (2) an integer bound and an encoding
type, each of these methods returns an object of class
:class:`pysat.formula.CNF` representing the resulting CNF formula.
Since the class is abstract, there is no need to create an object of
it. Instead, the methods should be called directly as class methods,
e.g. ``PBEnc.atmost(wlits, bound)`` or ``PBEnc.equals(lits, weights,
bound)``. An example usage is the following:
.. code-block:: python
>>> from pysat.pb import *
>>> cnf = PBEnc.atmost(lits=[1, 2, 3], weights=[1, 2, 3], bound=3)
>>> print(cnf.clauses)
[[4], [-1, -5], [-2, -5], [5, -3, -6], [6]]
>>> cnf = PBEnc.equals(lits=[1, 2, 3], weights=[1, 2, 3], bound=3, encoding=EncType.bdd)
>>> print(cnf.clauses)
[[4], [-5, -2], [-5, 2, -1], [-5, -1], [-6, 1], [-7, -2, 6], [-7, 2], [-7, 6], [-8, -3, 5], [-8, 3, 7], [-8, 5, 7], [8]]
"""
@classmethod
def _update_vids(cls, cnf, vpool):
"""
Update variable ids in the given formula and id pool.
        :param cnf: a formula whose variable ids need to be updated.
        :param vpool: the pool of variable identifiers to synchronise with.
:type cnf: :class:`.formula.CNF`
:type vpool: :class:`.formula.IDPool`
"""
top, vmap = vpool.top, {} # current top and variable mapping
# creating a new variable mapping, taking into
# account variables marked as "occupied"
while top < cnf.nv:
top += 1
vpool.top += 1
while vpool._occupied and vpool.top >= vpool._occupied[0][0]:
if vpool.top <= vpool._occupied[0][1] + 1:
vpool.top = vpool._occupied[0][1] + 1
vpool._occupied.pop(0)
vmap[top] = vpool.top
# updating the clauses
for cl in cnf.clauses:
cl[:] = map(lambda l: int(math.copysign(vmap[abs(l)], l)) if abs(l) in vmap else l, cl)
# updating the number of variables
cnf.nv = vpool.top
@classmethod
def _encode(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best, comparator='<'):
"""
This is the method that wraps the encoder of PyPBLib. Although the
method can be invoked directly, a user is expected to call one of
the following methods instead: :meth:`atmost`, :meth:`atleast`, or
:meth:`equals`.
The list of literals can contain either integers or pairs ``(l,
w)``, where ``l`` is an integer literal and ``w`` is an integer
weight. The latter can be done only if no ``weights`` are
specified separately.
:param lits: a list of literals in the sum.
:param weights: a list of weights
:param bound: the value of bound :math:`k`.
:param top_id: top variable identifier used so far.
:param vpool: variable pool for counting the number of variables.
:param encoding: identifier of the encoding to use.
:param comparator: identifier of the comparison operator
:type lits: iterable(int)
:type weights: iterable(int)
:type bound: int
:type top_id: integer or None
:type vpool: :class:`.IDPool`
:type encoding: integer
:type comparator: str
:rtype: :class:`pysat.formula.CNF`
"""
assert pblib_present, 'Package \'pypblib\' is unavailable. Check your installation.'
if encoding < 0 or encoding > 5:
raise(NoSuchEncodingError(encoding))
assert lits, 'No literals are provided.'
assert not top_id or not vpool, \
'Use either a top id or a pool of variables but not both.'
# preparing weighted literals
if weights:
assert len(lits) == len(weights), 'Same number of literals and weights is expected.'
wlits = [pblib.WeightedLit(l, w) for l, w in zip(lits, weights)]
else:
if all(map(lambda lw: (type(lw) in (list, tuple)) and len(lw) == 2, lits)):
# literals are already weighted
wlits = [pblib.WeightedLit(*wl) for wl in lits]
                lits = list(zip(*lits))[0]  # unweighted literals for getting top_id (list() needed under Python 3)
elif all(map(lambda l: type(l) is int, lits)):
# no weights are provided => all weights are units
wlits = [pblib.WeightedLit(l, 1) for l in lits]
else:
assert 0, 'Incorrect literals given.'
# obtaining the top id from the variable pool
if vpool:
top_id = vpool.top
if not top_id:
top_id = max(map(lambda x: abs(x), lits))
# pseudo-Boolean constraint and variable manager
constr = pblib.PBConstraint(wlits, EncType._to_pbcmp[comparator], bound)
varmgr = pblib.AuxVarManager(top_id + 1)
# encoder configuration
config = pblib.PBConfig()
config.set_PB_Encoder(EncType._to_pbenc[encoding])
# encoding
result = pblib.VectorClauseDatabase(config)
pb2cnf = pblib.Pb2cnf(config)
pb2cnf.encode(constr, result, varmgr)
# extracting clauses
ret = CNF(from_clauses=result.get_clauses())
# updating vpool if necessary
if vpool:
if vpool._occupied and vpool.top <= vpool._occupied[0][0] <= ret.nv:
cls._update_vids(ret, vpool)
else:
vpool.top = ret.nv - 1
vpool._next()
return ret
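        # Illustrative sketch of the weighted-pair form described in the docstring above
        # (a toy constraint, not taken from the PySAT documentation):
        #
        #   cnf = PBEnc._encode(lits=[(1, 1), (2, 2), (3, 3)], bound=3,
        #                       encoding=EncType.best, comparator='<')
        #
        # which is equivalent to passing lits=[1, 2, 3] together with weights=[1, 2, 3].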
@classmethod
def leq(cls, lits, weights=None, bound=1, top_id=None, vpool=None,
encoding=EncType.best):
"""
This method can be used for creating a CNF encoding of | |
#!/usr/bin/env python3
import sys
import os
import json
import pickle
import traceback
import numpy as np
import time
import datetime as dtime
from progressbar import ProgressBar, ETA, Bar, Percentage
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score
from utils.UrlUtils import UrlUtils
#from utils.contextUtils import toContext
def toContext(process,exitv,message):
print(process,exitv,message)
pathjoin = os.path.join
pathexists = os.path.exists
mdy = dtime.datetime.now().strftime('%m%d%y')
product_type = 'interferogram'
cache_dir = 'cached'
train_folds = np.inf # inf = leave-one-out, otherwise k-fold cross validation
train_state = 42 # random seed
train_verbose = 0
train_jobs = -1
cv_type = 'loo' if train_folds==np.inf else '%d-fold'%train_folds
cv_probs = True # record prediction probabilities in addition to labels
scorefn = {} # map from name (e.g., mse) -> f(y_true,y_pred)
scorefn['precision'] = lambda te,pr,ul: precision_score(te,pr,labels=ul)
scorefn['recall'] = lambda te,pr,ul: recall_score(te,pr,labels=ul)
errorfn = {} # map from name (e.g., diff) -> f(y_true,y_pred)
errorfn['match'] = lambda y_true,y_pred: y_true==y_pred
# GRID SEARCH PARAMS FOR PARAMETER TUNING ######################################
gridcv_folds = 2 # number of cross-validation folds per gridcv parameter
gridcv_jobs = -1 # -1 = use all cores
gridcv_verbose = 0 # verbosity level of model-tuning cross-validation output
gridcv_score = 'roc_auc'
# SKLEARN MODEL SPECIFICATIONS #################################################
### Random Forest ##############################################################
rf_trees = 500
rf_feats = np.linspace(0.1,1.0,5)
rf_depth = [2,4,7,10,25]
rf_jobs = 1 if gridcv_jobs == -1 else -1 # multiprocessing + RandomForest don't play nice
rf_tuned = {'max_features':rf_feats,'max_depth':rf_depth}
rf_defaults = {
'n_estimators': rf_trees,'max_features':'sqrt','n_jobs':rf_jobs,
'verbose':train_verbose,'random_state':train_state,
'criterion':'gini','class_weight':'balanced_subsample'
}
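# Illustrative sketch (not executed here): the defaults and the tuned grid above are
# meant to be combined via grid search, roughly as
#
#   base = RandomForestClassifier(**rf_defaults)
#   search = GridSearchCV(base, rf_tuned, cv=gridcv_folds, scoring=gridcv_score,
#                         n_jobs=gridcv_jobs, verbose=gridcv_verbose)
#   search.fit(X_train, y_train)   # X_train / y_train are hypothetical feature/label arrays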
### XGBoost ####################################################################
xgb_depth = [3,4,5,10,25]
xgb_subsample = np.linspace(0.1,1,5)
xgb_default = {
'n_estimators':rf_trees,'max_delta_step':1,'learning_rate':0.1,
'objective':'binary:logistic','max_depth':3,'subsample':0.5,
'colsample_bytree':1,'subsample':1,'silent':(not train_verbose),
'seed':train_state,'nthread':train_jobs
}
xgb_tuned = {'learning_rate':[0.001,0.01,0.05,0.1,0.25,0.33],
'max_depth':xgb_depth,'subsample':xgb_subsample}
def loadjson(jsonfile):
with open(jsonfile,'r') as fid:
return json.load(fid)
def dumpjson(objdict,jsonfile):
with open(jsonfile,'w') as fid:
        return json.dump(objdict, fid)
def url2pid(url):
"""
url2pid(url): convert url to product id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- product id for url
"""
if url.endswith('/'):
url = url[:-1]
urlsplit = url.split('/')
return (urlsplit[-2] + '_' + urlsplit[-1]).replace('__','_')
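# Illustrative example (the URL is made up): a product url such as
#   https://host.example/products/v1.0/SOME_DATASET/SOME_PRODUCT_ID
# yields the product id 'SOME_DATASET_SOME_PRODUCT_ID', with any double
# underscores collapsed to a single one.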
def url2featid(url,product_type):
"""
url2pid(url): convert url to feature id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- feature id for url
"""
return url.replace(product_type,'features').replace('features__','features_'+product_type+'__')
def fdict2vec(featdict,clfinputs):
'''
extract feature vector from dict given classifier parameters
specifying which features to use
'''
fvec = []
try:
featspec = clfinputs['features']
featorder = featspec['feature_order']
featdims = featspec['feature_dims']
cohthr = featspec['cohthr10']
featscoh = featdict['%d'%cohthr]
for fid,fdim in zip(featorder,featdims):
flist = featscoh[fid]
if not isinstance(flist,list):
flist = [flist]
assert(len(flist) == fdim)
fvec.extend(flist)
except Exception:
pass
return fvec
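# Hedged illustration (not from the original pipeline): the clfinputs['features']
# layout assumed by fdict2vec() above. Every key name and value below is an
# invented placeholder.
def _example_fdict2vec():
    clfinputs = {'features': {'feature_order': ['coh_hist', 'mean_coh'],
                              'feature_dims': [3, 1],
                              'cohthr10': 5}}
    featdict = {'5': {'coh_hist': [0.1, 0.2, 0.7], 'mean_coh': 0.45}}
    return fdict2vec(featdict, clfinputs)  # -> [0.1, 0.2, 0.7, 0.45]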
def curlProductMeta(prod_url,verbose=False,remove=True):
"""
curlProductMeta(prod_url,verbose=False)
Arguments:
- prod_url: product url
Keyword Arguments:
- verbose: verbose output (default=False)
Returns: metadata dict from product .met.json
"""
if prod_url.endswith('/'):
prod_url = prod_url[:-1]
prod_json = url2pid(prod_url) + '.met.json'
try:
uu = UrlUtils()
silentoutput = ' ' if verbose else ' --silent '
userstr = uu.dav_u + ':' + uu.dav_p
command = 'curl' + silentoutput + '-k -f -u' + userstr + ' -O ' + pathjoin(prod_url,prod_json)
os.system(command)
except Exception:
return {}
if not pathexists(prod_json):
return {}
meta = loadjson(prod_json)
if remove:
os.remove(prod_json)
return meta
def getFeatures(url,clfinputs,product_type='interferogram'):
'''
retrieves feature vector for the given product url, provided clfinputs
'''
featurl = url2featid(url,product_type)
featdict = curlProductMeta(featurl)
fvec = fdict2vec(featdict,clfinputs)
return fvec
def loadQuery(querymeta,queryoptions=[],queryoutfile=None,cache=False):
'''
builds/posts the faceted search query specified in querymeta and dumps the
    result to queryoutfile. If caching is enabled and queryoutfile already exists,
    the query is loaded from disk rather than executed.
'''
if not cache or not pathexists(queryoutfile):
print('executing faceted search query...')
from utils.queryBuilder import postQuery, buildQuery
from utils.contextUtils import toContext
ret,status = postQuery(buildQuery(querymeta,queryoptions))
if cache and status:
# only dump the query if caching enabled and postQuery succeeds
with open(queryoutfile,'wb') as fid:
pickle.dump(ret,fid)
elif cache:
print('loading cached query from %s...'%queryoutfile)
with open(queryoutfile,'rb') as fid:
ret = pickle.load(fid)
print('query returned %d products'%len(ret))
return ret
def loadClassmap(cmapjson):
"""
loadClassmap(cmapjson) - loads classmap file,
    substitutes '_' for '-' as necessary
Arguments:
- cmapjson: classmap .json file
Keyword Arguments:
None
Returns: classmap with substitutions
"""
initialmap = loadjson(cmapjson)
classmap = initialmap.copy()
# substitute '-' with '_' (for user-tagged typos)
tags = initialmap.keys()
for tag in tags:
if '-' in tag:
classmap[tag.replace('-','_')] = classmap[tag]
return classmap
def loadPredictorSpec(clfjson):
"""
loadPredictorSpec(clfjson)
Arguments:
- clfjson: json file specifying classifier parameters
Keyword Arguments:
None
Returns: dict containing classifier parameters,
including (but not limited to):
- classmap: classmap to map user tags to labels
- features: dict containing information about features used to train classifier
"""
clfspec = loadjson(clfjson)
clfspec['classmap'] = loadClassmap(clfspec["classmap_file"])
clfspec['features'] = loadjson(clfspec["feat_file"])
return clfspec
def dumpPredictorSpec(inputs):
clfspec = {}
clfspec['clf_file'] = inputs['clf_name']+'.pkl'
for key in ['clf_type','classmap','feat_file']:
clfspec[key] = inputs[key]
    dumpjson(clfspec,inputs['clf_name']+'.json')
def PredictorSpec(inputjson):
    inputs = loadjson(inputjson)
    clfspec = {}
    clfspec['clf_file'] = inputs['clf_file']
    clfspec['classmap'] = inputs['classmap_file']
    clfspec['features'] = inputs['feat_file']
    return clfspec
def usertags2label(usertags,classmap):
'''
return dictionary of matched (tag,label) pairs in classmap for all tags
returns {} if none of the tags are present in classmap
'''
labelmap = {}
for tag in usertags:
tag = tag.strip()
for k,v in classmap.items():
if tag.count(k):
labelmap[tag] = v
return labelmap
def queryAllTags(taglist,cache=False):
'''
return all urls with user tags present in taglist
'''
tagpkl = pathjoin(cache_dir,"usertags.pkl")
tagquery = {'dataset_type':product_type,'tags':taglist}
querylist = loadQuery(tagquery,cache=cache,queryoutfile=tagpkl)
querydict = {}
for product in querylist:
purl = product['url']
querydict[purl] = product
return querydict
def collectUrlTags(urllist,querymeta={}):
"""
collectUrlTags(urllist,querymeta={})
collects user tags for a list of urls
Arguments:
- urllist: list of urls
Keyword Arguments:
- querymeta: (default={})
Returns: dict keyed on product id containing
- url: input url
- user_tags: tags for input url
"""
tagdict = {}
nurl = len(urllist)
for i,url in enumerate(urllist):
if url in querymeta: # use the query input if possible
meta = querymeta[url]
else: # otherwise retrieve product metadata via curl
meta = curlProductMeta(url)
tagdict[url2pid(url)] = {'url':url,'user_tags':meta.get('user_tags',[])}
return tagdict
def collectTrainingData(urls,clfinputs,cache=False):
'''
construct matrix of training samples X with labels y by intersecting the set of
    IGMs with extracted features (featquery) with the set of tagged IGMs (taggedquery).
    Keeps only IGMs with tags present in classmap, and selects/validates features
according to the parameters in clfinputs.
Returns: dict containing:
- tags: list of user tags used to select training samples
- X, y: training samples, labels
- traintags: tags for each training sample
- trainurls: url for each training sample
- skiplist: list of urls which could not be retrieved due to errors
- errors: list of error strings for each url in skiplist
'''
classmap = clfinputs['classmap']
tags = sorted(list(classmap.keys()))
traindatpkl = pathjoin(cache_dir,"traindat.pkl")
if cache and pathexists(traindatpkl):
print('loading training data from %s...'%traindatpkl)
with open(traindatpkl,'rb') as fid:
ret = pickle.load(fid)
# make sure the set of tags match
        if ret['tags'] == tags:
return ret
print("querying %d tags"%len(tags))
querymeta = queryAllTags(tags,cache=cache)
if len(urls)==0:
print('no URLs provided, training using all tags in classmap')
# construct/run query to get metadata for all products with given tags
urls = list(querymeta.keys())
elif isinstance(urls,str):
urls = [urls]
tagdict = collectUrlTags(urls,querymeta=querymeta)
ntagged = len(tagdict)
X,y = [],[]
traintags,trainurls = [],[]
errors,skiplist = [],[]
widgets = ['collecting features for %d products'%ntagged, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=ntagged).start()
for i,pid in enumerate(tagdict):
tdict = tagdict[pid]
turl,ttags = tdict['url'],tdict['user_tags']
taglabel = usertags2label(ttags,classmap)
if len(taglabel) == 0:
continue
fvec = getFeatures(turl,clfinputs)
if len(fvec)==0:
errmsg = "error collecting features for product %s (skipped)"%pid
errors.append(errmsg)
skiplist.append(turl)
continue
pidtags,pidlabs = list(taglabel.keys()),list(taglabel.values())
if len(pidtags) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
elif len(pidtags) > 1:
ulab = np.unique(pidlabs)
if len(ulab) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
else:
errmsg = "conflicting tags (%s) for product %s, skipped"%(pidtags,pid)
errors.append(errmsg)
skiplist.append(turl)
pbar.update(i)
pbar.finish()
# sort products by product url to ensure identical ordering of X,y
sorti = np.argsort(trainurls)
print('collected', len(sorti), 'training samples (skipped %d)'%len(skiplist))
X,y = np.array(X)[sorti,:],np.array(y)[sorti]
traintags,trainurls = np.array(traintags)[sorti],np.array(trainurls)[sorti]
ret = {'tags':tags,'X':X,'y':y,'traintags':traintags,'trainurls':trainurls,
'skiplist':skiplist,'errors':errors}
if cache:
with open(traindatpkl,'wb') as fid:
pickle.dump(ret,fid)
print('saved training data to %s'%traindatpkl)
return ret
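# Hedged usage sketch (not part of the original script): chaining the helpers
# above. 'clf_spec.json' is a made-up filename; a real spec must contain the
# classmap_file and feat_file entries that loadPredictorSpec() expects.
def _example_collect_training_data(clfjson='clf_spec.json'):
    clfinputs = loadPredictorSpec(clfjson)
    traindat = collectTrainingData([], clfinputs, cache=False)
    return traindat['X'], traindat['y']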
def train(X_train,y_train,clfinputs,**kwargs):
"""
train(X_train,y_train,clfinputs,**kwargs)
train a classifier with parameter tuning via gridsearchcv
Arguments:
- X_train: training data (N x n matrix)
- y_train: training labels (N x 1 vector)
- clfinputs: classifier spec
| |
when returning server files list"""
# Check if it's an additional path as defined on startup
if len(working_path) > 1:
dir_name = Path(working_path).parts[1]
if additional_folders and dir_name in additional_folders:
return os.path.join(additional_folders[dir_name], working_path[len(dir_name) + 2:])
# Might be a user specific path
if working_path[0] == '/':
working_path = '.' + working_path
cur_path = os.path.abspath(os.path.join(upload_folder, working_path))
if not cur_path.startswith(upload_folder):
return None
return cur_path
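# Hedged illustration of the helper above (all paths invented):
#   _handle_files_get_path('/data/run1', '/tmp/uploads_abc', {'data': '/mnt/share'})
#       -> '/mnt/share/run1'           (request under a configured additional folder)
#   _handle_files_get_path('/results', '/tmp/uploads_abc', None)
#       -> '/tmp/uploads_abc/results'  (request under the user's upload folder)
# Requests that resolve outside the upload folder (e.g. '/../etc') return None.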
@app.route('/server/files', methods=['GET'])
@cross_origin(origin='127.0.0.1:3000', headers=['Content-Type','Authorization'])
def handle_files() -> tuple:
"""Handles listing folder contents
Request args:
path: the relative path to list
file_filter: the filter to apply to the returned names
"""
print("LIST FILES")
return_names = []
have_error = False
path = request.args['path']
file_filter = request.args['filter']
if len(path) <= 0:
print(f'Zero length path requested {path}', flush=True)
return 'Resource not found', 404
# Set the upload folder for this user if it hasn't been set yet
# pylint: disable=consider-using-with
if 'upload_folder' not in session or session['upload_folder'] is None or not os.path.isdir(session['upload_folder']):
session['upload_folder'] = tempfile.mkdtemp(dir=FILE_START_PATH)
working_path = normalize_path(path)
try:
cur_path = _handle_files_get_path(working_path, session['upload_folder'], ADDITIONAL_LOCAL_FOLDERS)
if not cur_path:
print(f'Invalid path requested: "{path}"', flush=True)
return 'Resource not found', 400
except FileNotFoundError as ex:
print("A file not found exception was caught:", ex)
have_error = True
if have_error:
return 'Resource not found', 404
for one_file in os.listdir(cur_path):
file_path = os.path.join(cur_path, one_file)
if not one_file[0] == '.' and (not file_filter or (file_filter and fnmatch.fnmatch(one_file, file_filter))):
return_names.append({'name': one_file,
'path': os.path.join(path, one_file),
'size': os.path.getsize(file_path),
'date': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(file_path))),
'type': 'folder' if os.path.isdir(file_path) else 'file'
})
if ADDITIONAL_LOCAL_FOLDERS and path == '/':
for one_name, _ in ADDITIONAL_LOCAL_FOLDERS.items():
return_names.append({'name': one_name,
'path': '/' + one_name,
'size': 0,
'date': '',
'type': 'folder'
})
return json.dumps(return_names)
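# Hedged client-side example (not part of this server): querying the endpoint
# above. The host, port and filter value are placeholders.
#   import requests
#   resp = requests.get('http://127.0.0.1:5000/server/files',
#                       params={'path': '/', 'filter': '*.csv'})
#   for entry in resp.json():
#       print(entry['type'], entry['path'], entry['size'])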
@app.route('/irods/connect', methods=['POST'])
@cross_origin(origin='127.0.0.1:3000', headers=['Content-Type','Authorization'])
def handle_irods_connect() -> tuple:
"""Handles connecting to the iRODS server
Request args:
host: the CyVerse host to access
port: the port associated with the host
zone: the zone of the user account
user: the user name associated with the account
        password: the password associated with the account
Returns:
The success establishing a connection to the server
"""
have_error = False
host, port, zone, user, password = None, None, None, None, None
# Get the fields from the request
try:
host = request.form.get('host')
port = request.form.get('port')
zone = request.form.get('zone')
user = request.form.get('user')
password = request.form.get('password')
if None in [host, port, zone, user, password]:
have_error = True
except ValueError as ex:
print("A value exception was caught while fetching form data:", ex)
have_error = True
if have_error:
print ("Missing or bad value: Host:", str(host), " Port:", str(port), " Zone:", str(zone), " User:",
str(user), " Password:", ('***' if password else str(password)))
return 'iRODS fields are missing or invalid', 400
session['connection'] = {'host': host, 'port': port, 'user': user, 'password': password, 'zone': zone}
return {'path': f'/{zone}/home/{user}'}
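# Hedged client-side example (not part of this server): the form fields the
# /irods/connect handler above expects. Host, port, zone and credentials are
# placeholders.
#   import requests
#   resp = requests.post('http://127.0.0.1:5000/irods/connect',
#                        data={'host': 'irods.example.org', 'port': 1247,
#                              'zone': 'examplezone', 'user': 'someuser',
#                              'password': 'not-a-real-password'})
#   print(resp.json()['path'])  # e.g. '/examplezone/home/someuser'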
@app.route('/irods/files', methods=['GET'])
@cross_origin(origin='127.0.0.1:3000', headers=['Content-Type','Authorization'])
def handle_irods_files() -> tuple:
"""Handles listing folder contents
Request args:
path: the relative path to list
file_filter: the filter to apply to the returned names
"""
return_names = []
path = request.args['path']
file_filter = request.args['filter']
conn_info = session['connection']
conn = iRODSSession(host=conn_info['host'], port=conn_info['port'], user=conn_info['user'],
password=conn_info['password'], zone=conn_info['zone'])
if len(path) <= 0:
        print(f'Zero length path requested {path}', flush=True)
return 'Resource not found', 404
try:
col = conn.collections.get(path)
for one_obj in col.data_objects:
if not one_obj.name == '.' and (not file_filter or (file_filter and fnmatch.fnmatch(one_obj.name, file_filter))):
# pylint: disable=consider-using-f-string
return_names.append({'name': one_obj.name,
'path': one_obj.path,
'size': one_obj.size,
'date': '{0:%Y-%m-%d %H:%M:%S}'.format(one_obj.modify_time),
'type': 'file'
})
for one_obj in col.subcollections:
return_names.append({'name': one_obj.name,
'path': one_obj.path,
'size': 0,
'date': '',
'type': 'folder'
})
except irods.exception.NetworkException as ex:
print('Network exception caught for iRODS listing: ', path, ex)
return f'Unable to complete iRODS listing request: {path}', 504
except irods.exception.CAT_INVALID_AUTHENTICATION as ex:
print('Invalid authentication exception caught for iRODS listing: ', path, ex)
return f'Invalid password specified for iRODS listing request: {path}', 401
except irods.exception.CAT_INVALID_USER as ex:
print('Invalid user exception caught for iRODS listing: ', path, ex)
return f'Invalid user specified for iRODS listing request: {path}', 401
return json.dumps(return_names)
@app.route('/workflow/definitions', methods=['GET'])
@cross_origin(origin='127.0.0.1:3000', headers=['Content-Type','Authorization'])
def handle_workflow_definitions() -> tuple:
"""Handles returning the workflows as JSON
"""
print("Workflow definitions")
return json.dumps(WORKFLOW_DEFINITIONS)
def _handle_workflow_start_save(workflow_save_path: str, params_save_path: str, cur_workflow: dict, workflow_params: list) -> None:
"""Writes the workflow data to files
Arguments:
workflow_save_path - the path to save the current workflow data
params_save_path - the path to save workflow parameters to
cur_workflow - the current workflow
workflow_params - the workflow parameters to save
"""
with open(workflow_save_path, 'w', encoding='utf8') as out_file:
json.dump(cur_workflow, out_file)
with open(params_save_path, 'w', encoding='utf8') as out_file:
json.dump(workflow_params, out_file)
def _handle_workflow_start_find(workflow_data: dict) -> dict:
"""Attempts to find the requested workflow using the workflow data passed on the handle_workflow_start() call
Arguments:
workflow_data - the workflow data to find the workflow for
"""
cur_workflow = None
for one_workflow in WORKFLOW_DEFINITIONS:
if one_workflow['id'] == workflow_data['id']:
cur_workflow = one_workflow
break
# If we can't find the workflow, check for uploaded workflows
if cur_workflow is None and 'workflow_files' in session and session['workflow_files'] is not None:
if workflow_data['id'] in session['workflow_files']:
workflow_file_path = os.path.join(session['workflow_folder'], session['workflow_files'][workflow_data['id']])
if os.path.exists(workflow_file_path):
try:
with open(workflow_file_path, 'r', encoding='utf8') as in_file:
cur_workflow = json.load(in_file)
except json.JSONDecodeError as ex:
# pylint: disable=consider-using-f-string
msg = 'ERROR: A JSON decode error was caught trying to run file "%s"' % os.path.basename(workflow_file_path)
print(msg, ex)
except Exception as ex:
# pylint: disable=consider-using-f-string
msg = 'ERROR: An unknown exception was caught trying to run file "%s"' % os.path.basename(workflow_file_path)
print(msg, ex)
# See if we were sent the workflow
if cur_workflow is None and 'workflow' in workflow_data:
cur_workflow = workflow_data['workflow']
return cur_workflow
@app.route('/workflow/start', methods=['POST'])
@cross_origin(origin='127.0.0.1:3000', headers=['Content-Type','Authorization'])
def handle_workflow_start() -> tuple:
"""Handles starting a workflow
Request body:
config: the workflow configuration to run
"""
print("Workflow start")
cur_workflow = None
workflow_data = request.get_json(force=True)
# Set the workflow folder for this user if it hasn't been set yet
# pylint: disable=consider-using-with
if 'workflow_folder' not in session or session['workflow_folder'] is None or not os.path.isdir(session['workflow_folder']):
session['workflow_folder'] = tempfile.mkdtemp(dir=WORKFLOW_FILE_START_PATH)
# Set the upload folder for this user if it hasn't been set yet
# pylint: disable=consider-using-with
if 'upload_folder' not in session or session['upload_folder'] is None or not os.path.isdir(session['upload_folder']):
session['upload_folder'] = tempfile.mkdtemp(dir=FILE_START_PATH)
# Find the workflow
cur_workflow = _handle_workflow_start_find(workflow_data)
# Make sure we can find the workflow
if cur_workflow is None:
# pylint: disable=consider-using-f-string
msg = "Unable to find workflow associated with workflow ID %s" % (str(workflow_data['id']))
print(msg)
return msg, 400 # Bad request
# Start the process of getting the files
workflow_id = uuid.uuid4().hex
working_dir = os.path.join(WORKFLOW_RUN_PATH, workflow_id)
os.makedirs(working_dir, exist_ok=True)
# Check if we need to decrypt some data
if 'workflow' in workflow_data and 'passcode' in workflow_data['workflow']:
print("HACK: unsecuring parameters before starting workflow")
workflow_params = unsecure_workflow_parameters(workflow_data['params'], workflow_data['workflow']['passcode'])
else:
workflow_params = workflow_data['params']
cur_workflow['id'] = workflow_id
workflow_start(workflow_id, cur_workflow, workflow_params, FILE_HANDLERS, working_dir)
_handle_workflow_start_save(os.path.join(working_dir, '_workflow'), os.path.join(working_dir, '_params'), cur_workflow,
workflow_data['params'])
# Keep workflow IDs in longer term storage
if 'workflows' not in session or session['workflows'] is None:
session['workflows'] = [workflow_id,]
else:
updated_workflows = session['workflows']
updated_workflows.append(workflow_id)
session['workflows'] = updated_workflows
session[workflow_id] = cur_workflow
return json.dumps({'id': workflow_id, 'start_ts': datetime.datetime.now().isoformat().split('.')[0]})
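# Hedged illustration (inferred from the handler above, not a documented schema):
# the minimal JSON body POSTed to /workflow/start. All values are placeholders.
#   {
#     "id": "<workflow definition id or uploaded workflow id>",
#     "params": [ ... step parameters ... ],
#     "workflow": { ... optional inline workflow; may include "passcode" ... }
#   }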
@app.route('/workflow/recover', methods=['GET'])
@cross_origin(origin='127.0.0.1:3000', headers=['Content-Type','Authorization'])
def handle_workflow_recover() -> tuple:
"""Attempts to recover workflows
"""
if 'workflows' in session:
known_workflows = session['workflows']
else:
known_workflows = []
found_workflow_ids = []
if known_workflows:
for one_workflow_id in known_workflows:
working_dir = os.path.join(WORKFLOW_RUN_PATH, one_workflow_id)
workflow_params = os.path.join(working_dir, '_params')
workflow_file = os.path.join(working_dir, '_workflow')
# Disable the pylint check here to allow all the files to be checked on one line
# pylint: disable=too-many-boolean-expressions
if os.path.exists(working_dir) and os.path.isdir(working_dir) and os.path.exists(workflow_params) \
and os.path.isfile(workflow_params) and os.path.exists(workflow_file) and os.path.isfile(workflow_file):
# Recover the workflow
found_workflow_ids.append(one_workflow_id)
# We now have a list of workflow IDs that are valid
missing_workflows = list(set(known_workflows) - set(found_workflow_ids))
# Fix up the session information
session['workflows'] = found_workflow_ids
for one_missing_id in missing_workflows:
if one_missing_id in session:
session.pop(one_missing_id)
# If we have workflows
all_workflows = []
for one_workflow_id in found_workflow_ids:
working_dir = os.path.join(WORKFLOW_RUN_PATH, one_workflow_id)
workflow_params = os.path.join(working_dir, '_params')
workflow_file = os.path.join(working_dir, '_workflow')
with open(workflow_params, 'r', encoding='utf8') as in_file:
workflow_params = json.load(in_file)
with open(workflow_file, 'r', encoding='utf8') as in_file:
found_workflow = json.load(in_file)
workflow_data = {
'id': one_workflow_id,
'params': workflow_params,
'workflow': found_workflow,
'status': workflow_status(one_workflow_id, working_dir)
}
all_workflows.append(workflow_data)
return json.dumps(all_workflows)
@app.route('/workflow/delete/<string:workflow_id>', methods=['PUT'])
@cross_origin(origin='127.0.0.1:3000', headers=['Content-Type','Authorization'])
def handle_workflow_delete(workflow_id: str) -> tuple:
"""Deletes the workflow, if it's finished
Arguments:
workflow_id: the id of the workflow to delete
"""
try:
print("Workflow delete", workflow_id)
cur_workflows = | |
# manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py
# Copyright (c) 2015 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP cDOT multi-SVM storage driver library.
This library extends the abstract base library and completes the multi-SVM
functionality needed by the cDOT multi-SVM Manila driver. This library
variant creates Data ONTAP storage virtual machines (i.e. 'vservers')
as needed to provision shares.
"""
import copy
import re
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import units
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils
from manila.share import share_types
from manila.share import utils as share_utils
from manila import utils
LOG = log.getLogger(__name__)
SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan')
SEGMENTED_NETWORK_TYPES = ('vlan',)
DEFAULT_MTU = 1500
CLUSTER_IPSPACES = ('Cluster', 'Default')
class NetAppCmodeMultiSVMFileStorageLibrary(
lib_base.NetAppCmodeFileStorageLibrary):
@na_utils.trace
def check_for_setup_error(self):
if self._have_cluster_creds:
if self.configuration.netapp_vserver:
msg = ('Vserver is specified in the configuration. This is '
'ignored when the driver is managing share servers.')
LOG.warning(msg)
else: # only have vserver creds, which is an error in multi_svm mode
msg = _('Cluster credentials must be specified in the '
'configuration when the driver is managing share servers.')
raise exception.InvalidInput(reason=msg)
# Ensure one or more aggregates are available.
if not self._find_matching_aggregates():
msg = _('No aggregates are available for provisioning shares. '
'Ensure that the configuration option '
'netapp_aggregate_name_search_pattern is set correctly.')
raise exception.NetAppException(msg)
(super(NetAppCmodeMultiSVMFileStorageLibrary, self).
check_for_setup_error())
@na_utils.trace
def _get_vserver(self, share_server=None, vserver_name=None,
backend_name=None):
if share_server:
backend_details = share_server.get('backend_details')
vserver = backend_details.get(
'vserver_name') if backend_details else None
if not vserver:
msg = _('Vserver name is absent in backend details. Please '
'check whether Vserver was created properly.')
raise exception.VserverNotSpecified(msg)
elif vserver_name:
vserver = vserver_name
else:
msg = _('Share server or vserver name not provided')
raise exception.InvalidInput(reason=msg)
if backend_name:
vserver_client = data_motion.get_client_for_backend(
backend_name, vserver
)
else:
vserver_client = self._get_api_client(vserver)
if not vserver_client.vserver_exists(vserver):
raise exception.VserverNotFound(vserver=vserver)
return vserver, vserver_client
def _get_ems_pool_info(self):
return {
'pools': {
'vserver': None,
'aggregates': self._find_matching_aggregates(),
},
}
@na_utils.trace
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
self._client.prune_deleted_nfs_export_policies()
self._client.prune_deleted_snapshots()
self._client.remove_unused_qos_policy_groups()
(super(NetAppCmodeMultiSVMFileStorageLibrary, self).
_handle_housekeeping_tasks())
@na_utils.trace
def _find_matching_aggregates(self):
"""Find all aggregates match pattern."""
aggregate_names = self._client.list_non_root_aggregates()
pattern = self.configuration.netapp_aggregate_name_search_pattern
return [aggr_name for aggr_name in aggregate_names
if re.match(pattern, aggr_name)]
@na_utils.trace
def setup_server(self, network_info, metadata=None):
"""Creates and configures new Vserver."""
vlan = network_info['segmentation_id']
ports = {}
for network_allocation in network_info['network_allocations']:
ports[network_allocation['id']] = network_allocation['ip_address']
nfs_config = self._default_nfs_config
if (self.is_nfs_config_supported and metadata and
'share_type_id' in metadata):
extra_specs = share_types.get_share_type_extra_specs(
metadata['share_type_id'])
self._check_nfs_config_extra_specs_validity(extra_specs)
nfs_config = self._get_nfs_config_provisioning_options(extra_specs)
@utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
def setup_server_with_lock():
LOG.debug('Creating server %s', network_info['server_id'])
self._validate_network_type(network_info)
vserver_name = self._get_vserver_name(network_info['server_id'])
server_details = {
'vserver_name': vserver_name,
'ports': jsonutils.dumps(ports),
}
if self.is_nfs_config_supported:
server_details['nfs_config'] = jsonutils.dumps(nfs_config)
try:
self._create_vserver(vserver_name, network_info, metadata,
nfs_config=nfs_config)
except Exception as e:
e.detail_data = {'server_details': server_details}
raise
return server_details
return setup_server_with_lock()
@na_utils.trace
def _check_nfs_config_extra_specs_validity(self, extra_specs):
"""Check if the nfs config extra_spec has valid values."""
int_extra_specs = ['netapp:tcp_max_xfer_size',
'netapp:udp_max_xfer_size']
for key in int_extra_specs:
if key in extra_specs:
self._check_if_extra_spec_is_positive(
extra_specs[key], key)
@na_utils.trace
def _check_if_extra_spec_is_positive(self, value, key):
"""Check if extra_spec has a valid positive int value."""
if int(value) < 0:
args = {'value': value, 'key': key}
msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
'used by share server setup.')
raise exception.NetAppException(msg % args)
@na_utils.trace
def _get_nfs_config_provisioning_options(self, specs):
"""Return the nfs config provisioning option."""
nfs_config = self.get_string_provisioning_options(
specs, self.NFS_CONFIG_EXTRA_SPECS_MAP)
        # Change any unset config value to its default
for k, v in nfs_config.items():
if v is None:
nfs_config[k] = self._default_nfs_config[k]
return nfs_config
@na_utils.trace
def _validate_network_type(self, network_info):
"""Raises exception if the segmentation type is incorrect."""
if network_info['network_type'] not in SUPPORTED_NETWORK_TYPES:
msg = _('The specified network type %s is unsupported by the '
'NetApp clustered Data ONTAP driver')
raise exception.NetworkBadConfigurationException(
reason=msg % network_info['network_type'])
@na_utils.trace
def _get_vserver_name(self, server_id):
return self.configuration.netapp_vserver_name_template % server_id
@na_utils.trace
def _create_vserver(self, vserver_name, network_info, metadata=None,
nfs_config=None):
"""Creates Vserver with given parameters if it doesn't exist."""
if self._client.vserver_exists(vserver_name):
msg = _('Vserver %s already exists.')
raise exception.NetAppException(msg % vserver_name)
# NOTE(dviroel): check if this vserver will be a data protection server
is_dp_destination = False
if metadata and metadata.get('migration_destination') is True:
is_dp_destination = True
msg = _("Starting creation of a vserver with 'dp_destination' "
"subtype.")
LOG.debug(msg)
# NOTE(lseki): If there's already an ipspace created for the same VLAN
# port, reuse it. It will be named after the previously created share
# server's neutron subnet id.
node_name = self._client.list_cluster_nodes()[0]
port = self._get_node_data_port(node_name)
vlan = network_info['segmentation_id']
ipspace_name = self._client.get_ipspace_name_for_vlan_port(
node_name, port, vlan) or self._create_ipspace(network_info)
if is_dp_destination:
# Get Data ONTAP aggregate name as pool name.
LOG.debug('Creating a new Vserver (%s) for data protection.',
vserver_name)
self._client.create_vserver_dp_destination(
vserver_name,
self._find_matching_aggregates(),
ipspace_name)
# Set up port and broadcast domain for the current ipspace
self._create_port_and_broadcast_domain(ipspace_name, network_info)
else:
LOG.debug('Vserver %s does not exist, creating.', vserver_name)
self._client.create_vserver(
vserver_name,
self.configuration.netapp_root_volume_aggregate,
self.configuration.netapp_root_volume,
self._find_matching_aggregates(),
ipspace_name)
vserver_client = self._get_api_client(vserver=vserver_name)
security_services = network_info.get('security_services')
try:
self._setup_network_for_vserver(
vserver_name, vserver_client, network_info, ipspace_name,
security_services=security_services, nfs_config=nfs_config)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to configure Vserver.")
# NOTE(dviroel): At this point, the lock was already
# acquired by the caller of _create_vserver.
self._delete_vserver(vserver_name,
security_services=security_services,
needs_lock=False)
def _setup_network_for_vserver(self, vserver_name, vserver_client,
network_info, ipspace_name,
enable_nfs=True, security_services=None,
nfs_config=None):
self._create_vserver_lifs(vserver_name,
vserver_client,
network_info,
ipspace_name)
self._create_vserver_admin_lif(vserver_name,
vserver_client,
network_info,
ipspace_name)
self._create_vserver_routes(vserver_client,
network_info)
if enable_nfs:
vserver_client.enable_nfs(
self.configuration.netapp_enabled_share_protocols,
nfs_config=nfs_config)
if security_services:
self._client.setup_security_services(security_services,
vserver_client,
vserver_name)
def _get_valid_ipspace_name(self, network_id):
"""Get IPspace name according to network id."""
return 'ipspace_' + network_id.replace('-', '_')
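    # Illustrative example (not part of the driver): the helper above maps a
    # network id to a deterministic IPspace name, e.g. a made-up id
    # '2f0e9f10-7a4b-4c21-9d3a-1b2c3d4e5f60' becomes
    # 'ipspace_2f0e9f10_7a4b_4c21_9d3a_1b2c3d4e5f60'.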
@na_utils.trace
def _create_ipspace(self, network_info):
"""If supported, create an IPspace for a new Vserver."""
if not self._client.features.IPSPACES:
return None
if (network_info['network_allocations'][0]['network_type']
not in SEGMENTED_NETWORK_TYPES):
return client_cmode.DEFAULT_IPSPACE
# NOTE(cknight): Neutron needs cDOT IP spaces because it can provide
# overlapping IP address ranges for different subnets. That is not
# believed to be an issue for any of Manila's other network plugins.
ipspace_id = network_info.get('neutron_subnet_id')
if not ipspace_id:
return client_cmode.DEFAULT_IPSPACE
ipspace_name = self._get_valid_ipspace_name(ipspace_id)
self._client.create_ipspace(ipspace_name)
return ipspace_name
@na_utils.trace
def _create_vserver_lifs(self, vserver_name, vserver_client, network_info,
ipspace_name):
"""Create Vserver data logical interfaces (LIFs)."""
nodes = self._client.list_cluster_nodes()
node_network_info = zip(nodes, network_info['network_allocations'])
for node_name, network_allocation in node_network_info:
lif_name = self._get_lif_name(node_name, network_allocation)
self._create_lif(vserver_client, vserver_name, ipspace_name,
node_name, lif_name, network_allocation)
@na_utils.trace
def _create_vserver_admin_lif(self, vserver_name, vserver_client,
network_info, ipspace_name):
"""Create Vserver admin LIF, if defined."""
network_allocations = network_info.get('admin_network_allocations')
if not network_allocations:
LOG.info('No admin network defined for Vserver %s.',
vserver_name)
return
node_name = self._client.list_cluster_nodes()[0]
network_allocation = network_allocations[0]
lif_name = self._get_lif_name(node_name, network_allocation)
self._create_lif(vserver_client, vserver_name, ipspace_name,
node_name, lif_name, network_allocation)
@na_utils.trace
def _create_vserver_routes(self, vserver_client, network_info):
"""Create Vserver route and set gateways."""
route_gateways = []
# NOTE(gouthamr): Use the gateway from the tenant subnet/s
# for the static routes. Do not configure a route for the admin
# subnet because fast path routing will work for incoming
# connections and there are no requirements for outgoing
# connections on the admin network yet.
for net_allocation in (network_info['network_allocations']):
if net_allocation['gateway'] not in route_gateways:
vserver_client.create_route(net_allocation['gateway'])
route_gateways.append(net_allocation['gateway'])
@na_utils.trace
def _get_node_data_port(self, node):
port_names = self._client.list_node_data_ports(node)
pattern = self.configuration.netapp_port_name_search_pattern
matched_port_names = [port_name for port_name in port_names
if re.match(pattern, port_name)]
if not matched_port_names:
raise exception.NetAppException(
_('Could not find eligible network ports on node %s on which '
'to create Vserver LIFs.') % node)
return matched_port_names[0]
def _get_lif_name(self, node_name, network_allocation):
"""Get LIF name based on template from manila.conf file."""
lif_name_args = {
'node': node_name,
'net_allocation_id': network_allocation['id'],
}
return self.configuration.netapp_lif_name_template % lif_name_args
@na_utils.trace
def _create_lif(self, vserver_client, vserver_name, ipspace_name,
node_name, lif_name, network_allocation):
"""Creates LIF for Vserver."""
port = self._get_node_data_port(node_name)
ip_address = network_allocation['ip_address']
netmask = utils.cidr_to_netmask(network_allocation['cidr'])
vlan = network_allocation['segmentation_id']
network_mtu = network_allocation.get('mtu')
mtu = network_mtu or DEFAULT_MTU
if not vserver_client.network_interface_exists(
vserver_name, node_name, port, ip_address, netmask, vlan):
self._client.create_network_interface(
ip_address, netmask, vlan, node_name, port, vserver_name,
lif_name, ipspace_name, mtu)
@na_utils.trace
def _create_port_and_broadcast_domain(self, ipspace_name, network_info):
nodes = self._client.list_cluster_nodes()
node_network_info = zip(nodes, network_info['network_allocations'])
for node_name, network_allocation in node_network_info:
port = self._get_node_data_port(node_name)
vlan = network_allocation['segmentation_id']
network_mtu = network_allocation.get('mtu')
mtu = network_mtu or DEFAULT_MTU
| |
overseeing overseen overseer
overseers oversees overshadow overshadowed overshadowing overshadows
overshoot overshooting overshoots overshot oversight oversights
oversimplification oversleep oversleeping oversleeps overslept
overstate overstated overstates overstating overstep overstepped
overstepping oversteps overt overtake overtaken overtakes overtaking
overthrew overthrow overthrowing overthrown overthrows overtimes
overtly overtook overture overtures overturn overturned overturning
overturns overuse overused overuses overusing overweight
overwhelmingly overwork overworked overworking overworks overwrite
overwrites overwrought ovum owl owls ox oxen oxes oxidation oxide
oxides oyster oysters pa paced pacemaker pacemakers paces pacific
pacified pacifiers pacifies pacifism pacifist pacifists pacify
pacifying pacing packer packers pact pacts paddies paddle paddled
paddles paddling paddock paddocked paddocking paddocks paddy padlock
padlocked padlocking padlocks pagan pagans pageant pageantry pageants
pager pagination pagoda pagodas pail pails pained painfuller
painfullest paining painlessly painstaking painter paired pairing pal
palaces palatable palate palates palatial paled paleontologist
paleontologists paleontology paler pales palest palette palettes
paling pall pallbearer pallbearers palled pallid palling pallor palls
palm palmed palming palms palomino palominos palpable palpably pals
paltrier paltriest paltry pamper pampered pampering pampers pamphlet
pamphlets panacea panaceas pancake pancaked pancakes pancaking
pancreas pancreases pancreatic panda pandas pandemonium pander
pandered pandering panders pane panes pang panged panging pangs
panhandle panhandled panhandler panhandlers panhandles panhandling
panicked panickier panickiest panicking panicky panics panned panning
panorama panoramas panoramic pans pansies pansy panted panther
panthers pantie panties panting pantomime pantomimed pantomimes
pantomiming pantries pantry pap papa papacies papacy papal papas
papaya papayas paperbacked paperbacking paperbacks papered papering
paperweight paperweights paperwork paprika papyri papyrus parable
parabled parables parabling parachute parachuted parachutes
parachuting paraded parades paradigm parading paradises paradoxes
paradoxical paradoxically paraffin paragon paragons paragraphed
paragraphing parakeet parakeets paralysis paralytic paralytics
paramount paranoids paraphernalia paraphrased paraphrases paraphrasing
paraplegic paraplegics parasite parasites parasitic parasol parasols
paratrooper paratroopers parcel parcels parch parched parches parching
parchment parchments pardonable pardoned pardoning pardons pare pared
parentage parental parented parenthetical parenthood parenting pares
paring parish parishes parishioner parishioners parka parkas parkway
parkways parliamentary parliaments parodied parodies parodying parole
paroled paroles paroling parred parring parroted parroting parrots
pars parsec parsecs parser parsley parsnip parsnips parson parsonage
parsonages parsons partake partaken partakes partaking parted
partiality partials participation participle participles particulars
partied parting partings partisan partisans partnered partnering
partnership partnerships partook partridge partridges partying pas
passable passageway passageways passbook passbooks passe passer
passionated passionately passionates passionating passioned passioning
passions passively passives passports pasta pastas pasted pastel
pastels pastes pastiche pastier pasties pastiest pastime pastimes
pasting pastor pastoral pastorals pastors pastries pastry pasts
pasture pastured pastures pasturing pasty patchwork patchworks patchy
pate patented patenting patently patents paternal paternalism
paternity pates pathetically pathological pathologist pathologists
pathology pathos pathway pathways patienter patientest patiently patio
patios patriarch patriarchal patriarchs patrimonies patrimony patriot
patriotic patriotism patriots patrol patrolled patrolling patrols
patron patronage patronages patrons pats patted patter pattered
pattering patterned patterning patters patties patting patty paucity
paunch paunched paunches paunchier paunchiest paunching paunchy pauper
paupers pave paved pavemented pavementing pavements paves pavilion
pavilions paving paw pawed pawing pawn pawnbroker pawnbrokers pawned
pawning pawns paws payable payer payers payload payoff payoffs payroll
payrolls pea peaceable peacefuller peacefullest peacefully peacemaker
peacemakers peaces peach peaches peacock peacocks peaked peaking peal
pealed pealing peals pear pearl pearled pearling pearls pears peas
peat pebble pebbled pebbles pebbling pecan pecans peck pecked pecking
pecks peculiarities peculiarity peculiarly pedagogy pedals peddle
peddled peddler peddlers peddles peddling pedestal pedestals
pediatricians pediatrics pedigree pedigrees peek peeked peeking peeks
peel peeled peeling peels peep peeped peeping peeps peered peering
peerless peeve peeved peeves peeving peevish peg pegged pegging pegs
pelican pelicans pellet pelleted pelleting pellets pelt pelted pelting
pelts pelvic pelvics pelvis pelvises penal penance penanced penances
penancing penchant pencils pendant pendants pendulum pendulums
penetrate penetrated penetrates penetrating penetration penetrations
penguins penicillin peninsula peninsulas penis penises penitence
penitent penitentiaries penitentiary penitents penknife penknives
penmanship pennant pennants penned penniless penning pension pensioned
pensioner pensioners pensioning pensions pensive pensively pentagon
pentagonal pentagonals pentagons penthouse penthoused penthouses
penthousing penultimate peon peonies peons peony peopled peopling pep
pepped pepper peppered peppering peppermint peppermints peppers
pepping peps percentages perceptible perceptions perceptive perch
perchance perched perches perching percolate percolated percolates
percolating percolation percolator percolators percussion peremptory
perennial perennials peres perfected perfecter perfectest perfecting
perfectionist perfectionists perfections perfects perforate perforated
perforates perforating perforation perforations performer performers
perfume perfumed perfumes perfuming perfunctorily perfunctory
perhapses peril perilous perilously perils perimeter perimeters
periodical periodicals peripheries periphery periscope periscoped
periscopes periscoping perish perishable perishables perished perishes
perishing perjure perjured perjures perjuries perjuring perjury perk
perked perkier perkiest perking perks perky permanence permanents
permeate permeated permeates permeating permissions permissive
permutation permutations pernicious peroxide peroxided peroxides
peroxiding perpendicular perpendiculars perpetrate perpetrated
perpetrates perpetrating perpetrator perpetrators perpetually
perpetuals perpetuate perpetuated perpetuates perpetuating perplex
perplexed perplexes perplexing perplexities perplexity persecution
persecutions persecutor persecutors perseverance persevere persevered
perseveres persevering persisted persistence persistently persisting
persists persona personable personals personification personifications
personified personifies personify personifying perspectives
perspiration perspire perspired perspires perspiring persuasions
persuasive persuasively pert pertain pertained pertaining pertains
perter pertest pertinent pertinents perts perturb perturbed perturbing
perturbs perusal perusals peruse perused peruses perusing pervade
pervaded pervades pervading pervasive perversion perversions pervert
perverted perverting perverts peskier peskiest pesky pessimism
pessimist pessimistic pessimists pest pester pestered pestering
pesters pesticide pesticides pestilence pestilences pests petal petals
peter petered petering peters petite petites petition petitioned
petitioning petitions petrified petrifies petrify petrifying petroleum
pets petted petticoat petticoats pettier petties pettiest pettiness
petting petulant petunia petunias pew pews pewter pewters phantom
phantoms pharmaceutical pharmaceuticals pharmacist pharmacists
pheasant pheasants phenomenal phenomenally phenomenas philanthropic
philanthropies philanthropist philanthropists philanthropy phlegm
phlegmatic phobia phobias phonetic phonetics phonics phonied phonier
phonies phoniest phonograph phonographs phony phonying phosphor
phosphorescence phosphorescent phosphorus photocopied photocopier
photocopiers photocopies photocopying photoed photogenic photographed
photographer photographers photographing photography photoing photon
photons photosynthesis phototypesetter phraseology physicals physician
physicians physiological physique physiques pianist pianists pianos
piccolo piccolos pickax pickaxed pickaxes pickaxing picket picketed
picketing pickets pickier pickiest pickle pickled pickles pickling
pickpocket pickpockets pickup pickups picky picnic picnicked
picnicking picnics pictorial pictorials pictured picturesque picturing
piddle piddled piddles piddling pieced piecemeal piecework piecing
pier pierce pierced pierces piercing piers pies piety pigeoned
pigeonhole pigeonholed pigeonholes pigeonholing pigeoning pigeons
pigged pigging piggish piggyback piggybacked piggybacking piggybacks
pigheaded pigment pigments pigpen pigpens pigtail pigtails pike piked
pikes piking piled pilfer pilfered pilfering pilfers pilgrim
pilgrimage pilgrimages pilgrims piling pillage pillaged pillages
pillaging pillar pillars pillow pillowcase pillowcases pillowed
pillowing pillows piloted piloting pilots pimple pimples pimplier
pimpliest pimply pincushion pincushions pine pineapple pineapples
pined pines pining pinion pinioned pinioning pinions pinked pinker
pinkest pinking pinks pinnacle pinnacles pinned pinning pinpoint
pinpointed pinpointing pinpoints pioneer pioneered pioneering pioneers
pious piped pipelines piping pique piqued piques piquing piracy
piranha piranhas pirate pirated pirates pirating pirouette pirouetted
pirouettes pirouetting pis pistachio pistachios pistol pistols piston
pistons pitched pitcher pitchers pitches pitchfork pitchforked
pitchforking pitchforks pitching piteous piteously pithier pithiest
pithy pitied pities pitiful pitifuller pitifullest pitifully pitiless
pits pittance pittances pitted pitting pitying pivot pivotal pivoted
pivoting pivots pixie pixies placard placarded placarding placards
placate placated placates placating placement placenta placentas
placid placidly plagiarism plagiarisms plagiarist plagiarists plaice
plaid plaided plaiding plaids plainer plainest plains plaintiff
plaintiffs plaintive planar planed planetarium planetariums planing
plank planked planking planks plankton planner planners plantain
plantains plantation plantations planter planters plaque plaques
plasma plastics plateau plateaued plateauing plateaus plated
platformed platforming platforms plating platinum platitude platitudes
platoon platooned platooning platoons platter platters plausibility
plausibly playable playback playful playfully playfulness playgrounds
playhouse playhouses playmate playmates playpen playpens plaything
playthings playwright playwrights plaza plazas plead pleaded pleading
pleads pleas pleasanter pleasantest pleasantries pleasantry pleasings
pleasurable pleasured pleasures pleasuring pleat pleated pleating
pleats pledge pledged pledges pledging plentiful plentifully plethora
pliable pliant plied pliers plies plight plighted plighting plights
plod plodded plodding plods plop plopped plopping plops plotters ploys
pluck plucked plucking plucks plucky plum plumage plumb plumbed
plumber plumbers plumbing plumbs plume plumed plumes pluming plummet
plummeted plummeting plummets plump plumped plumper plumpest plumping
plumps plums plunder plundered plundering plunders plunge plunged
plunger plungers plunges plunging plurality plurals pluses plush
plusher plushest plussed plussing plutonium ply plying plywood
pneumatic pneumonia poach poached poacher poachers poaches poaching
pocketbook pocketbooks pocketed pocketing pockmark pockmarked
pockmarking pockmarks pod podded podding podium podiums pods poetical
poignancy poignant poinsettia poinsettias pointedly pointlessly poise
poised poises poising poisonous poked poker pokers pokes pokier
pokiest poking poky polarity polars poled polemic polemics poles
policed policemen polices policewoman policewomen policing poling
polio polios politely politer politest polka polkaed polkaing polkas
polled pollen pollinate pollinated pollinates pollinating pollination
polling pollster pollsters pollutant pollutants pollute polluted
pollutes polluting polo polygamous polygamy polygon polygons
polynomials polyp polyps polytechnic pomegranate pomegranates pomp
poncho ponchos pond ponder pondered pondering ponderous ponders ponds
ponies pontoon pontooned pontooning pontoons pony poodle poodles
pooled pooling pools poop pooped pooping poops popcorn poplar poplars
poppies poppy populaces popularly populars populous porcelain porch
porches porcupine porcupines pore pored pores poring pornographic
porous porpoise porpoised porpoises porpoising porridge portables
portal portals portend portended portending portends portent portents
portered portering portfolio portfolios porthole portholes portico
porticoes portioned portioning portlier portliest portly portrait
portraits portrayal portrayals posies positional | |
# mesonbuild/compilers/mixins/gnu.py
# Copyright 2019 The meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides mixins for GNU compilers and GNU-like compilers."""
import abc
import functools
import os
import pathlib
import re
import subprocess
import typing as T
from ... import mesonlib
from ... import mlog
if T.TYPE_CHECKING:
from ...environment import Environment
from .clike import CLikeCompiler as Compiler
else:
# This is a bit clever, for mypy we pretend that these mixins descend from
# Compiler, so we get all of the methods and attributes defined for us, but
# for runtime we make them descend from object (which all classes normally
    # do). This gives us DRYer type checking, with no runtime impact.
Compiler = object
# XXX: prevent circular references.
# FIXME: this really is a posix interface not a c-like interface
clike_debug_args = {
False: [],
True: ['-g'],
} # type: T.Dict[bool, T.List[str]]
gnulike_buildtype_args = {
'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
} # type: T.Dict[str, T.List[str]]
gnu_optimization_args = {
'0': [],
'g': ['-Og'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os'],
} # type: T.Dict[str, T.List[str]]
gnulike_instruction_set_args = {
'mmx': ['-mmmx'],
'sse': ['-msse'],
'sse2': ['-msse2'],
'sse3': ['-msse3'],
'ssse3': ['-mssse3'],
'sse41': ['-msse4.1'],
'sse42': ['-msse4.2'],
'avx': ['-mavx'],
'avx2': ['-mavx2'],
'neon': ['-mfpu=neon'],
} # type: T.Dict[str, T.List[str]]
gnu_symbol_visibility_args = {
'': [],
'default': ['-fvisibility=default'],
'internal': ['-fvisibility=internal'],
'hidden': ['-fvisibility=hidden'],
'protected': ['-fvisibility=protected'],
'inlineshidden': ['-fvisibility=hidden', '-fvisibility-inlines-hidden'],
} # type: T.Dict[str, T.List[str]]
gnu_color_args = {
'auto': ['-fdiagnostics-color=auto'],
'always': ['-fdiagnostics-color=always'],
'never': ['-fdiagnostics-color=never'],
} # type: T.Dict[str, T.List[str]]
@functools.lru_cache(maxsize=None)
def gnulike_default_include_dirs(compiler: T.Tuple[str], lang: str) -> T.List[str]:
lang_map = {
'c': 'c',
'cpp': 'c++',
'objc': 'objective-c',
'objcpp': 'objective-c++'
}
if lang not in lang_map:
return []
lang = lang_map[lang]
env = os.environ.copy()
env["LC_ALL"] = 'C'
cmd = list(compiler) + ['-x{}'.format(lang), '-E', '-v', '-']
p = subprocess.Popen(
cmd,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
env=env
)
stdout = p.stdout.read().decode('utf-8', errors='replace')
parse_state = 0
paths = [] # type: T.List[str]
for line in stdout.split('\n'):
line = line.strip(' \n\r\t')
if parse_state == 0:
if line == '#include "..." search starts here:':
parse_state = 1
elif parse_state == 1:
if line == '#include <...> search starts here:':
parse_state = 2
else:
paths.append(line)
elif parse_state == 2:
if line == 'End of search list.':
break
else:
paths.append(line)
if not paths:
mlog.warning('No include directory found parsing "{cmd}" output'.format(cmd=" ".join(cmd)))
# Append a normalized copy of paths to make path lookup easier
paths += [os.path.normpath(x) for x in paths]
return paths
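# Illustrative compiler output parsed above (a hypothetical GCC on Linux; the
# actual directories vary by toolchain and system):
#
#   #include "..." search starts here:
#   #include <...> search starts here:
#    /usr/lib/gcc/x86_64-linux-gnu/9/include
#    /usr/local/include
#    /usr/include
#   End of search list.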
class GnuLikeCompiler(Compiler, metaclass=abc.ABCMeta):
"""
GnuLikeCompiler is a common interface to all compilers implementing
the GNU-style commandline interface. This includes GCC, Clang
and ICC. Certain functionality between them is different and requires
that the actual concrete subclass define their own implementation.
"""
LINKER_PREFIX = '-Wl,'
def __init__(self) -> None:
self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_coverage',
'b_ndebug', 'b_staticpic', 'b_pie']
if not (self.info.is_windows() or self.info.is_cygwin() or self.info.is_openbsd()):
self.base_options.append('b_lundef')
if not self.info.is_windows() or self.info.is_cygwin():
self.base_options.append('b_asneeded')
if not self.info.is_hurd():
self.base_options.append('b_sanitize')
# All GCC-like backends can do assembly
self.can_compile_suffixes.add('s')
def get_pic_args(self) -> T.List[str]:
if self.info.is_windows() or self.info.is_cygwin() or self.info.is_darwin():
            return [] # On Windows and OS X, PIC is always on.
return ['-fPIC']
def get_pie_args(self) -> T.List[str]:
return ['-fPIE']
def get_buildtype_args(self, buildtype: str) -> T.List[str]:
return gnulike_buildtype_args[buildtype]
@abc.abstractmethod
def get_optimization_args(self, optimization_level: str) -> T.List[str]:
pass
def get_debug_args(self, is_debug: bool) -> T.List[str]:
return clike_debug_args[is_debug]
@abc.abstractmethod
def get_pch_suffix(self) -> str:
pass
def split_shlib_to_parts(self, fname: str) -> T.Tuple[str, str]:
return os.path.dirname(fname), fname
def get_instruction_set_args(self, instruction_set: str) -> T.Optional[T.List[str]]:
return gnulike_instruction_set_args.get(instruction_set, None)
def get_default_include_dirs(self) -> T.List[str]:
return gnulike_default_include_dirs(tuple(self.exelist), self.language)
@abc.abstractmethod
def openmp_flags(self) -> T.List[str]:
pass
def gnu_symbol_visibility_args(self, vistype: str) -> T.List[str]:
return gnu_symbol_visibility_args[vistype]
def gen_vs_module_defs_args(self, defsfile: str) -> T.List[str]:
if not isinstance(defsfile, str):
raise RuntimeError('Module definitions file should be str')
# On Windows targets, .def files may be specified on the linker command
# line like an object file.
if self.info.is_windows() or self.info.is_cygwin():
return [defsfile]
# For other targets, discard the .def file.
return []
def get_argument_syntax(self) -> str:
return 'gcc'
def get_profile_generate_args(self) -> T.List[str]:
return ['-fprofile-generate']
def get_profile_use_args(self) -> T.List[str]:
return ['-fprofile-use', '-fprofile-correction']
def get_gui_app_args(self, value: bool) -> T.List[str]:
if self.info.is_windows() or self.info.is_cygwin():
return ['-mwindows' if value else '-mconsole']
return []
def compute_parameters_with_absolute_paths(self, parameter_list: T.List[str], build_dir: str) -> T.List[str]:
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '-L':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
return parameter_list
@functools.lru_cache()
def _get_search_dirs(self, env: 'Environment') -> str:
extra_args = ['--print-search-dirs']
with self._build_wrapper('', env, extra_args=extra_args,
dependencies=None, mode='compile',
want_output=True) as p:
return p.stdout
def _split_fetch_real_dirs(self, pathstr: str) -> T.List[str]:
# We need to use the path separator used by the compiler for printing
# lists of paths ("gcc --print-search-dirs"). By default
# we assume it uses the platform native separator.
pathsep = os.pathsep
# clang uses ':' instead of ';' on Windows https://reviews.llvm.org/D61121
# so we need to repair things like 'C:\foo:C:\bar'
if pathsep == ';':
pathstr = re.sub(r':([^/\\])', r';\1', pathstr)
# pathlib treats empty paths as '.', so filter those out
paths = [p for p in pathstr.split(pathsep) if p]
result = []
for p in paths:
# GCC returns paths like this:
# /usr/lib/gcc/x86_64-linux-gnu/8/../../../../x86_64-linux-gnu/lib
# It would make sense to normalize them to get rid of the .. parts
# Sadly when you are on a merged /usr fs it also kills these:
# /lib/x86_64-linux-gnu
# since /lib is a symlink to /usr/lib. This would mean
# paths under /lib would be considered not a "system path",
# which is wrong and breaks things. Store everything, just to be sure.
pobj = pathlib.Path(p)
unresolved = pobj.as_posix()
if pobj.exists():
if unresolved not in result:
result.append(unresolved)
try:
resolved = pathlib.Path(p).resolve().as_posix()
if resolved not in result:
result.append(resolved)
except FileNotFoundError:
pass
return result
def get_compiler_dirs(self, env: 'Environment', name: str) -> T.List[str]:
'''
Get dirs from the compiler, either `libraries:` or `programs:`
'''
stdo = self._get_search_dirs(env)
for line in stdo.split('\n'):
if line.startswith(name + ':'):
return self._split_fetch_real_dirs(line.split('=', 1)[1])
return []
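    # Illustrative `--print-search-dirs` output consumed by the two helpers
    # above (paths are system-dependent placeholders):
    #
    #   install: /usr/lib/gcc/x86_64-linux-gnu/9/
    #   programs: =/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/bin/
    #   libraries: =/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/x86_64-linux-gnu/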
def get_lto_compile_args(self) -> T.List[str]:
return ['-flto']
def sanitizer_compile_args(self, value: str) -> T.List[str]:
if value == 'none':
return []
args = ['-fsanitize=' + value]
if 'address' in value: # for -fsanitize=address,undefined
args.append('-fno-omit-frame-pointer')
return args
def get_output_args(self, target: str) -> T.List[str]:
return ['-o', target]
def get_dependency_gen_args(self, outtarget: str, outfile: str) -> T.List[str]:
return ['-MD', '-MQ', outtarget, '-MF', outfile]
def get_compile_only_args(self) -> T.List[str]:
return ['-c']
def get_include_args(self, path: str, is_system: bool) -> T.List[str]:
if not path:
path = '.'
if is_system:
return ['-isystem' + path]
return ['-I' + path]
@classmethod
def use_linker_args(cls, linker: str) -> T.List[str]:
if linker not in {'gold', 'bfd', 'lld'}:
raise mesonlib.MesonException(
'Unsupported linker, only bfd, gold, and lld are supported, '
'not {}.'.format(linker))
return ['-fuse-ld={}'.format(linker)]
def get_coverage_args(self) -> T.List[str]:
return ['--coverage']
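# Illustrative sketches (not part of Meson) of the GNU-like helpers above.
# (1) 'gcc --print-search-dirs' emits lines such as
#       libraries: =/usr/lib/gcc/x86_64-linux-gnu/8:/usr/lib/x86_64-linux-gnu
#     get_compiler_dirs() picks the line with the requested prefix and hands the
#     part after '=' to _split_fetch_real_dirs(); the demo below only mimics the
#     line selection and pathsep split, and the sample prefix is an assumption.
# (2) The flag helpers map abstract options onto GCC-style arguments; 'compiler'
#     is assumed to be an instance of any concrete GnuLikeCompiler subclass.
def _demo_parse_search_dirs(stdout_text, name='libraries'):
    import os
    for line in stdout_text.split('\n'):
        if line.startswith(name + ':'):
            return [p for p in line.split('=', 1)[1].split(os.pathsep) if p]
    return []
def _demo_gnulike_flag_mapping(compiler):
    assert compiler.use_linker_args('gold') == ['-fuse-ld=gold']
    assert compiler.get_include_args('/opt/include', is_system=True) == ['-isystem/opt/include']
    assert compiler.sanitizer_compile_args('address,undefined') == [
        '-fsanitize=address,undefined', '-fno-omit-frame-pointer']
    assert compiler.get_output_args('foo.o') == ['-o', 'foo.o']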
class GnuCompiler(GnuLikeCompiler):
"""
GnuCompiler represents an actual GCC in its many incarnations.
Compilers imitating GCC (Clang/Intel) should use the GnuLikeCompiler ABC.
"""
def __init__(self, defines: T.Optional[T.Dict[str, str]]):
super().__init__()
self.id = 'gcc'
self.defines = defines or {}
self.base_options.append('b_colorout')
def get_colorout_args(self, colortype: str) -> T.List[str]:
if mesonlib.version_compare(self.version, '>=4.9.0'):
return gnu_color_args[colortype][:]
return []
def get_warn_args(self, level: str) -> T.List[str]:
# Mypy doesn't understand cooperative inheritance
args = super().get_warn_args(level)
if mesonlib.version_compare(self.version, '<4.8.0') and '-Wpedantic' in args:
# -Wpedantic was added in 4.8.0
# https://gcc.gnu.org/gcc-4.8/changes.html
args[args.index('-Wpedantic')] = '-pedantic'
return args
def has_builtin_define(self, define: str) -> bool:
return define in self.defines
def get_builtin_define(self, define: str) -> T.Optional[str]:
if define in self.defines:
return self.defines[define]
return None
def get_optimization_args(self, optimization_level: str) -> T.List[str]:
return gnu_optimization_args[optimization_level]
def get_pch_suffix(self) -> str:
return 'gch'
def openmp_flags(self) -> T.List[str]:
return ['-fopenmp']
def has_arguments(self, args: T.List[str], env: 'Environment', code: str,
mode: str) -> T.Tuple[bool, bool]:
# For some compiler command line arguments, the GNU compilers will
# emit a warning on stderr indicating that | |
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
# Copyright 2011, Nexenta Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like cp command for cloud storage providers."""
from __future__ import absolute_import
import logging
import os
import time
import traceback
from apitools.base.py import encoding
from gslib import copy_helper
from gslib.cat_helper import CatHelper
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.commands.compose import MAX_COMPONENT_COUNT
from gslib.copy_helper import CreateCopyHelperOpts
from gslib.copy_helper import GetSourceFieldsNeededForCopy
from gslib.copy_helper import GZIP_ALL_FILES
from gslib.copy_helper import ItemExistsError
from gslib.copy_helper import Manifest
from gslib.copy_helper import PARALLEL_UPLOAD_TEMP_NAMESPACE
from gslib.copy_helper import SkipUnsupportedObjectError
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.metrics import LogPerformanceSummaryParams
from gslib.name_expansion import NameExpansionIterator
from gslib.name_expansion import SeekAheadNameExpansionIterator
from gslib.name_expansion import SourceUrlTypeIterator
from gslib.posix_util import ConvertModeToBase8
from gslib.posix_util import DeserializeFileAttributesFromObjectMetadata
from gslib.posix_util import InitializeUserGroups
from gslib.posix_util import POSIXAttributes
from gslib.posix_util import SerializeFileAttributesToObjectMetadata
from gslib.posix_util import ValidateFilePermissionAccess
from gslib.storage_url import ContainsWildcard
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.util import CalculateThroughput
from gslib.util import CreateLock
from gslib.util import DEBUGLEVEL_DUMP_REQUESTS
from gslib.util import GetCloudApiInstance
from gslib.util import IsCloudSubdirPlaceholder
from gslib.util import MakeHumanReadable
from gslib.util import NO_MAX
from gslib.util import NormalizeStorageClass
from gslib.util import RemoveCRLFFromString
from gslib.util import StdinIterator
_SYNOPSIS = """
gsutil cp [OPTION]... src_url dst_url
gsutil cp [OPTION]... src_url... dst_url
gsutil cp [OPTION]... -I dst_url
"""
_SYNOPSIS_TEXT = """
<B>SYNOPSIS</B>
""" + _SYNOPSIS
_DESCRIPTION_TEXT = """
<B>DESCRIPTION</B>
The gsutil cp command allows you to copy data between your local file
system and the cloud, copy data within the cloud, and copy data between
cloud storage providers. For example, to copy all text files from the
local directory to a bucket you could do:
gsutil cp *.txt gs://my-bucket
Similarly, you can download text files from a bucket by doing:
gsutil cp gs://my-bucket/*.txt .
If you want to copy an entire directory tree you need to use the -r option:
gsutil cp -r dir gs://my-bucket
If you have a large number of files to transfer you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
copy:
gsutil -m cp -r dir gs://my-bucket
You can pass a list of URLs (one per line) to copy on stdin instead of as
command line arguments by using the -I option. This allows you to use gsutil
in a pipeline to upload or download files / objects as generated by a program,
such as:
some_program | gsutil -m cp -I gs://my-bucket
or:
some_program | gsutil -m cp -I ./download_dir
The contents of stdin can name files, cloud URLs, and wildcards of files
and cloud URLs.
"""
_NAME_CONSTRUCTION_TEXT = """
<B>HOW NAMES ARE CONSTRUCTED</B>
The gsutil cp command strives to name objects in a way consistent with how
Linux cp works, which causes names to be constructed in varying ways depending
on whether you're performing a recursive directory copy or copying
individually named objects; and whether you're copying to an existing or
non-existent directory.
When performing recursive directory copies, object names are constructed that
mirror the source directory structure starting at the point of recursive
processing. For example, if dir1/dir2 contains the file a/b/c then the
command:
gsutil cp -r dir1/dir2 gs://my-bucket
will create the object gs://my-bucket/dir2/a/b/c.
In contrast, copying individually named files will result in objects named by
the final path component of the source files. For example, again assuming
dir1/dir2 contains a/b/c, the command:
gsutil cp dir1/dir2/** gs://my-bucket
will create the object gs://my-bucket/c.
The same rules apply for downloads: recursive copies of buckets and
bucket subdirectories produce a mirrored filename structure, while copying
individually (or wildcard) named objects produce flatly named files.
Note that in the above example the '**' wildcard matches all names
anywhere under dir. The wildcard '*' will match names just one level deep. For
more details see "gsutil help wildcards".
There's an additional wrinkle when working with subdirectories: the resulting
names depend on whether the destination subdirectory exists. For example,
if gs://my-bucket/subdir exists as a subdirectory, the command:
gsutil cp -r dir1/dir2 gs://my-bucket/subdir
will create the object gs://my-bucket/subdir/dir2/a/b/c. In contrast, if
gs://my-bucket/subdir does not exist, this same gsutil cp command will create
the object gs://my-bucket/subdir/a/b/c.
Note: If you use the
`Google Cloud Platform Console <https://console.cloud.google.com>`_
to create folders, it does so by creating a "placeholder" object that ends
with a "/" character. gsutil skips these objects when downloading from the
cloud to the local file system, because attempting to create a file that
ends with a "/" is not allowed on Linux and MacOS. Because of this, it is
recommended that you not create objects that end with "/" (unless you don't
need to be able to download such objects using gsutil).
"""
_SUBDIRECTORIES_TEXT = """
<B>COPYING TO/FROM SUBDIRECTORIES; DISTRIBUTING TRANSFERS ACROSS MACHINES</B>
You can use gsutil to copy to and from subdirectories by using a command
like:
gsutil cp -r dir gs://my-bucket/data
This will cause dir and all of its files and nested subdirectories to be
copied under the specified destination, resulting in objects with names like
gs://my-bucket/data/dir/a/b/c. Similarly you can download from bucket
subdirectories by using a command like:
gsutil cp -r gs://my-bucket/data dir
This will cause everything nested under gs://my-bucket/data to be downloaded
into dir, resulting in files with names like dir/data/a/b/c.
Copying subdirectories is useful if you want to add data to an existing
bucket directory structure over time. It's also useful if you want
to parallelize uploads and downloads across multiple machines (potentially
reducing overall transfer time compared with simply running gsutil -m
cp on one machine). For example, if your bucket contains this structure:
gs://my-bucket/data/result_set_01/
gs://my-bucket/data/result_set_02/
...
gs://my-bucket/data/result_set_99/
you could perform concurrent downloads across 3 machines by running these
commands on each machine, respectively:
gsutil -m cp -r gs://my-bucket/data/result_set_[0-3]* dir
gsutil -m cp -r gs://my-bucket/data/result_set_[4-6]* dir
gsutil -m cp -r gs://my-bucket/data/result_set_[7-9]* dir
Note that dir could be a local directory on each machine, or it could be a
directory mounted off of a shared file server; whether the latter performs
acceptably will depend on a number of factors, so we recommend experimenting
to find out what works best for your computing environment.
"""
_COPY_IN_CLOUD_TEXT = """
<B>COPYING IN THE CLOUD AND METADATA PRESERVATION</B>
If both the source and destination URL are cloud URLs from the same
provider, gsutil copies data "in the cloud" (i.e., without downloading
to and uploading from the machine where you run gsutil). In addition to
the performance and cost advantages of doing this, copying in the cloud
preserves metadata (like Content-Type and Cache-Control). In contrast,
when you download data from the cloud it ends up in a file, which has
no associated metadata. Thus, unless you have some way to hold on to
or re-create that metadata, downloading to a file will not retain the
metadata.
Copies spanning locations and/or storage classes cause data to be rewritten
in the cloud, which may take some time (but still will be faster than
downloading and re-uploading). Such operations can be resumed with the same
command if they are interrupted, so long as the command parameters are
identical.
Note that by default, the gsutil cp command does not copy the object
ACL to the new object, and instead will use the default bucket ACL (see
"gsutil help defacl"). You can override this behavior with the -p
option (see OPTIONS below).
One additional note about copying in the cloud: If the destination bucket has
versioning enabled, by default gsutil cp will copy only live versions of the
source object(s). For example:
gsutil cp gs://bucket1/obj gs://bucket2
will cause only the single live version of gs://bucket1/obj to be copied to
gs://bucket2, even if there are archived versions of gs://bucket1/obj. To also
copy archived versions, use the -A flag:
gsutil cp -A gs://bucket1/obj gs://bucket2
| |
"""pypsbuilder classes used by builders.
This module contains classes and tools providing API to THERMOCALC, parsing of
outputs and storage of calculated invariant points and univariant lines.
Todo:
* Implement own class for divariant fields
"""
# author: <NAME>
# website: petrol.natur.cuni.cz/~ondro
import sys
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import gzip
import subprocess
# import itertools
# import re
from pathlib import Path
# from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import LineString, Point
from shapely.ops import polygonize, linemerge # unary_union
popen_kw = dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=False)
polymorphs = [{'sill', 'and'}, {'ky', 'and'}, {'sill', 'ky'}, {'q', 'coe'}, {'diam', 'gph'}]
"""list: List of two-element sets containing polymorphs."""
class InitError(Exception):
pass
class ScriptfileError(Exception):
pass
class TCError(Exception):
pass
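# Illustrative sketch (hypothetical path): the intended use of the TCAPI
# wrapper defined below -- point it at a THERMOCALC working directory and
# inspect the OK flag / status message set by the initial sanity checks.
def _example_tcapi_usage(workdir='/path/to/thermocalc/workdir'):
    tc = TCAPI(workdir)
    if tc.OK:
        return tc.tcversion, sorted(tc.phases)
    return tc.status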
class TCAPI(object):
"""THERMOCALC working directory API.
Attributes:
workdir (pathlib.Path): Path instance pointing to working directory.
tcexe (pathlib.Path): Path instance pointing to *THERMOCALC* executable.
drexe (pathlib.Path): Path instance pointing to *drawpd* executable.
name (str): Basename of the project.
axname (str): Name of a-x file in use.
OK (bool): Boolean value. True when all settings are correct and
THERMOCALC is ready to be used by builders.
excess (set): Set of excess phases from scriptfile.
trange (tuple): Tuple of temperature window from setdefTwindow
prange (tuple): Tuple of pressure window from setdefPwindow
bulk (list): List of bulk composition(s).
ptx_steps (int): Number of compositional steps for T-X and P-X sections.
phases (list): List of names of available phases.
TCenc (str): Encoding used for THERMOCALC output text files.
Default 'mac-roman'.
Raises:
InitError: An error occurred during initialization of working dir.
ScriptfileError: Error or problem in scriptfile.
TCError: THERMOCALC bombed.
"""
def __init__(self, workdir, tcexe=None, drexe=None):
self.workdir = Path(workdir).resolve()
self.TCenc = 'mac-roman'
try:
errinfo = 'Initialize project error!'
self.tcexe = None
self.drexe = None
if tcexe is not None:
self.tcexe = self.workdir / tcexe
if drexe is not None:
self.drexe = self.workdir / drexe
if self.tcexe is None:
# default exe
if sys.platform.startswith('win'):
tcpat = 'tc3*.exe'
else:
tcpat = 'tc3*'
# THERMOCALC exe
for p in self.workdir.glob(tcpat):
if p.is_file() and os.access(str(p), os.X_OK):
self.tcexe = p.resolve()
break
if self.drexe is None:
# default exe
if sys.platform.startswith('win'):
drpat = 'dr1*.exe'
else:
drpat = 'dr1*'
# DRAWPD exe
for p in self.workdir.glob(drpat):
if p.is_file() and os.access(str(p), os.X_OK):
self.drexe = p.resolve()
break
if not self.tcexe:
raise InitError('No THERMOCALC executable in working directory.')
# if not self.drexe:
# InitError('No drawpd executable in working directory.')
# tc-prefs file
if not self.workdir.joinpath('tc-prefs.txt').exists():
raise InitError('No tc-prefs.txt file in working directory.')
errinfo = 'tc-prefs.txt file in working directory cannot be accessed.'
for line in self.workdir.joinpath('tc-prefs.txt').open('r', encoding=self.TCenc):
kw = line.split()
if kw != []:
if kw[0] == 'scriptfile':
self.name = kw[1]
if not self.scriptfile.exists():
raise InitError('tc-prefs: scriptfile tc-' + self.name + '.txt does not exist in your working directory.')
if kw[0] == 'calcmode':
if kw[1] != '1':
raise InitError('tc-prefs: calcmode must be 1.')
if kw[0] == 'dontwrap':
if kw[1] != 'no':
raise InitError('tc-prefs: dontwrap must be no.')
# defaults
self.ptx_steps = 20 # IS IT NEEDED ????
# Checks various settings
errinfo = 'Scriptfile error!'
with self.scriptfile.open('r', encoding=self.TCenc) as f:
r = f.read()
lines = [ln.strip() for ln in r.splitlines() if ln.strip() != '']
lines = lines[:lines.index('*')] # remove part not used by TC
# Check pypsbuilder blocks
if not ('%{PSBCALC-BEGIN}' in lines and '%{PSBCALC-END}' in lines):
raise ScriptfileError('There are not {PSBCALC-BEGIN} and {PSBCALC-END} tags in your scriptfile.')
if not ('%{PSBGUESS-BEGIN}' in lines and '%{PSBGUESS-END}' in lines):
raise ScriptfileError('There are not {PSBGUESS-BEGIN} and {PSBGUESS-END} tags in your scriptfile.')
if not ('%{PSBBULK-BEGIN}' in lines and '%{PSBBULK-END}' in lines):
raise ScriptfileError('There are not {PSBBULK-BEGIN} and {PSBBULK-END} tags in your scriptfile.')
# Create scripts directory
scripts = {}
for ln in lines:
ln_clean = ln.split('%')[0].strip()
if ln_clean != '':
tokens = ln_clean.split(maxsplit=1)
if len(tokens) > 1:
if tokens[0] in scripts:
scripts[tokens[0]].append(tokens[1].strip())
else:
scripts[tokens[0]] = [tokens[1].strip()]
else:
scripts[tokens[0]] = []
# axfile
if 'axfile' not in scripts:
raise ScriptfileError('No axfile script, axfile is mandatory script.')
errinfo = 'Missing argument for axfile script in scriptfile.'
self.axname = scripts['axfile'][0]
if not self.axfile.exists():
raise ScriptfileError('axfile ' + str(self.axfile) + ' does not exist in working directory')
# diagramPT
if 'diagramPT' not in scripts:
raise ScriptfileError('No diagramPT script, diagramPT is mandatory script.')
errinfo = 'Wrong arguments for diagramPT script in scriptfile.'
pmin, pmax, tmin, tmax = scripts['diagramPT'][0].split()
self.prange = float(pmin), float(pmax)
self.trange = float(tmin), float(tmax)
# bulk
errinfo = 'Wrong bulk in scriptfile.'
if 'bulk' not in scripts:
raise ScriptfileError('No bulk script, bulk must be provided.')
if not (1 < len(scripts['bulk']) < 4):
raise ScriptfileError('Bulk script must have 2 or 3 lines.')
self.bulk = []
self.bulk.append(scripts['bulk'][0].split())
self.bulk.append(scripts['bulk'][1].split())
if len(scripts['bulk']) == 3:
self.bulk.append(scripts['bulk'][2].split()[:len(self.bulk[0])]) # remove possible number of steps
# inexcess
errinfo = 'Wrong inexcess in scriptfile.'
if 'setexcess' in scripts:
raise ScriptfileError('setexcess script deprecated, use inexcess instead.')
if 'inexcess' in scripts:
if scripts['inexcess']:
self.excess = set(scripts['inexcess'][0].split()) - set(['no'])
else:
raise ScriptfileError('In case of no excess phases, use inexcess no')
# omit
errinfo = 'Wrong omit in scriptfile.'
if 'omit' in scripts:
self.omit = set(scripts['omit'][0].split())
else:
self.omit = set()
# samecoding
if 'samecoding' in scripts:
self.samecoding = [set(sc.split()) for sc in scripts['samecoding']]
# pseudosection
if 'pseudosection' not in scripts:
raise ScriptfileError('No pseudosection script, pseudosection is mandatory script.')
# autoexit
if 'autoexit' not in scripts:
raise ScriptfileError('No autoexit script, autoexit must be provided.')
# dogmin
if 'dogmin' in scripts:
raise ScriptfileError('Dogmin script should be removed from scriptfile.')
# TC
errinfo = 'Error during initial TC run.'
calcs = ['calcP {}'.format(sum(self.prange) / 2),
'calcT {}'.format(sum(self.trange) / 2),
'with xxx']
old_calcs = self.update_scriptfile(get_old_calcs=True, calcs=calcs)
output = self.runtc()
self.update_scriptfile(calcs=old_calcs)
if '-- run bombed in whichphases' not in output:
raise TCError(output)
self.tcout = output.split('-- run bombed in whichphases')[0].strip()
ax_phases = set(self.tcout.split('reading ax:')[1].split(2 * os.linesep)[0].split())
# which
if 'with' in scripts:
if scripts['with'][0].split()[0] == 'someof':
raise ScriptfileError('Pypsbuilder does not support with someof <phase list>. Use omit {}'.format(' '.join(ax_phases.union(*self.samecoding) - set(scripts['with'][0].split()[1:]))))
# union ax phases and samecoding and diff omit
self.phases = ax_phases.union(*self.samecoding) - self.omit
# OK
self.status = 'Initial check done.'
self.OK = True
except BaseException as e:
if isinstance(e, InitError) or isinstance(e, ScriptfileError) or isinstance(e, TCError):
self.status = '{}: {}'.format(type(e).__name__, str(e))
else:
self.status = '{}: {} {}'.format(type(e).__name__, str(e), errinfo)
self.OK = False
def __str__(self):
return str(self.workdir)
def __repr__(self):
if self.OK:
return '\n'.join(['{}'.format(self.tcversion),
'Working directory: {}'.format(self.workdir),
'Scriptfile: {}'.format('tc-' + self.name + '.txt'),
'AX file: {}'.format('tc-' + self.axname + '.txt'),
'Status: {}'.format(self.status)])
else:
return '\n'.join(['Uninitialized working directory {}'.format(self.workdir),
'Status: {}'.format(self.status)])
@property
def scriptfile(self):
"""pathlib.Path: Path to scriptfile."""
return self.workdir.joinpath('tc-' + self.name + '.txt')
def read_scriptfile(self):
with self.scriptfile.open('r', encoding=self.TCenc) as f:
r = f.read()
return r
@property
def drfile(self):
"""pathlib.Path: Path to -dr output file."""
return self.workdir.joinpath('tc-' + self.name + '-dr.txt')
@property
def logfile(self):
"""pathlib.Path: Path to THERMOCALC log file."""
return self.workdir.joinpath('tc-log.txt')
@property
def icfile(self):
"""pathlib.Path: Path to ic file."""
return self.workdir.joinpath('tc-' + self.name + '-ic.txt')
@property
def itfile(self):
"""pathlib.Path: Path to it file."""
return self.workdir.joinpath('tc-' + self.name + '-it.txt')
@property
def ofile(self):
"""pathlib.Path: Path to project output file."""
return self.workdir.joinpath('tc-' + self.name + '-o.txt')
@property
def csvfile(self):
"""pathlib.Path: Path to csv file."""
return self.workdir.joinpath('tc-' + self.name + '-csv.txt')
@property
def drawpdfile(self):
"""pathlib.Path: Path to drawpd file."""
return self.workdir.joinpath('dr-' + self.name + '.txt')
@property
def axfile(self):
"""pathlib.Path: Path to used a-x file."""
return self.workdir.joinpath('tc-' + self.axname + '.txt')
@property
def prefsfile(self):
"""pathlib.Path: Path to THERMOCALC prefs file."""
return self.workdir.joinpath('tc-prefs.txt')
def read_prefsfile(self):
with self.prefsfile.open('r', encoding=self.TCenc) as f:
r = f.read()
return r
@property
def tcversion(self):
"""str: Version identification of THERMCALC executable."""
return self.tcout.split('\n')[0]
@property
def tcnewversion(self):
"""bool: False for THERMOCALC older than 3.5."""
return not float(self.tcversion.split()[1]) < 3.5
@property
def datasetfile(self):
"""pathlib.Path: Path to dataset file."""
return self.workdir.joinpath(self.dataset.split(' produced')[0])
@property
def dataset(self):
"""str: Version identification of thermodynamic dataset in use."""
return self.tcout.split('using ')[1].split('\n')[0]
def parse_logfile(self, **kwargs):
"""Parser for THERMOCALC output.
It parses the outputs of THERMOCALC after calculation.
Args:
tx (bool): True for T-X and P-X calculations. Default False.
output (str): When not None, used as content of logfile. Default None.
resic (str): When | |
len(text) == 0) and \
isinstance(new_properties, str) and \
len(new_properties) > 0 and \
text != new_properties:
self.set_section_data(section_name, new_properties)
else:
properties = self.get_section_properties(section_name)
new_properties.update(properties)
self.set_section_properties(section_name, new_properties)
self._sections = self._order_sections(self._sections)
def validate_merge_defaults(self):
self._merge_default_values()
self._json_schema.validate(self.to_JSON())
self._validate_algorithm_problem()
self._validate_operator_problem()
def _validate_algorithm_problem(self):
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is None:
return
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
problems = InputParser.get_algorithm_problems(algo_name)
if problem_name not in problems:
raise QiskitChemistryError("Problem: {} not in the list of problems: {} for algorithm: {}.".format(
problem_name, problems, algo_name))
def _validate_operator_problem(self):
operator_name = self.get_section_property(InputParser.OPERATOR, JSONSchema.NAME)
if operator_name is None:
return
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
problems = InputParser.get_operator_problems(operator_name)
if problem_name not in problems:
raise QiskitChemistryError(
"Problem: {} not in the list of problems: {} for operator: {}.".format(problem_name, problems, operator_name))
def to_JSON(self):
json_dict = OrderedDict()
for section_name in self.get_section_names():
if self.section_is_text(section_name):
json_dict[section_name] = self.get_section_text(section_name)
else:
json_dict[section_name] = self.get_section_properties(
section_name)
return json_dict
def to_dictionary(self):
dict = OrderedDict()
for section_name in self.get_section_names():
if self.section_is_text(section_name):
dict[section_name] = self.get_section_text(section_name).splitlines()
else:
dict[section_name] = self.get_section_properties(section_name)
return dict
def commit_changes(self):
self._original_sections = copy.deepcopy(self._sections)
def save_to_file(self, file_name):
if file_name is None:
raise QiskitChemistryError('Missing file path')
file_name = file_name.strip()
if len(file_name) == 0:
raise QiskitChemistryError('Missing file path')
prev_filename = self.get_filename()
sections = copy.deepcopy(self.get_sections())
if prev_filename is not None:
prev_dirname = os.path.dirname(os.path.realpath(prev_filename))
dirname = os.path.dirname(os.path.realpath(file_name))
if prev_dirname != dirname:
InputParser._from_relative_to_abs_paths(
sections, prev_filename)
contents = ''
lastIndex = len(sections) - 1
for i, (section_name, section) in enumerate(sections.items()):
contents += '{}{}'.format(InputParser._START_SECTION, section_name)
if self.section_is_text(section_name):
value = section['data']
if value is not None:
contents += '\n{}'.format(str(value))
else:
if 'properties' in section:
for k, v in section['properties'].items():
contents += '\n {}{}{}'.format(
k, InputParser._PROPVALUE_SEPARATOR, str(v))
contents += '\n{}'.format(InputParser._END_SECTION)
if i < lastIndex:
contents += '\n\n'
with open(file_name, 'w') as f:
print(contents, file=f)
def export_dictionary(self, file_name):
if file_name is None:
raise QiskitChemistryError('Missing file path')
file_name = file_name.strip()
if len(file_name) == 0:
raise QiskitChemistryError('Missing file path')
value = json.loads(json.dumps(self.to_dictionary()))
value = pprint.pformat(value, indent=4)
with open(file_name, 'w') as f:
print(value, file=f)
@staticmethod
def _from_relative_to_abs_paths(sections, filename):
directory = os.path.dirname(filename)
for _, section in sections.items():
if 'properties' in section:
for key, value in section['properties'].items():
if key == InputParser._HDF5_INPUT:
if value is not None and not os.path.isabs(value):
value = os.path.abspath(
os.path.join(directory, value))
InputParser._set_section_property(
sections, section[JSONSchema.NAME], key, value, ['string'])
def section_is_driver(self, section_name):
section_name = JSONSchema.format_section_name(section_name).lower()
InputParser._load_driver_names()
return section_name in InputParser._DRIVER_NAMES
def section_is_text(self, section_name):
section_name = JSONSchema.format_section_name(section_name).lower()
types = self.get_section_types(section_name)
if len(types) > 0:
return 'string' in types
return False
def get_sections(self):
return self._sections
def get_section(self, section_name):
"""Return a Section by name.
Args:
section_name (str): the name of the section, case insensitive
Returns:
Section: The section with this name
Raises:
QiskitChemistryError: if the section does not exist.
"""
section_name = JSONSchema.format_section_name(section_name).lower()
try:
return self._sections[section_name]
except KeyError:
raise QiskitChemistryError('No section "{0}"'.format(section_name))
def get_section_text(self, section_name):
section = self.get_section(section_name)
if section is None:
return ''
if 'data' in section:
return section['data']
return ''
def get_section_properties(self, section_name):
section = self.get_section(section_name)
if section is None:
return {}
if 'properties' in section:
return section['properties']
return {}
def get_section_property(self, section_name, property_name, default_value=None):
"""Return a property by name.
Args:
section_name (str): the name of the section, case insensitive
property_name (str): the property name in the section
default_value : default value in case it is not found
Returns:
Value: The property value
"""
section_name = JSONSchema.format_section_name(section_name).lower()
property_name = JSONSchema.format_property_name(property_name)
if section_name in self._sections:
section = self._sections[section_name]
if 'properties' in section and property_name in section['properties']:
return section['properties'][property_name]
return default_value
def get_section_data(self, section_name, default_value=None):
"""
Return a section data.
Args:
section_name (str): the name of the section, case insensitive
default_value : default value in case it is not found
Returns:
Value: data value
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name in self._sections:
section = self._sections[section_name]
if 'data' in section:
return section['data']
return default_value
def set_section(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name not in self._sections:
self._sections[section_name] = OrderedDict(
[(JSONSchema.NAME, section_name)])
self._sections[section_name]['properties'] = OrderedDict()
self._sections[section_name]['data'] = ''
self._sections = self._order_sections(self._sections)
def delete_section(self, section_name):
"""
Args:
section_name (str): the name of the section, case insensitive
"""
section_name = JSONSchema.format_section_name(section_name).lower()
if section_name not in self._sections:
return
del self._sections[section_name]
# update schema
self._json_schema.rollback_changes()
self._json_schema.update_backend_schema()
self._json_schema.update_pluggable_input_schemas(self)
self._update_driver_input_schemas()
self._update_operator_input_schema()
def set_section_properties(self, section_name, properties):
self.delete_section_properties(section_name)
for property_name, value in properties.items():
self.set_section_property(section_name, property_name, value)
def set_section_property(self, section_name, property_name, value):
section_name = JSONSchema.format_section_name(section_name).lower()
property_name = JSONSchema.format_property_name(property_name)
value = self._json_schema.check_property_value(section_name, property_name, value)
types = self.get_property_types(section_name, property_name)
parser_temp = copy.deepcopy(self)
InputParser._set_section_property(parser_temp._sections, section_name, property_name, value, types)
msg = self._json_schema.validate_property(parser_temp.to_JSON(), section_name, property_name)
if msg is not None:
raise QiskitChemistryError("{}.{}: Value '{}': '{}'".format(section_name, property_name, value, msg))
# check if this provider is loadable and valid
if JSONSchema.BACKEND == section_name and property_name == JSONSchema.PROVIDER:
get_backends_from_provider(value)
InputParser._set_section_property(self._sections, section_name, property_name, value, types)
if property_name == JSONSchema.NAME:
if InputParser.OPERATOR == section_name:
self._update_operator_input_schema()
# remove properties that are not valid for this section
default_properties = self.get_section_default_properties(section_name)
if isinstance(default_properties, dict):
properties = self.get_section_properties(section_name)
for property_name in list(properties.keys()):
if property_name != JSONSchema.NAME and property_name not in default_properties:
self.delete_section_property(section_name, property_name)
elif JSONSchema.PROBLEM == section_name:
self._update_algorithm_problem()
self._update_operator_problem()
elif JSONSchema.BACKEND == section_name:
self._json_schema.update_backend_schema()
elif InputParser.is_pluggable_section(section_name):
self._json_schema.update_pluggable_input_schemas(self)
# remove properties that are not valid for this section
default_properties = self.get_section_default_properties(section_name)
if isinstance(default_properties, dict):
properties = self.get_section_properties(section_name)
for property_name in list(properties.keys()):
if property_name != JSONSchema.NAME and property_name not in default_properties:
self.delete_section_property(section_name, property_name)
if section_name == PluggableType.ALGORITHM.value:
self._update_dependency_sections()
elif value is not None:
value = str(value).lower().strip()
if len(value) > 0 and self.section_is_driver(value):
self._update_driver_input_schemas()
self._update_driver_sections()
self._sections = self._order_sections(self._sections)
def _update_algorithm_problem(self):
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
if algo_name is not None and problem_name in InputParser.get_algorithm_problems(algo_name):
return
for algo_name in local_pluggables(PluggableType.ALGORITHM):
if problem_name in self.get_algorithm_problems(algo_name):
# set to the first algorithm to solve the problem
self.set_section_property(
PluggableType.ALGORITHM.value, JSONSchema.NAME, algo_name)
return
# no algorithm solves this problem, remove section
self.delete_section(PluggableType.ALGORITHM.value)
def _update_operator_problem(self):
problem_name = self.get_section_property(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
problem_name = self.get_property_default_value(JSONSchema.PROBLEM, JSONSchema.NAME)
if problem_name is None:
raise QiskitChemistryError("No algorithm 'problem' section found on input.")
operator_name = self.get_section_property(
InputParser.OPERATOR, JSONSchema.NAME)
if operator_name is not None and problem_name in InputParser.get_operator_problems(operator_name):
return
for operator_name in local_chemistry_operators():
if problem_name in self.get_operator_problems(operator_name):
# set to the first input to solve the problem
self.set_section_property(InputParser.OPERATOR, JSONSchema.NAME, operator_name)
return
# no input solves this problem, remove section
self.delete_section(InputParser.OPERATOR)
def _update_dependency_sections(self):
algo_name = self.get_section_property(PluggableType.ALGORITHM.value, JSONSchema.NAME)
config = {} if algo_name is None else get_pluggable_configuration(PluggableType.ALGORITHM, algo_name)
classical = config['classical'] if 'classical' in config else False
pluggable_dependencies = [] if 'depends' not in config else config['depends']
pluggable_defaults = {} if 'defaults' not in config else config['defaults']
for pluggable_type in local_pluggables_types():
# remove pluggables from input that are not in the dependencies
if pluggable_type not in [PluggableType.INPUT, PluggableType.ALGORITHM] and \
pluggable_type.value not in pluggable_dependencies and \
pluggable_type.value in self._sections:
del self._sections[pluggable_type.value]
for pluggable_type in pluggable_dependencies:
pluggable_name = None
if pluggable_type in pluggable_defaults:
if JSONSchema.NAME in pluggable_defaults[pluggable_type]:
pluggable_name = pluggable_defaults[pluggable_type][JSONSchema.NAME]
if pluggable_name is not None and pluggable_type not in self._sections:
self.set_section_property(pluggable_type, JSONSchema.NAME, pluggable_name)
# update default values for new dependency pluggable types
self.set_section_properties(pluggable_type, self.get_section_default_properties(pluggable_type))
# update backend based on classical
if classical:
if JSONSchema.BACKEND in self._sections:
del self._sections[JSONSchema.BACKEND]
else:
if JSONSchema.BACKEND not in self._sections:
self.set_section_properties(JSONSchema.BACKEND, self.get_section_default_properties(JSONSchema.BACKEND))
# reorder sections
self._sections = self._order_sections(self._sections)
def _update_driver_sections(self):
driver_name = self.get_section_property(InputParser.DRIVER, JSONSchema.NAME)
if driver_name is not None:
driver_name = driver_name.strip().lower()
for name in local_drivers():
name = name.lower()
if driver_name is not None and driver_name == name:
continue
if name in self._sections:
del self._sections[name]
if driver_name is not None and driver_name not in self._sections:
self.set_section(driver_name)
value = self.get_section_default_properties(driver_name)
if isinstance(value, dict):
for property_name, property_value in value.items():
self.set_section_property(
driver_name, property_name, property_value)
else:
if value is None:
types = self.get_section_types(driver_name)
if 'null' not in types:
if 'string' in types:
value = ''
elif 'object' in types:
value | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" fogbench -- a Python script used to test FogLAMP.
The objective is to simulate payloads for input, REST and other requests against one or
more FogLAMP instances. This version of fogbench is meant to test the CoAP and HTTP plugins
interface of FogLAMP southbound services.
fogbench
[IN] -h --help Print this help
-i --interval The interval in seconds between each iteration (default: 0)
[IN] -k --keep Do not delete (keep) the running sample (default: no)
[IN] -o --output Set the output file for statistics
[IN] -p --payload Type of payload and protocol (default: coap)
[IN] -t --template Set the template to use
[IN] -v --version Display the version and exit
[IN] -H --host The FogLAMP host (default: localhost)
-I --iterations The number of iterations of the test (default: 1)
[IN] -O --occurrences The number of occurrences of the template (default: 1)
[IN] -P --port The FogLAMP port. Default depends on payload and protocol
[IN] -S --statistic The type of statistics to collect
Example:
$ cd $FOGLAMP_ROOT/bin
$ ./fogbench
Help:
$ ./fogbench -h
* Create reading objects from given template, as per the json file name specified with -t
* Save those objects to the file, as per the file name specified with -o
* Read those objects
* Send those to CoAP or HTTP south plugin server, on specific host and port
.. todo::
* Try generators
"""
import sys
import os
import random
import json
from datetime import datetime, timezone
import argparse
import collections
import asyncio
import aiohttp
from .exceptions import *
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_FOGBENCH_VERSION = u"0.1.1"
_start_time = []
_end_time = []
_tot_msgs_transferred = []
_tot_byte_transferred = []
_num_iterated = 0
"""Statistics to be collected"""
# _logger = logger.setup(__name__)
def local_timestamp():
"""
:return: str - current time stamp with microseconds and machine timezone info
:example '2018-05-08 14:06:40.517313+05:30'
"""
return str(datetime.now(timezone.utc).astimezone())
def read_templates():
templates = []
return templates
def parse_template_and_prepare_json(_template_file,
_write_to_file=None, _occurrences=1):
# template_file = os.path.join(os.path.dirname(__file__), "templates/" + _template_file)
with open(_template_file) as data_file:
data = json.load(data_file)
supported_format_types = ["number", "enum"]
for _ in range(_occurrences):
readings_ = _prepare_sensor_reading(data, supported_format_types)
for r in readings_:
_write_readings_to_file(_write_to_file, r)
def _write_readings_to_file(to_file, r):
with open(to_file, 'a') as the_file:
json.dump(r, the_file)
the_file.write(os.linesep)
def _prepare_sensor_reading(data, supported_format_types):
readings = []
for d in data:
x_sensor_values = dict()
_sensor_value_object_formats = d["sensor_values"]
for fmt in _sensor_value_object_formats:
if fmt["type"] not in supported_format_types:
raise InvalidSensorValueObjectTemplateFormat(u"Invalid format, "
u"Can not parse type {}".format(fmt["type"]))
if fmt["type"] == "number":
# check float precision if any
precision = fmt.get("precision", None)
min_val = fmt.get("min", None)
max_val = fmt.get("max", None)
if min_val is None or max_val is None:
raise InvalidSensorValueObjectTemplateFormat(u"Invalid format, "
u"Min and Max values must be defined for type number.")
# print(precision)
# print(min_val)
# print(max_val)
reading = round(random.uniform(min_val, max_val), precision)
elif fmt["type"] == "enum":
reading = random.choice(fmt["list"])
# print(fmt["name"], reading)
x_sensor_values[fmt["name"]] = reading
# print(d["name"])
sensor_value_object = dict()
sensor_value_object["asset"] = d['name']
sensor_value_object["readings"] = x_sensor_values
sensor_value_object["timestamp"] = "{!s}".format(local_timestamp())
# print(json.dumps(sensor_value_object))
ord_dict = collections.OrderedDict(sorted(sensor_value_object.items()))
readings.append(ord_dict)
return readings
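# Illustrative sketch (assumed template shape, not a file shipped with fogbench):
# the Python-literal equivalent of a JSON template that
# parse_template_and_prepare_json()/_prepare_sensor_reading() above can consume,
# covering the "number" and "enum" formats handled there.
_EXAMPLE_TEMPLATE = [
    {
        "name": "TI sensorTag/luxometer",
        "sensor_values": [
            {"name": "lux", "type": "number", "min": 0, "max": 130, "precision": 0},
            {"name": "mode", "type": "enum", "list": ["normal", "eco"]}
        ]
    }
]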
def read_out_file(_file=None, _keep=False, _iterations=1, _interval=0, send_to='coap'):
global _start_time
global _end_time
global _tot_msgs_transferred
global _tot_byte_transferred
global _num_iterated
# from pprint import pprint
import time
# _file = os.path.join(os.path.dirname(__file__), "out/{}".format(outfile))
with open(_file) as f:
readings_list = [json.loads(line) for line in f]
loop = asyncio.get_event_loop()
while _iterations > 0:
# Pre-calculate the messages and size
msg_transferred_itr = 0 # Messages transferred in every iteration
byte_transferred_itr = 0 # Bytes transferred in every iteration
for r in readings_list:
msg_transferred_itr += 1
byte_transferred_itr += sys.getsizeof(r)
if send_to == 'coap':
_start_time.append(datetime.now())
for r in readings_list:
is_sent = loop.run_until_complete(send_to_coap(r))
if not is_sent:
break
elif send_to == 'http':
_start_time.append(datetime.now())
loop.run_until_complete(send_to_http(readings_list))
_end_time.append(datetime.now()) # End time of every iteration
_tot_msgs_transferred.append(msg_transferred_itr)
_tot_byte_transferred.append(byte_transferred_itr)
_iterations -= 1
_num_iterated += 1
if _iterations != 0:
# print(u"Iteration {} completed, waiting for {} seconds".format(_iterations, _interval))
time.sleep(_interval)
if not _keep:
os.remove(_file)
async def send_to_coap(payload):
"""
POST request to:
localhost
port 5683 (official IANA assigned CoAP port),
URI "/other/sensor-values".
"""
from aiocoap import Context, Message
from aiocoap.numbers.codes import Code
from cbor2 import dumps
context = await Context.create_client_context()
request = Message(payload=dumps(payload), code=Code.POST)
request.opt.uri_host = arg_host
request.opt.uri_port = arg_port
request.opt.uri_path = ("other", "sensor-values")
response = await context.request(request).response
str_res = str(response.code)
status_code = str_res[:4] # or str_res.split()[0]
if status_code == "4.00" or status_code == "5.00":
print("Error: ", str_res)
return False
return True
async def send_to_http(payload):
"""
POST request to:
host localhost
port 6683 (default HTTP south plugin port),
uri sensor-reading
"""
headers = {'content-type': 'application/json'}
url = 'http://{}:{}/sensor-reading'.format(arg_host, arg_port)
async with aiohttp.ClientSession() as session:
async with session.post(url, data=json.dumps(payload), headers=headers) as resp:
await resp.text()
status_code = resp.status
if status_code in range(400, 500):
print("Bad request error | code:{}, reason: {}".format(status_code, resp.reason))
return False
if status_code in range(500, 600):
print("Server error | code:{}, reason: {}".format(status_code, resp.reason))
return False
return True
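# Illustrative sketch (assumed host/port): pushing a single prepared reading
# through the HTTP sender above outside of read_out_file(), e.g. as a one-off
# smoke test. arg_host/arg_port are the module-level globals used by send_to_http.
def _example_send_one_reading(reading, host='localhost', port=6683):
    global arg_host, arg_port
    arg_host, arg_port = host, port
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(send_to_http([reading]))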
def get_statistics(_stats_type=None, _out_file=None):
stat = ''
global _start_time
global _end_time
global _tot_msgs_transferred
global _tot_byte_transferred
global _num_iterated
if _stats_type == 'total':
stat += u"Total Statistics:\n"
stat += (u"\nStart Time: {}".format(datetime.strftime(_start_time[0], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nEnd Time: {}\n".format(datetime.strftime(_end_time[-1], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nTotal Messages Transferred: {}".format(sum(_tot_msgs_transferred)))
stat += (u"\nTotal Bytes Transferred: {}\n".format(sum(_tot_byte_transferred)))
stat += (u"\nTotal Iterations: {}".format(_num_iterated))
stat += (u"\nTotal Messages per Iteration: {}".format(sum(_tot_msgs_transferred)/_num_iterated))
stat += (u"\nTotal Bytes per Iteration: {}\n".format(sum(_tot_byte_transferred)/_num_iterated))
_msg_rate = []
_byte_rate = []
for itr in range(_num_iterated):
time_taken = _end_time[itr] - _start_time[itr]
_msg_rate.append(_tot_msgs_transferred[itr]/(time_taken.seconds+time_taken.microseconds/1E6))
_byte_rate.append(_tot_byte_transferred[itr] / (time_taken.seconds+time_taken.microseconds/1E6))
stat += (u"\nMin messages/second: {}".format(min(_msg_rate)))
stat += (u"\nMax messages/second: {}".format(max(_msg_rate)))
stat += (u"\nAvg messages/second: {}\n".format(sum(_msg_rate)/_num_iterated))
stat += (u"\nMin Bytes/second: {}".format(min(_byte_rate)))
stat += (u"\nMax Bytes/second: {}".format(max(_byte_rate)))
stat += (u"\nAvg Bytes/second: {}".format(sum(_byte_rate)/_num_iterated))
if _out_file:
with open(_out_file, 'w') as f:
f.write(stat)
else:
print(stat)
# should we also show total time diff? end_time - start_time
def check_server(payload_type='coap'):
template_str = ">>> Make sure south {} plugin service is running \n & listening on specified host and port \n"
if payload_type == 'coap':
print(template_str.format("CoAP"))
elif payload_type == 'http':
print(template_str.format("HTTP"))
parser = argparse.ArgumentParser(prog='fogbench')
parser.description = '%(prog)s -- a Python script used to test FogLAMP (simulate payloads)'
parser.epilog = 'The initial version of %(prog)s is meant to test the south plugin interface of ' \
'FogLAMP using CoAP or HTTP'
parser.add_argument('-v', '--version', action='version', version='%(prog)s {0!s}'.format(_FOGBENCH_VERSION))
parser.add_argument('-k', '--keep', default=False, choices=['y', 'yes', 'n', 'no'],
help='Do not delete the running sample (default: no)')
parser.add_argument('-t', '--template', required=True, help='Set the template file, json extension')
parser.add_argument('-o', '--output', default=None, help='Set the statistics output file')
parser.add_argument('-p', '--payload', default='coap', choices=['coap', 'http'], help='Type of payload '
'and protocol (default: coap)')
parser.add_argument('-I', '--iterations', help='The number of iterations of the test (default: 1)')
parser.add_argument('-O', '--occurrences', help='The number of occurrences of the template (default: 1)')
parser.add_argument('-H', '--host', help='Server host address (default: localhost)')
parser.add_argument('-P', '--port', help='The FogLAMP port. (default: 5683)')
parser.add_argument('-i', '--interval', default=0, help='The interval in seconds for each iteration (default: 0)')
parser.add_argument('-S', '--statistics', default='total', choices=['total'], help='The type of statistics to collect '
'(default: total)')
namespace = parser.parse_args(sys.argv[1:])
infile = '{0}'.format(namespace.template if namespace.template else '')
statistics_file = os.path.join(os.path.dirname(__file__), "out/{}".format(namespace.output)) if namespace.output else None
keep_the_file = True if namespace.keep in ['y', 'yes'] else False
# iterations and occurrences
arg_iterations = int(namespace.iterations) if namespace.iterations else 1
arg_occurrences = int(namespace.occurrences) if namespace.occurrences else 1
# interval between each iteration
arg_interval = int(namespace.interval) if namespace.interval else 0
arg_stats_type = '{0}'.format(namespace.statistics) if namespace.statistics else 'total'
if namespace.payload:
arg_payload_protocol = namespace.payload
arg_host = '{0}'.format(namespace.host) if namespace.host else 'localhost'
default_port = 6683 if arg_payload_protocol == 'http' else 5683
arg_port = int(namespace.port) if namespace.port else default_port
check_server(arg_payload_protocol)
sample_file = os.path.join("/tmp", "foglamp_running_sample.{}".format(os.getpid()))
parse_template_and_prepare_json(_template_file=infile, _write_to_file=sample_file, _occurrences=arg_occurrences)
read_out_file(_file=sample_file, _keep=keep_the_file, _iterations=arg_iterations, _interval=arg_interval,
send_to=arg_payload_protocol)
get_statistics(_stats_type=arg_stats_type, _out_file=statistics_file)
# TODO: Change below per local_timestamp() values
""" Expected output from given template
{
"timestamp" : "2017-08-04T06:59:57.503Z",
"asset" : "TI sensorTag/luxometer",
"sensor_values" : { "lux" : 49 }
}
{
"timestamp" : "2017-08-04T06:59:57.863Z",
"asset" : "TI sensorTag/pressure",
"sensor_values" : { "pressure" : 1021.2 }
}
{
"timestamp" : "2017-08-04T06:59:58.863Z",
"asset" : "TI sensorTag/humidity",
"sensor_values" : { "humidity" : 71.2, "temperature" : 18.6 }
}
{
"timestamp" : "2017-08-04T06:59:59.863Z",
"asset" : "TI sensorTag/temperature",
"sensor_values" : { "object" : 18.2, "ambient" : 21.6 }
}
{
"timestamp" : "2017-08-04T07:00:00.863Z",
"asset" : "TI sensorTag/accelerometer",
"sensor_values" : { "x" : 1.2, "y" : 0.0, "z" : -0.6 }
}
{
"timestamp" : "2017-08-04T07:00:01.863Z",
"asset" : "TI sensorTag/gyroscope",
"sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 }
}
{
"timestamp" : "2017-08-04T07:00:02.863Z",
"asset" : "TI sensorTag/magnetometer",
"sensor_values" : | |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2018-2021 SAP SE or an SAP affiliate company and Cloud Security Client Java contributors
# SPDX-License-Identifier: Apache-2.0
import abc
import distutils
import http
import ssl
import subprocess
import urllib.request
from urllib.parse import urlencode
from urllib.error import HTTPError
from base64 import b64encode
import json
import unittest
import logging
import os
import time
import re
from getpass import getpass
from distutils.core import setup
# Usage information
# To run this script you must be logged into CF via 'cf login' Also make sure
# to change settings in vars.yml to your needs. This script deploys sample
# apps and fires post request against their endpoints. For some samples it
# needs to create a password token for which you need to provide your password
# (same as you would use for 'cf login'). You can do this by either supplying
# it via the system environment variable 'CFPASSWORD' or by typing it when the
# script prompts for the password. The same goes for the username with the
# variable 'CFUSER'.
# For IAS tests where manual user interaction is required to add user roles in SCP Cockpit,
# system environment variable USER_INPUT_ENABLED needs to be set to true
# by setting the value to 'y', 'yes', 't', 'true', 'on' or '1'; if it is disabled, user input won't be requested.
# Dependencies
# The script depends on python3 and the cloud foundry command line tool 'cf'.
# Running the script
# If the script is made executable, it can be started with cd
# It can also be started like so: python3 ./deploy_and_test.py
# By default it will run all unit tests.
# It is also possible to run specific test classes (if no token is required):
# python3 -m unittest deploy_and_test.TestJavaSecurity.test_hello_java_security
# This would only the run the test called 'test_hello_java_security'
# inside the test class 'TestJavaSecurity' inside the deploy_and_test.py file.
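# Illustrative sketch (hypothetical helper, not used elsewhere in this script):
# the same single-test selection shown in the usage notes above, done
# programmatically instead of via 'python3 -m unittest ...'.
def _run_single_test(test_name='TestJavaSecurity.test_hello_java_security'):
    import sys
    suite = unittest.defaultTestLoader.loadTestsFromName(test_name, module=sys.modules[__name__])
    return unittest.TextTestRunner(verbosity=2).run(suite)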
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] - [%(module)s.%(funcName)s: L%(lineno)d]: %(message)s')
cf_logs = open('cf-logs.log', 'w')
java_logs = open('java-logs.log', 'w')
RUN_TESTS = "Running '{}' tests"
RUN_TEST = "Running '{}' test"
EXPECT_200 = "Expected HTTP status 200"
EXPECT_401 = "Expected HTTP status 401"
EXPECT_403 = "Expected HTTP status 403"
class Credentials:
def __init__(self):
self.username = self.__get_env_variable('CFUSER', lambda: input("Username: "))
self.password = self.__get_env_variable('CFPASSWORD', lambda: getpass())
def __get_env_variable(self, env_variable_name, prompt_function):
value = os.getenv(env_variable_name)
if value is None:
value = prompt_function()
return value
credentials = Credentials()
# Abstract base class for sample app tests classes
class SampleTest(abc.ABC, unittest.TestCase):
cf_app = None
api_access = None
ias_access = None
@classmethod
@abc.abstractmethod
def get_app(cls):
"""Should return the sample app that should be tested """
cls.skipTest('Dont run abstract base class')
return cls.cf_app
@classmethod
def setUpClass(cls):
vars_file = open('./vars.yml')
cls.vars_parser = VarsParser(vars_file.read())
vars_file.close()
cls.cf_apps = CFApps()
cls.__deployed_app = None
cls.get_app(cls).deploy()
cls.credentials = credentials
time.sleep(2) # waiting for deployed apps to be available
@classmethod
def tearDownClass(cls):
cls.cf_app.delete()
if cls.api_access is not None:
cls.api_access.delete()
if cls.ias_access is not None:
cls.ias_access.delete()
def add_user_to_role(self, role):
logging.info('Assigning role collection {} for user {}'.format(role, self.credentials.username))
user = self.__get_api_access().get_user_by_username(self.credentials.username)
user_id = user.get('id')
resp = self.__get_api_access().add_user_to_group(user_id, role)
if not resp.is_ok and resp.status != 409: # 409 is returned when role is already assigned to the user
logging.error(
"Could not set '{}' role to user '{}'. Error: {} - {}".format(role, user.get('userName'), resp.status,
resp.body))
exit()
def perform_get_request(self, path, username=None, password=None):
if username is not None and password is not None:
authorization_value = b64encode(
bytes(username + ':' + password + self.__get_2factor_auth_code(), 'utf-8')).decode("ascii")
return self.__perform_get_request(path=path,
additional_headers={'Authorization': 'Basic ' + authorization_value})
return self.__perform_get_request(path=path)
def perform_get_request_with_token(self, path, additional_headers={}):
access_token = self.get_token().get('access_token')
if access_token is None:
logging.error("Cannot continue without access token")
exit()
return self.__perform_get_request(path=path, access_token=access_token, additional_headers=additional_headers)
def perform_get_request_with_ias_token(self, path, id_token, additional_headers={}):
return self.__perform_get_request(path=path, access_token=id_token, additional_headers=additional_headers)
def get_deployed_app(self):
if self.__deployed_app is None:
deployed_app = self.cf_apps.app_by_name(self.cf_app.name)
if deployed_app is None:
logging.error('Could not find app: ' + self.cf_app.name)
exit()
self.__deployed_app = deployed_app
return self.__deployed_app
def get_token(self):
deployed_app = self.get_deployed_app()
logging.info('GET xsuaa token to {} for user {} (credential-type = {}, clientid = {}, clientsecret = {})'.format(deployed_app.xsuaa_service_url,
self.credentials.username,
deployed_app.credential_type,
deployed_app.clientid,
deployed_app.clientsecret))
if deployed_app.credential_type == 'x509':
body = HttpUtil.encode_request_body(self, clientid=deployed_app.clientid,
grant_type='password',
username=self.credentials.username,
password=self.credentials.password + self.__get_2factor_auth_code())
return HttpUtil.post_request_x509(self, url=deployed_app.xsuaa_cert_url,
data=body,
certificate=deployed_app.certificate,
key=deployed_app.key)
else:
return HttpUtil().get_token(
xsuaa_service_url=deployed_app.xsuaa_service_url,
clientid=deployed_app.clientid,
clientsecret=deployed_app.clientsecret,
grant_type='password',
username=self.credentials.username,
password=self.credentials.password + self.__get_2factor_auth_code())
def get_id_token(self):
deployed_app = self.get_deployed_app()
logging.info(
'GET id token to {} for user {} ({}, {})'.format(deployed_app.ias_service_url, self.credentials.username,
deployed_app.ias_clientid, deployed_app.ias_clientsecret))
id_token = HttpUtil().get_id_token(
ias_service_url=deployed_app.ias_service_url + '/oauth2/token',
clientid=deployed_app.ias_clientid,
clientsecret=deployed_app.ias_clientsecret,
grant_type='password',
username=self.credentials.username,
password=self.credentials.password).get('id_token')
if id_token is None:
logging.error("Cannot continue without id token")
exit()
return id_token
@classmethod
def __get_api_access(cls):
if cls.api_access is None:
deployed_app = cls.get_deployed_app(cls)
cls.api_access = ApiAccessService(
xsuaa_service_url=deployed_app.xsuaa_service_url,
xsuaa_api_url=deployed_app.xsuaa_api_url)
return cls.api_access
@classmethod
def get_ias_access(cls, ias_name):
if cls.ias_access is None:
cls.ias_access = IasAccess(ias_name=ias_name)
return cls.ias_access
def __perform_get_request(self, path, access_token=None, additional_headers={}):
url = 'https://{}-{}.{}{}'.format(
self.cf_app.name,
self.vars_parser.user_id,
self.vars_parser.landscape_apps_domain,
path)
logging.info('GET request to {} {}'
.format(url, 'with access token: ' + access_token if access_token else 'without access token'))
resp = HttpUtil().get_request(url, access_token=access_token, additional_headers=additional_headers)
logging.info('Response: ' + str(resp))
return resp
def __get_2factor_auth_code(self):
auth_code = ""
if os.getenv('ENABLE_2_FACTOR') is not None:
auth_code = input("2-Factor Authenticator Code: ") or ""
return auth_code
def prompt_user_role_assignment(self):
usr_input_enabled = os.getenv("USER_INPUT_ENABLED")
if usr_input_enabled and bool(distutils.util.strtobool(usr_input_enabled)) is True:
input("Can't add user Role Collection to the custom IAS origin. \n"
"Please add the role 'Viewer' to user {} in SCP Cockpit manually. \n"
"Once done press enter to proceed with the test."
.format(self.credentials.username))
return True
return False
class TestTokenClient(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("TokenClientUsage"))
self.cf_app = CFApp(name='java-tokenclient-usage', xsuaa_service_name='xsuaa-token-client')
return self.cf_app
def test_hello_token_client(self):
logging.info(RUN_TEST.format("TestTokenClient.test_hello_token_client"))
response = self.perform_get_request('/hello-token-client')
self.assertEqual(response.status, 200, EXPECT_200)
body = response.body
self.assertIsNotNone(body)
self.assertRegex(body, "Access-Token: ")
self.assertRegex(body, "Access-Token-Payload: ")
self.assertRegex(body, "Expired-At: ")
class TestJavaSecurity(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("JavaSecurityUsage"))
self.cf_app = CFApp(name='java-security-usage', xsuaa_service_name='xsuaa-java-security')
return self.cf_app
def test_hello_java_security(self):
logging.info(RUN_TEST.format("TestJavaSecurity.test_hello_java_security"))
resp = self.perform_get_request('/hello-java-security')
self.assertEqual(resp.status, 401, EXPECT_401)
resp = self.perform_get_request('/hello-java-security-authz')
self.assertEqual(resp.status, 401, EXPECT_401)
resp = self.perform_get_request_with_token('/hello-java-security-authz')
self.assertEqual(resp.status, 403, EXPECT_403)
self.add_user_to_role('JAVA_SECURITY_SAMPLE_Viewer')
resp = self.perform_get_request_with_token('/hello-java-security-authz')
self.assertEqual(resp.status, 200, EXPECT_200)
resp = self.perform_get_request_with_token('/hello-java-security')
self.assertEqual(resp.status, 200, EXPECT_200)
xsappname = self.get_deployed_app().get_credentials_property('xsappname')
expected_scope = xsappname + '.Read'
self.assertIsNotNone(resp.body)
self.assertRegex(resp.body, self.credentials.username,
"Did not find username '{}' in response body".format(self.credentials.username))
self.assertRegex(resp.body, expected_scope,
"Expected to find scope '{}' in response body: ".format(expected_scope))
class TestSpringSecurityHybrid(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("SpringSecurityHybrid"))
self.cf_app = CFApp(name='spring-security-hybrid-usage', xsuaa_service_name='xsuaa-authn', identity_service_name='ias-authn')
return self.cf_app
def test_sayHello_xsuaa(self):
resp = self.perform_get_request('/sayHello')
self.assertEqual(resp.status, 401, EXPECT_401)
resp = self.perform_get_request_with_token('/sayHello')
self.assertEqual(resp.status, 403, EXPECT_403)
self.add_user_to_role('XSUAA-Viewer')
resp = self.perform_get_request_with_token('/sayHello')
self.assertEqual(resp.status, 200, EXPECT_200)
clientid = self.get_deployed_app().get_credentials_property('clientid')
self.assertRegex(resp.body, clientid, 'Expected to find clientid in response')
resp = self.perform_get_request_with_token('/method')
self.assertEqual(resp.status, 200, EXPECT_200)
self.assertRegex(resp.body, 'You got the sensitive data for zone', 'Expected another response.')
def test_sayHello_ias(self):
resp = self.perform_get_request_with_ias_token('/sayHello', self.get_id_token())
self.assertEqual(resp.status, 403, EXPECT_403)
class TestJavaSecurityIas(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("JavaSecurityIas"))
self.cf_app = CFApp(name='java-security-usage-ias', identity_service_name='ias-java-security')
return self.cf_app
def test_sayHello_ias(self):
resp = self.perform_get_request('/hello-java-security-ias')
self.assertEqual(resp.status, 401, EXPECT_401)
resp = self.perform_get_request_with_ias_token('/hello-java-security-ias', self.get_id_token())
self.assertEqual(resp.status, 200, EXPECT_200)
self.assertIsNotNone(resp.body)
self.assertRegex(resp.body, "are authenticated and can access the application.")
class TestSpringSecurity(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("SpringSecurityUsageMtls"))
self.cf_app = CFApp(name='spring-security-xsuaa-usage', xsuaa_service_name='xsuaa-authentication',
app_router_name='approuter-spring-security-xsuaa-usage')
return self.cf_app
def test_sayHello(self):
logging.info(RUN_TEST.format("TestSpringSecurity.test_sayHello'"))
resp = self.perform_get_request('/v1/sayHello')
self.assertEqual(resp.status, 401, EXPECT_401)
resp = self.perform_get_request_with_token('/v1/sayHello')
self.assertEqual(resp.status, 403, EXPECT_403)
self.add_user_to_role('Viewer')
resp = self.perform_get_request_with_token('/v1/sayHello')
self.assertEqual(resp.status, 200, EXPECT_200)
xsappname = self.get_deployed_app().get_credentials_property('xsappname')
self.assertRegex(resp.body, xsappname, 'Expected to find xsappname in response')
def test_tokenFlows(self):
logging.info(RUN_TEST.format("TestSpringSecurity.test_tokenFlows"))
self.add_user_to_role('Viewer')
resp = self.perform_get_request_with_token('/v2/sayHello')
self.assertEqual(resp.status, 200, EXPECT_200)
resp = self.perform_get_request_with_token('/v3/requestClientCredentialsToken')
self.assertEqual(resp.status, 200, EXPECT_200)
resp = self.perform_get_request_with_token('/v3/requestUserToken')
self.assertEqual(resp.status, 200, EXPECT_200)
token = self.get_token()
path_with_refresh_token = '/v3/requestRefreshToken/' + token.get('refresh_token')
resp = self.perform_get_request_with_token(path_with_refresh_token)
self.assertEqual(resp.status, 200, EXPECT_200)
def test_sayHello_ias(self):
ias_service = self.get_ias_access("ias-spring-sec")
resp = self.perform_get_request_with_ias_token('/v1/sayHello', ias_service.fetch_ias_token(self))
self.assertEqual(resp.status, 403, EXPECT_403)
if self.prompt_user_role_assignment():
resp = self.perform_get_request_with_ias_token('/v1/sayHello', ias_service.fetch_ias_token(self))
if resp.status != 200:
logging.warning("In case after adding role collection, user is still not authorized. "
"Check in IAS admin panel that the application's '{}' Subject Name Identifier is set to email. "
"Bug: NGPBUG-139441 "
.format(ias_service.ias_service_name))
self.assertEqual(resp.status, 200, EXPECT_200)
xsappname = self.get_deployed_app().get_credentials_property('xsappname')
self.assertRegex(resp.body, xsappname, 'Expected to find xsappname in response')
else:
logging.warning('test_sayHello_ias was skipped. To run test enable environment variable USER_INPUT_ENABLED=true')
def test_open_endpoint(self):
resp = self.perform_get_request('/health')
self.assertEqual(resp.status, 200, EXPECT_200)
class TestSpringSecurityNonMtls(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("SpringSecurityUsageNonMtls"))
self.cf_app = CFApp(name='spring-security-xsuaa-usage',
xsuaa_service_name='xsuaa-authentication',
app_router_name='approuter-spring-security-xsuaa-usage',
security_descriptor='xs-security-deprecated.json')
return self.cf_app
def test_tokenFlows(self):
logging.info(RUN_TEST.format("TestSpringSecurity.test_tokenFlows"))
self.add_user_to_role('Viewer')
resp = self.perform_get_request_with_token('/v2/sayHello')
self.assertEqual(resp.status, 200, EXPECT_200)
resp = self.perform_get_request_with_token('/v3/requestClientCredentialsToken')
self.assertEqual(resp.status, 200, EXPECT_200)
resp = self.perform_get_request_with_token('/v3/requestUserToken')
self.assertEqual(resp.status, 200, EXPECT_200)
token = self.get_token()
path_with_refresh_token = '/v3/requestRefreshToken/' + token.get('refresh_token')
resp = self.perform_get_request_with_token(path_with_refresh_token)
self.assertEqual(resp.status, 200, EXPECT_200)
class TestJavaBuildpackApiUsage(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("JavaBuildpackApiUsage"))
self.cf_app = CFApp(name='sap-java-buildpack-api-usage',
xsuaa_service_name='xsuaa-buildpack',
app_router_name='approuter-sap-java-buildpack-api-usage')
return self.cf_app
def test_hello_token_servlet(self):
logging.info(RUN_TEST.format("TestJavaBuildpackApiUsage.test_hello_token_servlet"))
resp = self.perform_get_request('/hello-token')
self.assertEqual(resp.status, 401, EXPECT_401)
resp = self.perform_get_request_with_token('/hello-token')
self.assertEqual(resp.status, 403, EXPECT_403)
self.add_user_to_role('Buildpack_API_Viewer')
resp = self.perform_get_request_with_token('/hello-token')
self.assertEqual(resp.status, 200, EXPECT_200)
self.assertRegex(resp.body, self.credentials.username, 'Expected to find username in response')
class SpringSecurityBasicAuthTest(SampleTest):
def get_app(self):
logging.info(RUN_TESTS.format("SpringSecurityBasicAuthTest"))
self.cf_app | |
# -*- coding: utf-8 -*-
# :Project: pglast -- Printer functions for SQL DML nodes
# :Created: sab 05 ago 2017 16:34:08 CEST
# :Author: <NAME> <<EMAIL>>
# :License: GNU General Public License version 3 or later
# :Copyright: © 2017, 2018, 2019, 2020, 2021 <NAME>
#
from .. import enums
from ..node import Missing, List
from . import IntEnumPrinter, node_printer
@node_printer('A_ArrayExpr')
def a_array_expr(node, output):
output.write('ARRAY[')
if node.elements:
output.print_list(node.elements)
output.write(']')
@node_printer('A_Const')
def a_const(node, output):
output.print_node(node.val)
class AExprKindPrinter(IntEnumPrinter):
enum = enums.A_Expr_Kind
def AEXPR_BETWEEN(self, node, output):
output.print_node(node.lexpr)
output.swrites('BETWEEN')
output.print_list(node.rexpr, 'AND', relative_indent=-4)
def AEXPR_BETWEEN_SYM(self, node, output):
output.print_node(node.lexpr)
output.swrites('BETWEEN SYMMETRIC')
output.print_list(node.rexpr, 'AND', relative_indent=-4)
def AEXPR_DISTINCT(self, node, output):
if node.lexpr.node_tag == 'BoolExpr':
output.write('(')
output.print_node(node.lexpr)
if node.lexpr.node_tag == 'BoolExpr':
output.write(') ')
output.swrites('IS DISTINCT FROM')
output.print_node(node.rexpr)
def AEXPR_ILIKE(self, node, output):
output.print_node(node.lexpr)
if node.name.string_value == '!~~*':
output.swrites('NOT')
output.swrites('ILIKE')
output.print_node(node.rexpr)
def AEXPR_IN(self, node, output):
output.print_node(node.lexpr)
if node.name.string_value == '<>':
output.swrites('NOT')
output.swrite('IN (')
output.print_list(node.rexpr)
output.write(')')
def AEXPR_LIKE(self, node, output):
output.print_node(node.lexpr)
if node.name.string_value == '!~~':
output.swrites('NOT')
output.swrites('LIKE')
output.print_node(node.rexpr)
def AEXPR_NOT_BETWEEN(self, node, output):
output.print_node(node.lexpr)
output.swrites('NOT BETWEEN')
output.print_list(node.rexpr, 'AND', relative_indent=-4)
def AEXPR_NOT_BETWEEN_SYM(self, node, output):
output.print_node(node.lexpr)
output.swrites('NOT BETWEEN SYMMETRIC')
output.print_list(node.rexpr, 'AND', relative_indent=-4)
def AEXPR_NOT_DISTINCT(self, node, output):
output.print_node(node.lexpr)
output.swrites('IS NOT DISTINCT FROM')
output.print_node(node.rexpr)
def AEXPR_NULLIF(self, node, output):
output.write('NULLIF(')
output.print_list((node.lexpr, node.rexpr))
output.write(')')
def AEXPR_OF(self, node, output):
output.print_node(node.lexpr)
output.swrites('IS')
if node.name.string_value == '<>':
output.writes('NOT')
output.write('OF (')
output.print_list(node.rexpr)
output.write(')')
def AEXPR_OP(self, node, output):
with output.expression():
# lexpr is optional because these are valid: -(1+1), +(1+1), ~(1+1)
if node.lexpr is not Missing:
if node.lexpr.node_tag == 'A_Expr':
if node.lexpr.kind == node.kind and node.lexpr.name == node.name:
output.print_node(node.lexpr)
else:
with output.expression():
output.print_node(node.lexpr)
else:
output.print_node(node.lexpr)
output.write(' ')
if isinstance(node.name, List) and len(node.name) > 1:
output.write('OPERATOR(')
output.print_symbol(node.name)
output.write(') ')
else:
output.print_symbol(node.name)
output.write(' ')
if node.rexpr is not Missing:
if node.rexpr.node_tag == 'A_Expr':
if node.rexpr.kind == node.kind and node.rexpr.name == node.name:
output.print_node(node.rexpr)
else:
with output.expression():
output.print_node(node.rexpr)
else:
output.print_node(node.rexpr)
def AEXPR_OP_ALL(self, node, output):
output.print_node(node.lexpr)
output.write(' ')
output.write(node.name.string_value)
output.write(' ALL(')
output.print_node(node.rexpr)
output.write(')')
def AEXPR_OP_ANY(self, node, output):
output.print_node(node.lexpr)
output.write(' ')
output.write(node.name.string_value)
output.write(' ANY(')
output.print_node(node.rexpr)
output.write(')')
def AEXPR_PAREN(self, node, output): # pragma: no cover
# FIXME: according to the documentation of the A_Expr_Kind typedef, AEXPR_PAREN is
# a “nameless dummy node for parentheses”. What does that mean? I wasn't able to
# “produce” it in any way...
raise NotImplementedError("Expression of kind %s not implemented yet"
% self.enum.AEXPR_PAREN)
def AEXPR_SIMILAR(self, node, output):
output.print_node(node.lexpr)
if node.name.string_value == '!~':
output.swrites('NOT')
output.swrites('SIMILAR TO')
assert (node.rexpr.node_tag == 'FuncCall'
and node.rexpr.funcname[1].val.value == 'similar_to_escape')
pattern = node.rexpr.args[0]
output.print_node(pattern)
if len(node.rexpr.args) > 1:
output.swrites('ESCAPE')
output.print_node(node.rexpr.args[1])
a_expr_kind_printer = AExprKindPrinter()
@node_printer('A_Expr')
def a_expr(node, output):
a_expr_kind_printer(node.kind, node, output)
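# Illustrative usage (assumption about the surrounding package API, not part of
# this module): printers such as `a_expr` above are looked up through the
# `node_printer` registry when a statement is re-serialized, e.g.
#
#     from pglast import prettify
#     print(prettify("SELECT a BETWEEN 1 AND 2"))
#
# which routes the parsed `A_Expr` node through `a_expr_kind_printer`.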
@node_printer('A_Indices')
def a_indices(node, output):
output.write('[')
if node.is_slice:
if node.lidx:
output.print_node(node.lidx)
output.write(':')
if node.uidx:
output.print_node(node.uidx)
else:
output.print_node(node.uidx)
output.write(']')
@node_printer('A_Indirection')
def a_indirection(node, output):
bracket = ((node.arg.node_tag in ('A_Expr', 'A_Indirection', 'FuncCall',
'RowExpr', 'TypeCast'))
or
(node.arg.node_tag == 'ColumnRef'
and node.indirection[0].node_tag != 'A_Indices'))
if bracket:
output.write('(')
output.print_node(node.arg)
if bracket:
output.write(')')
output.print_list(node.indirection, '', standalone_items=False)
@node_printer('A_Indirection', 'A_Star')
def a_indirection_a_star(node, output):
output.pending_separator = False
output.write('.')
a_star(node, output)
@node_printer('A_Indirection', 'ColumnRef')
def a_indirection_column_ref(node, output):
output.write('(')
column_ref(node, output)
output.write(')')
@node_printer('A_Indirection', 'FuncCall')
def a_indirection_func_call(node, output):
output.write('(')
func_call(node, output)
output.write(')')
@node_printer('A_Indirection', 'String')
def a_indirection_field(node, output):
output.write('.')
string(node, output, is_name=True)
@node_printer('A_Star')
def a_star(node, output):
output.write('*')
@node_printer('Alias')
def alias(node, output):
output.print_name(node.aliasname)
if node.colnames:
output.swrite('(')
output.print_name(node.colnames, sep=',')
output.write(')')
@node_printer('BitString')
def bitstring(node, output):
output.write(f"{node.val.value[0]}'")
output.write(node.val.value[1:])
output.write("'")
@node_printer('BoolExpr')
def bool_expr(node, output):
bet = enums.BoolExprType
outer_exp_level = output.expression_level
with output.expression():
in_res_target = node.parent_node.node_tag == 'ResTarget'
if node.boolop == bet.AND_EXPR:
relindent = -4 if not in_res_target and outer_exp_level == 0 else None
output.print_list(node.args, 'AND', relative_indent=relindent)
elif node.boolop == bet.OR_EXPR:
with output.expression():
relindent = -4 if not in_res_target and outer_exp_level == 0 else None
output.print_list(node.args, 'OR', relative_indent=relindent)
else:
output.writes('NOT')
output.print_node(node.args[0])
class BooleanTestPrinter(IntEnumPrinter):
enum = enums.BoolTestType
def IS_FALSE(self, node, output):
output.write('FALSE')
def IS_NOT_FALSE(self, node, output):
output.write('NOT FALSE')
def IS_NOT_TRUE(self, node, output):
output.write('NOT TRUE')
def IS_NOT_UNKNOWN(self, node, output):
output.write('NOT UNKNOWN')
def IS_TRUE(self, node, output):
output.write('TRUE')
def IS_UNKNOWN(self, node, output):
output.write('UNKNOWN')
boolean_test_printer = BooleanTestPrinter()
@node_printer('BooleanTest')
def boolean_test(node, output):
output.print_node(node.arg)
output.write(' IS ')
boolean_test_printer(node.booltesttype, node, output)
@node_printer('CallStmt')
def call_stmt(node, output):
output.write('CALL ')
output.print_node(node.funccall)
@node_printer('CaseExpr')
def case_expr(node, output):
with output.push_indent():
output.writes('CASE')
if node.arg:
output.print_node(node.arg)
output.newline()
output.space(2)
with output.push_indent():
output.print_list(node.args, '')
if node.defresult:
output.newline()
output.write('ELSE ')
output.print_node(node.defresult)
output.newline()
output.write('END')
@node_printer('CaseWhen')
def case_when(node, output):
output.write('WHEN ')
with output.push_indent(-3):
with output.expression():
output.print_node(node.expr)
output.newline()
output.write('THEN ')
output.print_node(node.result)
@node_printer('CoalesceExpr')
def coalesce_expr(node, output):
output.write('COALESCE(')
output.print_list(node.args)
output.write(')')
@node_printer('CollateClause')
def collate_clause(node, output):
if node.arg:
with output.expression():
output.print_node(node.arg)
output.swrite('COLLATE ')
output.print_name(node.collname, '.')
@node_printer('ColumnRef')
def column_ref(node, output):
output.print_name(node.fields)
class CTEMaterializedPrinter(IntEnumPrinter):
enum = enums.CTEMaterialize
def CTEMaterializeAlways(self, node, output):
output.write(' MATERIALIZED')
def CTEMaterializeDefault(self, node, output):
pass
def CTEMaterializeNever(self, node, output):
output.write(' NOT MATERIALIZED')
cte_materialize_printer = CTEMaterializedPrinter()
@node_printer('CommonTableExpr')
def common_table_expr(node, output):
output.print_name(node.ctename)
if node.aliascolnames:
output.write('(')
if len(node.aliascolnames) > 1:
output.space(2)
output.print_name(node.aliascolnames, ',')
output.write(')')
output.newline()
output.swrite('AS')
cte_materialize_printer(node.ctematerialized, node, output)
output.write(' (')
output.print_node(node.ctequery)
output.write(')')
output.newline()
@node_printer('ConstraintsSetStmt')
def constraints_set_stmt(node, output):
output.write('SET CONSTRAINTS ')
if node.constraints:
output.print_list(node.constraints)
output.write(' ')
else:
output.write('ALL ')
if node.deferred:
output.write('DEFERRED')
else:
output.write('IMMEDIATE')
@node_printer('CopyStmt')
def copy_stmt(node, output):
output.write('COPY ')
if node.relation:
output.print_node(node.relation)
if node.attlist:
output.write(' (')
output.print_list(node.attlist, are_names=True)
output.write(')')
if node.query:
output.write(' (')
with output.push_indent():
output.print_node(node.query)
output.write(')')
if node.is_from:
output.write(' FROM ')
else:
output.write(' TO ')
if node.is_program:
output.write('PROGRAM ')
if node.filename:
output.print_node(node.filename)
else:
if node.is_from:
output.write('STDIN')
else:
output.write('STDOUT')
if node.options:
output.newline()
output.write('WITH (')
output.print_list(node.options)
output.write(')')
if node.whereClause:
output.newline()
output.write('WHERE ')
output.print_node(node.whereClause)
@node_printer('CopyStmt', 'DefElem')
def copy_stmt_def_elem(node, output):
option = node.defname.value
argv = node.arg
if option == 'format':
output.write('FORMAT ')
output.print_symbol(argv)
elif option == 'freeze':
output.write('FREEZE')
if argv:
output.swrite(str(argv.val.value))
elif option == 'delimiter':
output.write('DELIMITER ')
output.print_node(argv)
elif option == 'null':
output.write('NULL ')
output.print_node(argv)
elif option == 'header':
output.write('HEADER')
if argv:
output.swrite(str(argv.val.value))
elif option == 'quote':
output.write('QUOTE ')
output.print_node(argv)
elif option == 'escape':
output.write('ESCAPE ')
output.print_node(argv)
elif option == 'force_quote':
output.write('FORCE_QUOTE ')
# If it is a list print it.
if isinstance(argv, List):
output.write('(')
output.print_list(argv, are_names=True)
output.write(')')
else:
output.write('* ')
elif option == 'force_null':
output.write('FORCE_NULL (')
output.print_list(argv, are_names=True)
output.write(')')
elif option == 'force_not_null':
output.write('FORCE_NOT_NULL (')
output.print_list(argv, are_names=True)
output.write(')')
elif option == 'encoding':
output.write('ENCODING ')
output.print_node(argv)
else:
raise NotImplementedError(option)
@node_printer('DeclareCursorStmt')
def declare_cursor_stmt(node, output):
output.write('DECLARE ')
output.print_name(node.portalname)
output.write(' ')
if node.options & enums.CURSOR_OPT_BINARY:
output.writes('BINARY')
if node.options & enums.CURSOR_OPT_SCROLL:
output.writes('SCROLL')
elif node.options & enums.CURSOR_OPT_NO_SCROLL:
output.writes('NO SCROLL')
if node.options & enums.CURSOR_OPT_INSENSITIVE:
output.writes('INSENSITIVE')
output.writes('CURSOR')
if node.options & enums.CURSOR_OPT_HOLD:
output.writes('WITH HOLD')
output.newline()
output.space(2)
output.write('FOR ')
with output.push_indent():
output.print_node(node.query)
@node_printer('DeleteStmt')
def delete_stmt(node, output):
with output.push_indent():
if node.withClause:
output.write('WITH ')
output.print_node(node.withClause)
output.newline()
output.space(2)
output.indent()
output.write('DELETE FROM ')
output.print_node(node.relation)
if node.usingClause:
output.newline()
output.write('USING ')
output.print_list(node.usingClause)
if node.whereClause:
output.newline()
output.write('WHERE ')
output.print_node(node.whereClause)
if node.returningList:
output.newline()
output.write('RETURNING ')
output.print_list(node.returningList)
if node.withClause:
output.dedent()
@node_printer('ExecuteStmt')
def execute_stmt(node, output):
output.write('EXECUTE ')
output.print_node(node.name, is_name=True)
if node.params:
output.write('(')
output.print_list(node.params)
output.write(')')
@node_printer('ExplainStmt')
def explain_stmt(node, output):
output.write('EXPLAIN ')
if node.options:
output.write('(')
output.print_list(node.options)
output.write(')')
output.newline()
output.space(2)
output.print_node(node.query)
@node_printer('ExplainStmt', 'DefElem')
def explain_stmt_def_elem(node, output):
output.print_symbol(node.defname)
if node.arg is not Missing:
output.write(' ')
output.print_symbol(node.arg)
class FetchDirectionPrinter(IntEnumPrinter):
enum = enums.FetchDirection
def FETCH_FORWARD(self, node, output):
if node.howMany == enums.FETCH_ALL:
output.write('ALL ')
elif node.howMany != 1:
output.write(f'FORWARD {node.howMany.value} ')
def FETCH_BACKWARD(self, node, output):
if node.howMany == enums.FETCH_ALL:
output.write('BACKWARD ALL ')
elif node.howMany != 1:
output.write(f'BACKWARD {node.howMany.value} ')
else:
output.write('PRIOR ')
def FETCH_ABSOLUTE(self, node, output):
if node.howMany == 1:
output.write('FIRST ')
elif node.howMany == -1:
output.write('LAST ')
else:
output.write(f'ABSOLUTE {node.howMany.value} ')
def FETCH_RELATIVE(self, node, output):
output.write(f'RELATIVE {node.howMany.value} ')
fetch_direction_printer = FetchDirectionPrinter()
@node_printer('FetchStmt')
def fetch_stmt(node, output):
output.write('MOVE ' if node.ismove else 'FETCH ')
fetch_direction_printer(node.direction, node, output)
output.print_name(node.portalname)
@node_printer('Float')
def float(node, output):
output.print_node(node.val)
@node_printer('FuncCall')
def func_call(node, output):
name = '.'.join(n.val.value for n in node.funcname)
special_printer = output.get_printer_for_function(name)
if special_printer is not None:
special_printer(node, output)
return
output.print_name(node.funcname)
output.write('(')
if node.agg_distinct:
output.writes('DISTINCT')
if node.args is Missing:
if node.agg_star:
output.write('*')
else:
if node.func_variadic:
if len(node.args) > 1:
output.print_list(node.args[:-1])
output.write(', ')
output.write('VARIADIC ')
output.print_node(node.args[-1])
else:
output.print_list(node.args)
if node.agg_order:
if not node.agg_within_group:
output.swrites('ORDER BY')
output.print_list(node.agg_order)
else:
output.writes(') WITHIN GROUP (ORDER BY')
output.print_list(node.agg_order)
output.write(')')
if node.agg_filter:
output.swrites('FILTER (WHERE')
output.print_node(node.agg_filter)
output.write(')')
if node.over:
output.swrite('OVER ')
output.print_node(node.over)
@node_printer('FuncCall', 'WindowDef')
def func_call_window_def(node, output):
if node.name:
output.print_name(node.name)
else:
window_def(node, output)
@node_printer('GroupingSet')
def grouping_set(node, output):
kind = node.kind
if kind == enums.GroupingSetKind.GROUPING_SET_CUBE:
output.write('CUBE (')
elif kind == enums.GroupingSetKind.GROUPING_SET_ROLLUP:
output.write('ROLLUP (')
elif kind == enums.GroupingSetKind.GROUPING_SET_SETS:
output.write('GROUPING SETS (')
elif kind == enums.GroupingSetKind.GROUPING_SET_EMPTY:
output.write('()')
return
elif kind == enums.GroupingSetKind.GROUPING_SET_SIMPLE:
# No idea how to reach this branch
output.write('SIMPLE (')
else: # pragma: no cover
raise NotImplementedError('Unhandled grouping set kind: %s' % kind)
output.print_list(node.content, ',')
output.write(')')
@node_printer('GroupingFunc')
def grouping_func(node, output):
output.write(' GROUPING(')
output.print_list(node.args)
output.write(')')
@node_printer('IndexElem')
def index_elem(node, output):
if node.name is not Missing:
output.print_name(node.name)
else:
output.write('(')
output.print_node(node.expr)
output.write(')')
if node.collation:
output.swrite('COLLATE ')
output.print_name(node.collation, ',')
if node.opclass:
output.write(' ')
output.print_name(node.opclass, '.')
if node.opclassopts:
output.write(' (')
output.print_list(node.opclassopts)
output.write(')')
if node.ordering != enums.SortByDir.SORTBY_DEFAULT:
if node.ordering == enums.SortByDir.SORTBY_ASC:
output.swrite('ASC')
elif node.ordering == enums.SortByDir.SORTBY_DESC:
output.swrite('DESC')
else: # pragma: no | |
# Repository: aflahelouneg/inverse_identification_soft_tissue
'''
Human skin hyper-elastic model parameter identification.
The fixed pad (right pad) is held at zero displacement; the moving pad (left
pad) is subjected to incremental displacement. The reaction force is measured
at the moving pad.
The objective is to fit a hyper-elastic material model in terms of the model
parameters so that the model observations (displacement field and reaction force)
and the experimental measurements (displacement field and reaction force) are as
close as possible.
In the post analysis, the sensitivity of the hyper-elastic model with respect
to the experimental measurements is assessed. This gives a measure of the
sensitivity and generates a field indicating where the model is most sensitive
to the measured data. In addition, a sensitivity field is generated to show
where (ideally) the measurements should have been taken in order for the fit
to be more accurate.
'''
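# --- Illustrative sketch (editorial addition, not part of the original script) ---
# A minimal example of the kind of least-squares misfit the identification below
# minimizes: squared residuals between model and measured displacements plus a
# weighted squared residual on the reaction force. All names here are hypothetical.
def _illustrative_misfit(u_model, u_meas, f_model, f_meas, force_weight=1.0):
    """Sum of squared displacement and reaction-force residuals."""
    import numpy as _np  # local import so the sketch is self-contained
    residual_u = _np.asarray(u_model) - _np.asarray(u_meas)
    residual_f = _np.asarray(f_model) - _np.asarray(f_meas)
    return float((residual_u**2).sum() + force_weight * (residual_f**2).sum())
# ----------------------------------------------------------------------------------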
import os
import sys
import math
import time
import logging
import numpy as np
import scipy.interpolate
import scipy.linalg as linalg
import matplotlib.pyplot as plt
import dolfin
from dolfin import Constant
from dolfin import DirichletBC
from dolfin import Expression
from dolfin import Function
from dolfin import assemble
import invsolve
import material
import examples.plotting
import examples.utility
from . import config
logger = logging.getLogger()
logger.setLevel(logging.INFO)
reload_module = examples.utility.reload_module
SimpleTimer = examples.utility.SimpleTimer
PROBLEM_DIRECTORY = os.path.dirname(os.path.relpath(__file__))
PROBLEM_NAME = os.path.basename(os.path.split(__file__)[0])
### Problem parameters
# PROBLEM_SUBCASE = "monolithic"
# MATERIAL_MODEL_NAMES = ["NeoHookean"]
PROBLEM_SUBCASE = "bimaterial"
MATERIAL_MODEL_NAMES = ["NeoHookean", "NeoHookean"]
ONLY_SOLVE_KELOID_MODEL_PARAMETERS = True
MESH_NAME_TAG = "5" # "1", "2", "3", "4", "5"
MAXIMUM_OBSERVATIONS = 10
# MAXIMUM_OBSERVATIONS = 3
FIXED_EXTERNAL_BOUNDARY = False
MAXIMUM_DISPLACEMENT = 2.0 # 3.0 # NOTE: # Maximum displacement is `4.112`
# MAXIMUM_DISPLACEMENT = 3.0
# MAXIMUM_DISPLACEMENT = 4.0
COMPUTE_INITIAL_COST = False
COMPUTE_FINAL_COST = True
COMPUTE_SENSITIVITIES = True
COMPUTE_REACTION_FORCE = True
COMPUTE_MISFIT_ERROR = True
COMPUTE_MISFIT_FIELD = True
COMPUTE_STRESS_FIELD = False
OPTIMIZE_FOREACH_OBSERVATION_TIME = False
OPTIMIZE_FORALL_OBSERVATION_TIMES = True
TEST_SENSITIVITY = False
TEST_SENSITIVITY_PROJECTION = False
ELEMENT_DEGREE = 1
MESHLESS_DEGREE = 2 # (3 is ill-conditioned)
MESHLESS_WEIGHT = "center" # "center", "uniform"
PLOT_RESULTS = True
SAVE_RESULTS = True
SAVE_FIGURE_EXTENSIONS = ('.png', '.svg', '.pdf')
if not (isinstance(MATERIAL_MODEL_NAMES, (list, tuple)) and \
all(isinstance(name_i, str) for name_i in MATERIAL_MODEL_NAMES)):
raise ValueError('Expected `MATERIAL_MODEL_NAMES` to be a sequence of `str`s.')
RESULTS_DIRECTORY_PARENT = os.path.join(
"results", PROBLEM_NAME + " (new)",
f"subcase({PROBLEM_SUBCASE})" +
f"-material({'_'.join(MATERIAL_MODEL_NAMES)})")
SAFE_TO_REMOVE_FILE_TYPES = \
('.txt', '.npy', '.pvd', '.vtu', '.png', '.svg', '.eps', '.pdf')
EPS = 1e-12
### Load mesh
if PROBLEM_SUBCASE == "monolithic":
from .monolithic import mesh as m_mesh
if MESH_NAME_TAG == "1": meshfiles_subdir = os.path.join('msh', '4528')
elif MESH_NAME_TAG == "2": meshfiles_subdir = os.path.join('msh', '6631')
elif MESH_NAME_TAG == "3": meshfiles_subdir = os.path.join('msh', '12023')
elif MESH_NAME_TAG == "4": meshfiles_subdir = os.path.join('msh', '26003')
elif MESH_NAME_TAG == "5": meshfiles_subdir = os.path.join('msh', '103672')
else: raise ValueError("`MESH_NAME_TAG`?")
elif PROBLEM_SUBCASE == "bimaterial":
from .bimaterial import mesh as m_mesh
if MESH_NAME_TAG == "1": meshfiles_subdir = os.path.join('msh', '4543')
elif MESH_NAME_TAG == "2": meshfiles_subdir = os.path.join('msh', '6760')
elif MESH_NAME_TAG == "3": meshfiles_subdir = os.path.join('msh', '12125')
elif MESH_NAME_TAG == "4": meshfiles_subdir = os.path.join('msh', '26615')
elif MESH_NAME_TAG == "5": meshfiles_subdir = os.path.join('msh', '105167')
else: raise ValueError("`MESH_NAME_TAG`?")
else:
raise ValueError("`PROBLEM_SUBCASE`?")
mesh_data = m_mesh.load_mesh(meshfiles_subdir)
mesh = mesh_data['mesh']
domain_markers = mesh_data['domain_markers']
boundary_markers = mesh_data['boundary_markers']
id_subdomains_material = mesh_data['id_subdomains_material']
id_subdomains_dic = mesh_data['id_subdomains_dic']
id_boundaries_pad_moving = mesh_data['id_boundaries_pad_moving']
id_boundaries_pad_fixed = mesh_data['id_boundaries_pad_fixed']
id_boundaries_exterior = mesh_data['id_boundaries_exterior']
# NOTE: The marker id's of material subdomains (`id_subdomains_material`) can be
# defined as a sequence of sequences. In this case, `id_subdomains_material
# [I][J]` shall refer to the `J`th material subdomain of the `I`th material.
# Alternatively, `id_subdomains_material` can be defined as a sequence of
# int's. In this case, `id_subdomains_material[I]` shall refer to the single
# subdomain of the `I`th material. Some practical examples are as follows
# (a small normalization sketch is added after this note):
#
# 1) First material defined on subdomain `1`:
# id_subdomains_material = [(1,),] OR [1,]
# 2) First material defined on subdomains `1` and `2`
# id_subdomains_material = [(1,2),]
# 3) First material defined on subdomain `1`, second material -- `2`
# id_subdomains_material = [(1,),(2,)] OR [1, 2]
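# Illustrative helper (editorial sketch, not used by this script): normalizing
# both accepted forms of `id_subdomains_material` into one tuple per material.
def _normalize_id_subdomains_material(ids):
    """Return a list of tuples, one tuple of subdomain ids per material."""
    return [tuple(i) if isinstance(i, (list, tuple)) else (i,) for i in ids]
# e.g. _normalize_id_subdomains_material([1, 2])   -> [(1,), (2,)]
#      _normalize_id_subdomains_material([(1, 2)]) -> [(1, 2)]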
### Load measurements
if PROBLEM_SUBCASE == "monolithic":
from .monolithic.data.displacement import measurements as measurements_disp
from .monolithic.data.reactionforce import measurements as measurements_force
elif PROBLEM_SUBCASE == "bimaterial":
from .bimaterial.data.displacement import measurements as measurements_disp
from .bimaterial.data.reactionforce import measurements as measurements_force
else:
raise ValueError("`PROBLEM_SUBCASE`?")
x_dic = measurements_disp['x_dic']
u_dic = measurements_disp['u_dic']
u_dic_pad_moving = measurements_disp['u_pad_mov']
ux_msr_pad_moving = measurements_force['ux_pad_mov']
fx_msr_pad_moving = measurements_force['fx_pad_mov']
if not all(isinstance(mtx, np.ndarray) and mtx.ndim == 2 for mtx in u_dic):
raise TypeError('Expecting `u_dic` to be a sequence of 2D arrays.')
if not all(isinstance(vec, np.ndarray) and vec.ndim == 1 for vec in u_dic_pad_moving):
raise TypeError('Expecting `u_dic_pad_moving` to be a sequence of 1D arrays.')
### Synchronize force measurements with DIC
# Trim the measurement that contains too much data
if u_dic_pad_moving[-1][0] < ux_msr_pad_moving[-1]:
# Too much data at the end of `u_dic_pad_moving`
mask = u_dic_pad_moving[:,0] >= ux_msr_pad_moving[-1]
idx_end = np.flatnonzero(mask)[-1] + 2
u_dic_pad_moving = u_dic_pad_moving[:idx_end,:]
u_dic = u_dic[:idx_end]
elif ux_msr_pad_moving[-1] < u_dic_pad_moving[-1][0]:
# Too much data at the end of `ux_msr_pad_moving`
mask = ux_msr_pad_moving >= u_dic_pad_moving[-1,0]
idx_end = np.flatnonzero(mask)[-1] + 2
ux_msr_pad_moving = ux_msr_pad_moving[:idx_end]
fx_msr_pad_moving = fx_msr_pad_moving[:idx_end]
interp_fx_pad_moving = scipy.interpolate.interp1d(
ux_msr_pad_moving, fx_msr_pad_moving,
kind='linear', fill_value="extrapolate")
ux_msr_pad_moving = np.asarray(u_dic_pad_moving)[:,0]
fx_msr_pad_moving = interp_fx_pad_moving(ux_msr_pad_moving)
uy_msr_pad_moving = np.zeros_like(ux_msr_pad_moving)
fy_msr_pad_moving = np.zeros_like(fx_msr_pad_moving)
if PROBLEM_SUBCASE == "monolithic":
def coordinates_of_moving_pad():
return np.array([[32.0, 20.0]])
elif PROBLEM_SUBCASE == "bimaterial":
def coordinates_of_moving_pad():
'''Since the DIC coordinates do not match the mesh coordinates,
compute the required vector for offsetting the DIC coordinates.
Assuming the DIC coordinates are relative the moving pad.
Assuming the pads are equidistant from the mesh center.
'''
PAD_SEPARATION_DISTANCE = 4.072727e+01
# This number was estimated from DIC measurements.
# The precise pad separation distance is unknown.
x = mesh.coordinates(); x_mesh_center = (x.max(0) + x.min(0)) * 0.5
x_pad_center = x_mesh_center - [PAD_SEPARATION_DISTANCE * 0.5, 0.0]
return x_pad_center
else:
raise ValueError("`PROBLEM_SUBCASE`?")
# NOTE: All measurements should be full dimension. The relevant subdimension(s)
# can be specified in the definitions of the model cost and constraint(s).
measurement_x_dic = x_dic + coordinates_of_moving_pad()
measurement_u_dic = u_dic
measurement_u_bnd = np.stack([ux_msr_pad_moving, uy_msr_pad_moving], axis=1)
measurement_f_bnd = np.stack([fx_msr_pad_moving, fy_msr_pad_moving], axis=1)
num_measurements = len(measurement_f_bnd)
if not (num_measurements == len(measurement_u_bnd) == len(measurement_u_dic)):
raise RuntimeError('Numbers of measurements need to be the same.')
### Consider displacement measurements within bounds
if MAXIMUM_DISPLACEMENT is not None:
assert MAXIMUM_DISPLACEMENT > 0, "Expected a positive value."
ind = np.flatnonzero((measurement_u_bnd**2).sum(1)
> MAXIMUM_DISPLACEMENT**2)
if ind.size:
num_measurements = ind[0]
if num_measurements == 0:
raise RuntimeError
measurement_u_dic = measurement_u_dic[:num_measurements]
measurement_u_bnd = measurement_u_bnd[:num_measurements]
measurement_f_bnd = measurement_f_bnd[:num_measurements]
### Observation times
# NOTE: Prefer observation times as a sequence of indices.
# NOTE: Avoid time `0` if the deformation is zero.
model_observation_start = 1
model_observation_times = examples.utility.linspace_range(
first=model_observation_start, last=num_measurements-1,
count=min(num_measurements, MAXIMUM_OBSERVATIONS),
subrange="back") # "back" means `last` is inclusive
### Mark the DIC subdomain
def compute_measurement_markers_dic():
'''Mark the elements in the mesh that are overlain by DIC measurements.'''
p0 = measurement_x_dic.min(axis=0)
p1 = measurement_x_dic.max(axis=0)
tol = np.abs(p1-p0).max() * EPS
p0 -= tol
p1 += tol
measurement_markers_dic, id_measruement_markers_dic = \
examples.utility.mark_rectangular_subdomain(p0, p1, mesh)
id_measruement_markers_dic = (id_measruement_markers_dic,)
if not measurement_markers_dic.array().any():
raise RuntimeError('Null measurement markers')
return measurement_markers_dic, id_measruement_markers_dic
measurement_markers_dic, id_measruement_markers_dic = \
compute_measurement_markers_dic()
### Integration measures
dx = dolfin.dx(domain=mesh) # Entire domain
ds = dolfin.ds(domain=mesh) # Entire boundary
# Integration over material subdomains
dx_mat = tuple(dolfin.Measure('dx', mesh, subdomain_data=domain_markers,
subdomain_id=ids_mat_i) for ids_mat_i in id_subdomains_material)
# Integration over measurement subdomains: one subdomain
dx_msr = (dolfin.Measure('dx', mesh, subdomain_data=measurement_markers_dic,
subdomain_id=id_measruement_markers_dic),)
# Integration over the measurement boundary: at least one subdomain
ds_msr = (tuple(dolfin.Measure('ds', mesh, subdomain_data=boundary_markers,
subdomain_id=id) for id in id_boundaries_pad_moving),)
# NOTE: `ds_msr` generally contains boundary measures that are split among
# different material subdomains. This is necessary for the integration
# of the observed traction since it may be defined on several materials.
domain_size = assemble(1*dx)
if abs(sum(assemble(1*dx_i) for dx_i in dx_mat)-domain_size) > domain_size*EPS:
raise RuntimeError('Material domain(s) do not constitute geometric domain.')
if any(assemble(1*dx_i) < EPS for dx_i in dx_msr):
raise RuntimeError('Zero-size measurement subdomain.')
if any(assemble(1*ds_ij) < EPS for ds_i in ds_msr for ds_ij in ds_i):
raise RuntimeError('Zero-size measurement boundary.')
### Function spaces
# element_u = dolfin.VectorElement("CG", mesh.ufl_cell(), 1)
# element_p = dolfin.FiniteElement("CG", mesh.ufl_cell(), 1)
# mixed_element = dolfin.MixedElement([element_u, element_p])
# V = FunctionSpace(mesh, mixed_element)
# V_obs = FunctionSpace(mesh, element_u)
V = dolfin.VectorFunctionSpace(mesh, 'CG', ELEMENT_DEGREE) # for primary field
S = dolfin.FunctionSpace(mesh, 'CG', ELEMENT_DEGREE) # for generic scalar fields
W = dolfin.TensorFunctionSpace(mesh, 'DG', 0) # for tensor fields (e.g. stresses)
# NOTE: `V` is generally a mixed function space that accounts for all fields,
# e.g. displacement field, hydro-static pressure field, etc. `V_obs`,
# on the other hand, must just account for the displacement field.
V_obs = V
# Primary field
u = Function(V)
### Model parameters
# NOTE: Model parameters consist of material parameters any any auxiliary
# parameters (e.g. constraint multpliers)
#
# NOTE: Model parameters that are exclusively `dolfin.Constant`s are the only
# parameters that will be optimized.
#
# NOTE: Material parameters should be a `list` of `dict`s so that each `dict`
# may refer to a particular material subdomain.
material_classes = []
material_parameters = []
if PROBLEM_SUBCASE == "monolithic":
if MATERIAL_MODEL_NAMES[0] == "NeoHookean":
material_classes.append(material.NeoHookean)
if FIXED_EXTERNAL_BOUNDARY:
raise NotImplementedError
else:
if MAXIMUM_DISPLACEMENT == 3.0:
if MESH_NAME_TAG == "1": model_parameter_init | |
to be greater or equal to the maximum length of an element of
``vectors``.
value: a value used for padding
Returns:
an array of padded vectors
"""
result = []
for v in vectors:
result.append(np.concatenate([v, np.full([length - v.shape[0]], value, dtype=v.dtype)]))
return np.stack(result)
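# Example (illustrative), assuming the signature pad_vectors(vectors, length, value):
#   pad_vectors([np.array([1, 2]), np.array([3])], length=3, value=0)
#   -> array([[1, 2, 0],
#             [3, 0, 0]])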
class BertPunctuationCapitalizationDataset(Dataset):
"""
A dataset to use during training for punctuation and capitalization tasks.
For inference, you will need
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset.BertPunctuationCapitalizationInferDataset`.
For huge datasets which cannot be loaded into memory simultaneously use
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`.
Args:
text_file (:obj:`Union[str, os.PathLike]`): a path to a file with sequences, each line should contain a text
without punctuation and capitalization
labels_file (:obj:`Union[str, os.PathLike]`): a path to a file with labels, each line corresponds to word
labels for a sentence in the ``text_file``. Labels have to follow format described in this section of
documentation :ref:`NeMo Data Format<nemo-data-format-label>`.
max_seq_length (:obj:`int`): max number of tokens in a source sequence. ``max_seq_length`` includes for [CLS]
and [SEP] tokens. Sequences which are too long will be clipped by removal of tokens from the end of the
sequence.
tokenizer (:obj:`TokenizerSpec`): a tokenizer instance which has properties ``unk_id``, ``sep_id``, ``bos_id``,
``eos_id``.
num_samples (:obj:`int`, `optional`, defaults to :obj:`-1`): a number of samples you want to use for the
dataset. If ``-1``, use all dataset. Useful for testing.
tokens_in_batch (:obj:`int`, `optional`, defaults to :obj:`5000`): number of tokens in a batch including
paddings and special tokens ([CLS], [SEP], [UNK]). This class's :meth:`__getitem__` method returns not
individual samples but ready batches. The number of samples in a batch is adjusted to the input sequence
lengths: if input sequences are short, then a batch will contain more samples. Before packing into batches,
samples are sorted by the number of tokens they contain. Sorting significantly reduces the number of pad
tokens in a batch. Regular PyTorch data loader shuffling only permutes batches without changing their content.
Proper shuffling is achieved via calling method :meth:`repack_batches_with_shuffle` every epoch. If
parameter ``number_of_batches_is_multiple_of`` is greater than 1, some batches may be split into smaller
pieces.
pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): pad value to use for labels. It's also the neutral
label both for punctuation and capitalization.
punct_label_ids (:obj:`Dict[str, int]`, `optional`): dict to map punctuation labels to label ids. For dev set,
use label ids generated during training to support cases when not all labels are present in the dev set.
For training, it is recommended to set ``punct_label_ids`` to ``None`` or load from cache.
capit_label_ids (:obj:`Dict[str, int]`, `optional`): same ``punct_label_ids`` for capitalization labels.
ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to compute loss on
tokens which are not first tokens in a word. For example, assume that word ``'tokenization'`` is tokenized
into ``['token', 'ization']``. If ``ignore_extra_tokens=True``, loss mask for the word is
``[True, False]``, and if ``ignore_extra_tokens=False``, then loss mask is ``[True, True]``.
ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to ignore [CLS] and [SEP] tokens
in the loss_mask.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to use pickled features already present
in ``cache_dir`` or not. If pickled features file does not exist or ``use_cache=False``, then features are
pickled in ``cache_dir``. Pickled features include input ids, subtokens mask (mask of first tokens in
words), encoded punctuation and capitalization labels, label ids. Feature creation consumes considerable
time, so ``use_cache=True`` significantly speeds up the start of training. Pickled features are also
used for sharing features between processes if data parallel training is used.
cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where cache (pickled features)
is stored. By default, ``text_file`` parent directory is used. This parameter is useful if dataset
directory is read-only and you wish to pickle features. In such a case specify a path to directory which
allows writing in ``cache_dir`` parameter.
get_label_frequencies (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to print and save label
frequencies. Frequencies are shown if the ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into the ``label_info_save_dir`` directory.
label_info_save_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where label frequencies
are saved. By default, the ``text_file`` parent directory is used. When the method
:meth:`save_labels_and_get_file_paths` is called, label ids are saved into the ``label_info_save_dir``
directory. This parameter is useful if the directory containing ``text_file`` is read-only.
punct_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): a path to a .csv file containing
punctuation label vocabulary. Each line in such a vocabulary file contains exactly one label. The first
line has to contain `pad_label`, otherwise error will be raised.
capit_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): same as ``punct_label_vocab_file`` for
capitalization labels.
add_masks_and_segment_ids_to_batch (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to add
``'loss_mask'``, ``'input_mask'``, ``'segment_ids'`` items to a batch. Useful for creation of tarred
dataset and can NOT be used during model training and inference.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to show data examples, label stats and
other useful information.
n_jobs (:obj:`int`, `optional`, defaults to :obj:`0`): number of workers used for tokenization, encoding
labels, creating "first token in word" mask, and clipping. If ``n_jobs <= 0`` data preparation is performed
without multiprocessing. By default ``n_jobs`` is ``0``.
.. warning::
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
number_of_batches_is_multiple_of (:obj:`int`, `optional`, defaults to :obj:`1`): number of batches in the
dataset is made divisible by ``number_of_batches_is_multiple_of``. If ``number_of_batches_is_multiple_of``
is greater than 1, then several batches are split in parts until the number of batches
is divisible by ``number_of_batches_is_multiple_of``. If there are not enough queries in the dataset to
create enough batches, then a warning is printed. This parameter is useful for dev and validation datasets
if multiple GPUs are used. The problem is that if the number of batches is not evenly divisible by the
number of GPUs, then some queries may be processed several times and metrics will be distorted.
batch_shuffling_random_seed (:obj:`int`, `optional`, defaults to :obj:`42`): a random seed used for batch repacking and
shuffling.
tokenization_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting tokenization
progress. Useful for creation of tarred dataset
batch_mark_up_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
deciding which samples batches will contain. Useful for creation of tarred dataset
batch_building_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
batch creation (stacking and padding). Useful for creation of tarred dataset
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports. """
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
tokens_in_batch: int = 5000,
pad_label: str = 'O',
punct_label_ids: Optional[Union[Dict[str, int], DictConfig]] = None,
capit_label_ids: Optional[Union[Dict[str, int], DictConfig]] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = True,
use_cache: bool = True,
cache_dir: Optional[Union[str, os.PathLike]] = None,
get_label_frequencies: bool = False,
label_info_save_dir: Optional[Union[str, os.PathLike]] = None,
punct_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
capit_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
add_masks_and_segment_ids_to_batch: bool = True,
verbose: bool = True,
n_jobs: Optional[int] = 0,
number_of_batches_is_multiple_of: int = 1,
batch_shuffling_random_seed: int = 42,
tokenization_progress_queue: Optional[mp.Queue] = None,
batch_mark_up_progress_queue: Optional[mp.Queue] = None,
batch_building_progress_queue: Optional[mp.Queue] = None,
use_features: bool = True
) -> None:
""" Initializes BertPunctuationCapitalizationDataset. """
if isinstance(punct_label_ids, DictConfig):
punct_label_ids = OmegaConf.to_container(punct_label_ids)
if isinstance(capit_label_ids, DictConfig):
capit_label_ids = OmegaConf.to_container(capit_label_ids)
self._check_constructor_parameters(
text_file,
labels_file,
punct_label_ids,
capit_label_ids,
punct_label_vocab_file,
capit_label_vocab_file,
num_samples,
use_cache,
number_of_batches_is_multiple_of,
)
if punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
punct_label_ids = load_label_ids(punct_label_vocab_file)
if capit_label_vocab_file is not None:
capit_label_vocab_file = Path(capit_label_vocab_file).expanduser()
capit_label_ids = load_label_ids(capit_label_vocab_file)
self.text_file, self.labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser()
if label_info_save_dir is None:
self.label_info_save_dir = self.text_file.parent
else:
self.label_info_save_dir = Path(label_info_save_dir).expanduser()
self.tokens_in_batch = tokens_in_batch
self.tokenizer = tokenizer
self.pad_label = pad_label
self.ignore_extra_tokens = ignore_extra_tokens
self.ignore_start_end = ignore_start_end
self.add_masks_and_segment_ids_to_batch = add_masks_and_segment_ids_to_batch
self.verbose = verbose
self.batch_mark_up_progress_queue = batch_mark_up_progress_queue
self.batch_building_progress_queue = batch_building_progress_queue
self.use_features = use_features
master_device = is_global_rank_zero()
self.features_pkl = self._get_path_to_pkl_features(self.text_file, cache_dir, max_seq_length, num_samples)
features = None
if master_device and not (self.features_pkl.is_file() and use_cache):
if verbose:
logging.info(f'Processing {self.text_file}')
res = self._read_dataset(self.text_file, self.labels_file, num_samples)
text_lines, punct_label_lines, capit_label_lines, punct_unique_labels, capit_unique_labels = res
if punct_label_ids:
self._check_label_ids_vs_unique_labels(
| |
# send the netlist for double check to user
mex = 'This is your netlist:\n\n'
with open(fname) as f:
for line in f:
mex += line
context.bot.send_message(chat_id=update.message.chat_id, text=mex)
# compute solution
net, mex = get_solution(fname, update, context)
# typing
context.bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.TYPING)
if mex is None: # in case of .tran or .ac-multi-freq mex is none, hence send the plot
if net.analysis[0].lower() == '.tran':
context.bot.send_photo(chat_id=update.message.chat_id,
photo=open('./users/tran_plot_' + str(update.message.chat_id) + '.png', 'rb'))
elif net.analysis[0].lower() == '.ac':
N = int(len(net.tf_cmd.split()[1:]) / 2)
if N == 1:
context.bot.send_photo(chat_id=update.message.chat_id,
photo=open('./users/bode_plot_' + str(update.message.chat_id) + '.png', 'rb'))
else:
for k in range(N):
context.bot.send_photo(chat_id=update.message.chat_id,
photo=open(
'./users/bode_plot_' + str(update.message.chat_id) + '_' + str(k) + '.png',
'rb'))
else: # otherwise print results
mex = 'Please remember that all components are analyzed with *passive sign convention*.\nHere you have ' \
'*the circuit solution*.\n\n' + mex
context.bot.send_message(chat_id=update.message.chat_id, text=mex,
parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)
# ==========================
# help - short guide
# ==========================
@block_group
def help(update, context):
"""
'help' provides information about the use of the bot
:param update: bot update
:param context: CallbackContext
:return: None
"""
msg = "*Very short guide*.\n\n" #1)upload a file with the netlist (don't know what a netlist is? Run `/tutorial` in the bot)\n2) enjoy\n\n\n*If you need a more detailed guide*\nRun `/tutorial` in the bot"
msg += "The Bot makes use of netlists to describe circuits. If you do not know what "
msg += "a netlist is, please refer to SpicePy "
msg += "[documentation](https://github.com/giaccone/SpicePy/wiki/User's-guide)"
msg += " and [examples](https://github.com/giaccone/SpicePy/wiki/Examples).\n\n"
msg += "Assuming that you know how to describe a circuit by means of a netlist, you can either:\n\n"
msg += "1) use the command `/netlist` and write the netlist directly to the Bot (i.e. chatting with the BOT)\n\n"
msg += "or\n\n"
msg += "2) send a text file to the Bot including the netlist. The Bot will catch it and it'll solve it.\n\n"
msg += "*Finally*\n"
msg += "read the full [tutorial](https://github.com/giaccone/SpicePyBot/wiki) if "
msg += "you are completely new to this subject."
context.bot.send_message(chat_id=update.message.chat_id,
text=msg,
parse_mode=telegram.ParseMode.MARKDOWN, disable_web_page_preview=True)
# =========================================
# netlist - write te netlist in the BOT
# =========================================
@block_group
def netlist(update, context):
"""
'netlist' tells the bot that the user intends to send a netlist via a text message
:param update: bot update
:param context: CallbackContext
:return: None
"""
# if current user don't have cnf file create it
if not os.path.exists('./users/' + str(update.message.chat_id) + '.cnf'):
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'w')
fid.write('False\n') # this is for the node potential
fid.write('False\n') # this is for the polar flag
fid.write('False') # this is for the decibel flag
fid.close()
open("./users/" + str(update.message.chat_id) + "_waitnetlist", 'w').close()
context.bot.send_message(chat_id=update.message.chat_id, text="Please write the netlist\nAll in one message.")
# =========================================
# reply - catch any message and reply to it
# =========================================
@block_group
def reply(update, context):
"""
'reply' provides the result for a netlist sent via a text message. If /netlist is not
used before sending the netlist, a funny message is sent.
:param update: bot update
:param context: CallbackContext
:return: None
"""
# check call to /netlist
if os.path.exists("./users/" + str(update.message.chat_id) + "_waitnetlist"):
# write the netlist
fname = "./users/" + str(update.message.chat_id) + ".txt"
fid = open(fname, "w")
fid.write(str(update.message.text) + '\n')
fid.close()
# remove waitnetlist file for this user
os.remove("./users/" + str(update.message.chat_id) + "_waitnetlist")
# send the netlist for double check to user
mex = 'This is your netlist:\n\n'
with open(fname) as f:
for line in f:
mex += line
context.bot.send_message(chat_id=update.message.chat_id, text=mex)
# compute solution
net, mex = get_solution(fname, update, context)
# typing
context.bot.send_chat_action(chat_id=update.message.chat_id, action=telegram.ChatAction.TYPING)
if mex is None: # in case of .tran or .ac-multi-freq mex is none, hence send the plot
if net.analysis[0].lower() == '.tran':
context.bot.send_photo(chat_id=update.message.chat_id,
photo=open('./users/tran_plot_' + str(update.message.chat_id) + '.png', 'rb'))
elif net.analysis[0].lower() == '.ac':
N = int(len(net.tf_cmd.split()[1:]) / 2)
if N == 1:
context.bot.send_photo(chat_id=update.message.chat_id,
photo=open('./users/bode_plot_' + str(update.message.chat_id) + '.png', 'rb'))
else:
for k in range(N):
context.bot.send_photo(chat_id=update.message.chat_id,
photo=open(
'./users/bode_plot_' + str(update.message.chat_id) + '_' + str(k) + '.png',
'rb'))
else: # otherwise print results
mex = 'Please remember that all components are analyzed with *passive sign convention*.\nHere you have ' \
'*the circuit solution*.\n\n' + mex
context.bot.send_message(chat_id=update.message.chat_id, text=mex,
parse_mode=telegram.ParseMode.MARKDOWN)
else: # ironic answer if the user sends a random message to the Bot
update.message.reply_text("Come on! We are here to solve circuits and not to chat! 😀\n"
"Please provide me a netlist.", quote=True)
# =========================================
# complex_repr - toggle polar/cartesian
# =========================================
@block_group
def complex_repr(update, context):
"""
'complex_repr' toggles between cartesian and polar representation for complex numbers
:param update: bot update
:param context: CallbackContext
:return: None
"""
if os.path.exists('./users/' + str(update.message.chat_id) + '.cnf'):
# get configurations
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'r')
flag = fid.readline()[:-1] # read nodal_pot conf
nodal_pot = flag == 'True'
flag = fid.readline()[:-1] # read polar conf
polar = flag == 'True'
flag = fid.readline() # read dB conf
dB = flag == 'True'
# keep nodal pot and toggle polar
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'w')
fid.write(str(nodal_pot) + '\n')
fid.write(str(not polar) + '\n')
fid.write(str(dB))
fid.close()
else:
polar = False
# Initialize config file with polar = True (everything else False)
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'w')
fid.write('False\n') # this is for the node potential
fid.write(str(not polar) + '\n') # this is for the polar flag
fid.write('False') # this is for the decibel flag
fid.close()
# notify user
if polar:
context.bot.send_message(chat_id=update.message.chat_id, text="Switched to cartesian representation")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Switched to polar representation")
# =========================================
# nodal_pot - toggle node potentials in output
# =========================================
@block_group
def nodal_pot(update, context):
"""
'nodal_pot' enables/disables node potentials in the results
:param update: bot update
:param context: CallbackContext
:return: None
"""
if os.path.exists('./users/' + str(update.message.chat_id) + '.cnf'):
# get configurations
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'r')
flag = fid.readline()[:-1] # read nodal_pot conf
nodal_pot = flag == 'True'
flag = fid.readline()[:-1] # read polar conf
polar = flag == 'True'
flag = fid.readline() # read dB conf
dB = flag == 'True'
# toggle nodal_pot, keep polar and dB
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'w')
fid.write(str(not nodal_pot) + '\n')
fid.write(str(polar) + '\n')
fid.write(str(dB))
fid.close()
else:
nodal_pot = False
# Initialize config file with nodal_pot = True (everything else False)
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'w')
fid.write(str(not nodal_pot) + '\n') # this is for the node potential
fid.write('False\n') # this is for the polar flag
fid.write('False') # this is for the decibel flag
fid.close()
# notify user
if nodal_pot:
context.bot.send_message(chat_id=update.message.chat_id, text="Node potentials removed from results")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="Node potentials included in results")
# =========================================
# decibel - toggle decibel in bode plot
# =========================================
@block_group
def decibel(update, context):
"""
'decibel' enables/disables the decibel representation in Bode plots
:param update: bot update
:param context: CallbackContext
:return: None
"""
if os.path.exists('./users/' + str(update.message.chat_id) + '.cnf'):
# get configurations
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'r')
flag = fid.readline()[:-1] # read nodal_pot conf
nodal_pot = flag == 'True'
flag = fid.readline()[:-1] # read polar conf
polar = flag == 'True'
flag = fid.readline() # read dB conf
dB = flag == 'True'
# keep nodal_pot and polar, toggle dB
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'w')
fid.write(str(nodal_pot) + '\n')
fid.write(str(polar) + '\n')
fid.write(str(not dB))
fid.close()
else:
dB = False
# Initialize config file with dB = True (everything else False)
fname = './users/' + str(update.message.chat_id) + '.cnf'
fid = open(fname, 'w')
fid.write('False\n') # this is for the node potential
fid.write('False\n') # this is for the polar flag
fid.write(str(not dB)) # this is for the decibel flag
fid.close()
# notify user
if dB:
context.bot.send_message(chat_id=update.message.chat_id, text="bode plot: decibel disabled")
else:
context.bot.send_message(chat_id=update.message.chat_id, text="bode plot: decibel enabled")
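# Sketch only (hypothetical helpers, not used by the handlers above): complex_repr,
# nodal_pot and decibel all read and rewrite the same three-line per-user .cnf file
# (node-potential flag, polar flag, decibel flag). The pattern could be centralised as:
def read_user_flags(chat_id):
    """Return (nodal_pot, polar, dB) for a user, defaulting to all False."""
    fname = './users/' + str(chat_id) + '.cnf'
    if not os.path.exists(fname):
        return False, False, False
    with open(fname) as fid:
        flags = [line.strip() == 'True' for line in fid.readlines()[:3]]
    while len(flags) < 3:
        flags.append(False)
    return tuple(flags)


def write_user_flags(chat_id, nodal_pot, polar, dB):
    """Persist the three boolean flags, one per line, in the user's .cnf file."""
    with open('./users/' + str(chat_id) + '.cnf', 'w') as fid:
        fid.write(str(nodal_pot) + '\n' + str(polar) + '\n' + str(dB))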
# =========================================
# log - get log
# =========================================
@block_group
@restricted
def log(update, context):
"""
'log' sends log files in the chat
:param update: bot update
:param context: CallbackContext
:return: None
"""
context.bot.send_document(chat_id=update.message.chat_id, document=open('./SolverLog.log', 'rb'))
context.bot.send_document(chat_id=update.message.chat_id, document=open('./OtherLog.log', 'rb'))
# =========================================
# stat - get stat
# =========================================
@block_group
@restricted
def stat(update, context):
"""
'stat' computes statistical information about the bot use
:param update: bot update
:param context: CallbackContext
:return: None
"""
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.32.2.1.4",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"local_user0" : {
"nodetype" : "namednumber",
"number" : "0"
},
"local_user1" : {
"nodetype" : "namednumber",
"number" : "1"
},
"local_user2" : {
"nodetype" : "namednumber",
"number" : "2"
},
"local_user3" : {
"nodetype" : "namednumber",
"number" : "3"
},
"local_user4" : {
"nodetype" : "namednumber",
"number" : "4"
},
"local_user5" : {
"nodetype" : "namednumber",
"number" : "5"
},
"local_user6" : {
"nodetype" : "namednumber",
"number" : "6"
},
"local_user7" : {
"nodetype" : "namednumber",
"number" : "7"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"sysLogServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.32.3",
"status" : "current",
"description" :
"""""",
}, # table
"sysLogServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.1.5.8.16.32.3.1",
"create" : "true",
"status" : "current",
"linkage" : [
"sysLogServerAddress",
],
"description" :
"""An entry in sysLogServerTable.""",
}, # row
"sysLogServerAddress" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.32.3.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "noaccess",
"description" :
"""""",
}, # column
"sysLogServerLogLevel" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.32.3.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"level0" : {
"nodetype" : "namednumber",
"number" : "0"
},
"level0-1" : {
"nodetype" : "namednumber",
"number" : "1"
},
"level0-2" : {
"nodetype" : "namednumber",
"number" : "2"
},
"level0-3" : {
"nodetype" : "namednumber",
"number" : "3"
},
"level0-4" : {
"nodetype" : "namednumber",
"number" : "4"
},
"level0-5" : {
"nodetype" : "namednumber",
"number" : "5"
},
"level0-6" : {
"nodetype" : "namednumber",
"number" : "6"
},
"level0-7" : {
"nodetype" : "namednumber",
"number" : "7"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"sysLogServerRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.32.3.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33",
}, # node
"globalDhcpRelay" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.1",
}, # node
"globalDhcpRelayEnable" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"globalDhcpRelayOption82Enable" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"globalDhcpRelayInfoEnable" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"globalDhcpRelayInfoData" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"maxNumberOfGlobalDhcpRelayRemoteServer" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"globalDhcpRelayRemoteServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.1.6",
"status" : "current",
"description" :
"""""",
}, # table
"globalDhcpRelayRemoteServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.1.6.1",
"create" : "true",
"status" : "current",
"linkage" : [
"globalDhcpRelayRemoteServerIp",
],
"description" :
"""An entry in globalDhcpRelayRemoteServerTable.""",
}, # row
"globalDhcpRelayRemoteServerIp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.1.6.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"globalDhcpRelayRemoteServerRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.1.6.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpRelay" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.3",
}, # node
"dhcpRelayInfoData" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.3.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"maxNumberOfDhcpRelay" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.3.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The maximum number of DHCP relay entries that can be created.
A value of 0 for this object implies that there exists settings for
global DHCP relay.""",
}, # scalar
"maxNumberOfDhcpRelayRemoteServer" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.3.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"dhcpRelayRemoteServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.3.4",
"status" : "current",
"description" :
"""""",
}, # table
"dhcpRelayRemoteServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.3.4.1",
"create" : "true",
"status" : "current",
"linkage" : [
"dhcpRelayVid",
"dhcpRelayRemoteServerIp",
],
"description" :
"""An entry in dhcpRelayRemoteServerTable.""",
}, # row
"dhcpRelayVid" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.3.4.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"dhcpRelayRemoteServerIp" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.3.4.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"dhcpRelayRemoteServerRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.1.5.8.16.33.3.4.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpRelayTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.3.5",
"status" : "current",
"description" :
"""""",
}, # table
"dhcpRelayEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.33.3.5.1",
"status" : "current",
"linkage" : [
"dhcpRelayVid",
],
"description" :
"""An entry in dhcpRelayTable.""",
}, # row
"dhcpRelayOption82Enable" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.3.5.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"dhcpRelayInfoEnable" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.17.32.33.3.5.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"radiusServerSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.34",
}, # node
"radiusAuthServerSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.34.1",
}, # node
"radiusAuthServerTimeout" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.16.34.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"radiusAuthServerTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.172.16.17.32.34.1.3",
"status" : "current",
"description" :
"""""",
}, # table
"radiusAuthServerEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.1.5.8.16.34.1.3.1",
"status" : "current",
"linkage" : [
"radiusAuthServerIndex",
],
"description" :
"""An entry in radiusAuthServerTable.""",
}, # row
"radiusAuthServerIndex" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
"oid" : "192.168.3.11.4.1.890.1.5.8.16.34.1.3.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "noaccess",
"description" :
"""""",
}, # column
"radiusAuthServerIpAddr" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-ES2024A-MIB",
| |
voxelsprocessed_fc_ds = 0
despecklingdone = False
for despecklepass in range(optiondict["despeckle_passes"]):
LGR.info(f"\n\nCorrelation despeckling subpass {despecklepass + 1}")
outmaparray *= 0.0
outmaparray[validvoxels] = eval("lagtimes")[:]
medianlags = ndimage.median_filter(
outmaparray.reshape(nativespaceshape), 3
).reshape(numspatiallocs)
initlags = np.where(
np.abs(outmaparray - medianlags) > optiondict["despeckle_thresh"],
medianlags,
-1000000.0,
)[validvoxels]
if len(initlags) > 0:
if len(np.where(initlags != -1000000.0)[0]) > 0:
voxelsprocessed_thispass = fitcorr_func(
genlagtc,
initial_fmri_x,
lagtc,
trimmedcorrscale,
thefitter,
corrout,
fitmask,
failreason,
lagtimes,
lagstrengths,
lagsigma,
gaussout,
windowout,
R2,
peakdict=thepeakdict,
nprocs=optiondict["nprocs_fitcorr"],
alwaysmultiproc=optiondict["alwaysmultiproc"],
fixdelay=optiondict["fixdelay"],
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
despeckle_thresh=optiondict["despeckle_thresh"],
initiallags=initlags,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
voxelsprocessed_fc_ds += voxelsprocessed_thispass
optiondict[
"despecklemasksize_pass" + str(thepass) + "_d" + str(despecklepass + 1)
] = voxelsprocessed_thispass
optiondict[
"despecklemaskpct_pass" + str(thepass) + "_d" + str(despecklepass + 1)
] = (100.0 * voxelsprocessed_thispass / optiondict["corrmasksize"])
else:
despecklingdone = True
else:
despecklingdone = True
if despecklingdone:
LGR.info("Nothing left to do! Terminating despeckling")
break
if optiondict["savedespecklemasks"] and thepass == optiondict["passes"]:
theheader = copy.deepcopy(nim_hdr)
theheader["dim"][4] = 1
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-despeckle_mask"
else:
savename = f"{outputname}_despecklemask"
if not fileiscifti:
theheader["dim"][0] = 3
tide_io.savetonifti(
(
np.where(
np.abs(outmaparray - medianlags) > optiondict["despeckle_thresh"],
medianlags,
0.0,
)
).reshape(nativespaceshape),
theheader,
savename,
)
else:
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = 1
theheader["dim"][spaceindex] = numspatiallocs
tide_io.savetocifti(
(
np.where(
np.abs(outmaparray - medianlags) > optiondict["despeckle_thresh"],
medianlags,
0.0,
)
),
cifti_hdr,
theheader,
savename,
isseries=False,
names=["despecklemask"],
)
LGR.info(
f"\n\n{voxelsprocessed_fc_ds} voxels despeckled in "
f"{optiondict['despeckle_passes']} passes"
)
TimingLGR.info(
f"Correlation despeckle end, pass {thepass}",
{
"message2": voxelsprocessed_fc_ds,
"message3": "voxels",
},
)
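# Illustration (comments only, not executed): the despeckle criterion above compares each
# voxel's fitted lag with the median of its 3x3x3 neighbourhood and refits only the
# outliers, e.g.:
#   import numpy as np
#   from scipy import ndimage
#   lags = np.zeros((5, 5, 5)); lags[2, 2, 2] = 8.0   # one spike in an otherwise flat map
#   medianlags = ndimage.median_filter(lags, 3)
#   redo = np.abs(lags - medianlags) > 5.0            # despeckle_thresh = 5.0
# redo is True only at the spiked voxel, which is then refit with the median as its
# initial lag estimate (initlags), exactly as fitcorr_func receives it above.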
# Step 3 - regressor refinement for next pass
if thepass < optiondict["passes"] or optiondict["convergencethresh"] is not None:
LGR.info(f"\n\nRegressor refinement, pass {thepass}")
TimingLGR.info(f"Regressor refinement start, pass {thepass}")
if optiondict["refineoffset"]:
peaklag, peakheight, peakwidth = tide_stats.gethistprops(
lagtimes[np.where(fitmask > 0)],
optiondict["histlen"],
pickleft=optiondict["pickleft"],
peakthresh=optiondict["pickleftthresh"],
)
optiondict["offsettime"] = peaklag
optiondict["offsettime_total"] += peaklag
LGR.info(
f"offset time set to {optiondict['offsettime']:.3f}, "
f"total is {optiondict['offsettime_total']:.3f}"
)
# regenerate regressor for next pass
refineregressor_func = addmemprofiling(
tide_refine.refineregressor,
optiondict["memprofile"],
"before refineregressor",
)
(
voxelsprocessed_rr,
outputdata,
refinemask,
locationfails,
ampfails,
lagfails,
sigmafails,
) = refineregressor_func(
fmri_data_valid,
fmritr,
shiftedtcs,
weights,
thepass,
lagstrengths,
lagtimes,
lagsigma,
fitmask,
R2,
theprefilter,
optiondict,
bipolar=optiondict["bipolar"],
padtrs=numpadtrs,
includemask=internalrefineincludemask_valid,
excludemask=internalrefineexcludemask_valid,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
optiondict["refinemasksize_pass" + str(thepass)] = voxelsprocessed_rr
optiondict["refinemaskpct_pass" + str(thepass)] = (
100.0 * voxelsprocessed_rr / optiondict["corrmasksize"]
)
optiondict["refinelocationfails_pass" + str(thepass)] = locationfails
optiondict["refineampfails_pass" + str(thepass)] = ampfails
optiondict["refinelagfails_pass" + str(thepass)] = lagfails
optiondict["refinesigmafails_pass" + str(thepass)] = sigmafails
if voxelsprocessed_rr > 0:
normoutputdata = tide_math.stdnormalize(theprefilter.apply(fmrifreq, outputdata))
normunfilteredoutputdata = tide_math.stdnormalize(outputdata)
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-refinedmovingregressor_timeseries",
normunfilteredoutputdata,
1.0 / fmritr,
columns=["unfiltered_pass" + str(thepass)],
append=(thepass > 1),
)
tide_io.writebidstsv(
f"{outputname}_desc-refinedmovingregressor_timeseries",
normoutputdata,
1.0 / fmritr,
columns=["filtered_pass" + str(thepass)],
append=True,
)
else:
tide_io.writenpvecs(
normoutputdata,
f"{outputname}_refinedregressor_pass" + str(thepass) + ".txt",
)
tide_io.writenpvecs(
normunfilteredoutputdata,
f"{outputname}_unfilteredrefinedregressor_pass" + str(thepass) + ".txt",
)
# check for convergence
regressormse = mse(normoutputdata, previousnormoutputdata)
optiondict["regressormse_pass" + str(thepass).zfill(2)] = regressormse
LGR.info(f"regressor difference at end of pass {thepass:d} is {regressormse:.6f}")
if optiondict["convergencethresh"] is not None:
if thepass >= optiondict["maxpasses"]:
LGR.info("refinement ended (maxpasses reached)")
stoprefining = True
refinestopreason = "maxpassesreached"
elif regressormse < optiondict["convergencethresh"]:
LGR.info("refinement ended (refinement has converged)")
stoprefining = True
refinestopreason = "convergence"
else:
stoprefining = False
elif thepass >= optiondict["passes"]:
stoprefining = True
refinestopreason = "passesreached"
else:
stoprefining = False
if optiondict["detrendorder"] > 0:
resampnonosref_y = tide_fit.detrend(
tide_resample.doresample(
initial_fmri_x,
normoutputdata,
initial_fmri_x,
method=optiondict["interptype"],
),
order=optiondict["detrendorder"],
demean=optiondict["dodemean"],
)
resampref_y = tide_fit.detrend(
tide_resample.doresample(
initial_fmri_x,
normoutputdata,
os_fmri_x,
method=optiondict["interptype"],
),
order=optiondict["detrendorder"],
demean=optiondict["dodemean"],
)
else:
resampnonosref_y = tide_resample.doresample(
initial_fmri_x,
normoutputdata,
initial_fmri_x,
method=optiondict["interptype"],
)
resampref_y = tide_resample.doresample(
initial_fmri_x,
normoutputdata,
os_fmri_x,
method=optiondict["interptype"],
)
if optiondict["tmaskname"] is not None:
resampnonosref_y *= tmask_y
thefit, R = tide_fit.mlregress(tmask_y, resampnonosref_y)
resampnonosref_y -= thefit[0, 1] * tmask_y
resampref_y *= tmaskos_y
thefit, R = tide_fit.mlregress(tmaskos_y, resampref_y)
resampref_y -= thefit[0, 1] * tmaskos_y
# reinitialize lagtc for resampling
previousnormoutputdata = normoutputdata + 0.0
genlagtc = tide_resample.FastResampler(
initial_fmri_x, normoutputdata, padtime=padtime
)
nonosrefname = "_reference_fmrires_pass" + str(thepass + 1) + ".txt"
osrefname = "_reference_resampres_pass" + str(thepass + 1) + ".txt"
(
optiondict["kurtosis_reference_pass" + str(thepass + 1)],
optiondict["kurtosisz_reference_pass" + str(thepass + 1)],
optiondict["kurtosisp_reference_pass" + str(thepass + 1)],
) = tide_stats.kurtosisstats(resampref_y)
if not stoprefining:
if optiondict["bidsoutput"]:
tide_io.writebidstsv(
f"{outputname}_desc-movingregressor_timeseries",
tide_math.stdnormalize(resampnonosref_y),
1.0 / fmritr,
columns=["pass" + str(thepass + 1)],
append=True,
)
tide_io.writebidstsv(
f"{outputname}_desc-oversampledmovingregressor_timeseries",
tide_math.stdnormalize(resampref_y),
oversampfreq,
columns=["pass" + str(thepass + 1)],
append=True,
)
else:
tide_io.writenpvecs(
tide_math.stdnormalize(resampnonosref_y),
outputname + nonosrefname,
)
tide_io.writenpvecs(
tide_math.stdnormalize(resampref_y), outputname + osrefname
)
else:
LGR.warning(f"refinement failed - terminating at end of pass {thepass}")
stoprefining = True
refinestopreason = "emptymask"
TimingLGR.info(
f"Regressor refinement end, pass {thepass}",
{
"message2": voxelsprocessed_rr,
"message3": "voxels",
},
)
if optiondict["saveintermediatemaps"]:
maplist = [
("lagtimes", "maxtime"),
("lagstrengths", "maxcorr"),
("lagsigma", "maxwidth"),
("fitmask", "fitmask"),
("failreason", "corrfitfailreason"),
]
if thepass < optiondict["passes"]:
maplist.append(("refinemask", "refinemask"))
for mapname, mapsuffix in maplist:
if optiondict["memprofile"]:
memcheckpoint(f"about to write {mapname} to {mapsuffix}")
else:
tide_util.logmem(f"about to write {mapname} to {mapsuffix}")
outmaparray[:] = 0.0
outmaparray[validvoxels] = eval(mapname)[:]
if optiondict["textio"]:
tide_io.writenpvecs(
outmaparray.reshape(nativespaceshape, 1),
f"{outputname}_{mapsuffix}{passsuffix}.txt",
)
else:
if optiondict["bidsoutput"]:
bidspasssuffix = f"_intermediatedata-pass{thepass}"
if mapname == "fitmask":
savename = f"{outputname}{bidspasssuffix}_desc-corrfit_mask"
elif mapname == "failreason":
savename = f"{outputname}{bidspasssuffix}_desc-corrfitfailreason_info"
else:
savename = f"{outputname}{bidspasssuffix}_desc-{mapsuffix}_map"
bidsdict = bidsbasedict.copy()
if mapname == "lagtimes" or mapname == "lagsigma":
bidsdict["Units"] = "second"
tide_io.writedicttojson(bidsdict, f"{savename}.json")
else:
savename = f"{outputname}_{mapname}" + passsuffix
tide_io.savetonifti(outmaparray.reshape(nativespaceshape), theheader, savename)
# We are done with refinement.
if optiondict["convergencethresh"] is None:
optiondict["actual_passes"] = optiondict["passes"]
else:
optiondict["actual_passes"] = thepass - 1
optiondict["refinestopreason"] = refinestopreason
# Post refinement step -1 - Coherence calculation
if optiondict["calccoherence"]:
TimingLGR.info("Coherence calculation start")
LGR.info("\n\nCoherence calculation")
reportstep = 1000
# make the Coherer
theCoherer = tide_classes.Coherer(
Fs=(1.0 / fmritr),
reftc=cleaned_nonosreferencetc,
freqmin=0.0,
freqmax=0.2,
ncprefilter=theprefilter,
windowfunc=optiondict["windowfunc"],
detrendorder=optiondict["detrendorder"],
debug=False,
)
theCoherer.setreftc(cleaned_nonosreferencetc)
(
coherencefreqstart,
dummy,
coherencefreqstep,
coherencefreqaxissize,
) = theCoherer.getaxisinfo()
if optiondict["textio"]:
nativecoherenceshape = (xsize, coherencefreqaxissize)
else:
if fileiscifti:
nativecoherenceshape = (1, 1, 1, coherencefreqaxissize, numspatiallocs)
else:
nativecoherenceshape = (xsize, ysize, numslices, coherencefreqaxissize)
internalvalidcoherenceshape = (numvalidspatiallocs, coherencefreqaxissize)
internalcoherenceshape = (numspatiallocs, coherencefreqaxissize)
# now allocate the arrays needed for the coherence calculation
if optiondict["sharedmem"]:
coherencefunc, dummy, dummy = allocshared(internalvalidcoherenceshape, rt_outfloatset)
coherencepeakval, dummy, dummy = allocshared(numvalidspatiallocs, rt_outfloatset)
coherencepeakfreq, dummy, dummy = allocshared(numvalidspatiallocs, rt_outfloatset)
else:
coherencefunc = np.zeros(internalvalidcoherenceshape, dtype=rt_outfloattype)
coherencepeakval = np.zeros(numvalidspatiallocs, dtype=rt_outfloattype)
coherencepeakfreq = np.zeros(numvalidspatiallocs, dtype=rt_outfloattype)
coherencepass_func = addmemprofiling(
tide_calccoherence.coherencepass,
optiondict["memprofile"],
"before coherencepass",
)
voxelsprocessed_coherence = coherencepass_func(
fmri_data_valid,
theCoherer,
coherencefunc,
coherencepeakval,
coherencepeakfreq,
reportstep,
alt=True,
showprogressbar=optiondict["showprogressbar"],
chunksize=optiondict["mp_chunksize"],
nprocs=1,
alwaysmultiproc=False,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
# save the results of the calculations
outcoherencearray = np.zeros(internalcoherenceshape, dtype=rt_floattype)
outcoherencearray[validvoxels, :] = coherencefunc[:, :]
theheader = copy.deepcopy(nim_hdr)
theheader["toffset"] = coherencefreqstart
theheader["pixdim"][4] = coherencefreqstep
if optiondict["textio"]:
tide_io.writenpvecs(
outcoherencearray.reshape(nativecoherenceshape),
f"{outputname}_coherence.txt",
)
else:
if optiondict["bidsoutput"]:
savename = f"{outputname}_desc-coherence_info"
else:
savename = f"{outputname}_coherence"
if fileiscifti:
timeindex = theheader["dim"][0] - 1
spaceindex = theheader["dim"][0]
theheader["dim"][timeindex] = coherencefreqaxissize
theheader["dim"][spaceindex] = numspatiallocs
tide_io.savetocifti(
outcoherencearray,
cifti_hdr,
theheader,
savename,
isseries=True,
names=["coherence"],
)
else:
theheader["dim"][0] = 3
theheader["dim"][4] = coherencefreqaxissize
tide_io.savetonifti(
outcoherencearray.reshape(nativecoherenceshape), theheader, savename
)
del coherencefunc
del outcoherencearray
TimingLGR.info(
"Coherence calculation end",
{
"message2": voxelsprocessed_coherence,
"message3": "voxels",
},
)
# Post refinement step 0 - Wiener deconvolution
if optiondict["dodeconv"]:
TimingLGR.info("Wiener deconvolution start")
LGR.info("\n\nWiener deconvolution")
reportstep = 1000
# now allocate the arrays needed for Wiener deconvolution
if optiondict["sharedmem"]:
wienerdeconv, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
wpeak, dummy, dummy = allocshared(internalvalidspaceshape, rt_outfloatset)
else:
wienerdeconv = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
wpeak = np.zeros(internalvalidspaceshape, dtype=rt_outfloattype)
wienerpass_func = addmemprofiling(
tide_wiener.wienerpass,
optiondict["memprofile"],
"before wienerpass",
)
voxelsprocessed_wiener = wienerpass_func(
numspatiallocs,
reportstep,
fmri_data_valid,
threshval,
optiondict,
wienerdeconv,
wpeak,
resampref_y,
rt_floatset=rt_floatset,
rt_floattype=rt_floattype,
)
TimingLGR.info(
"Wiener deconvolution end",
{
"message2": voxelsprocessed_wiener,
"message3": "voxels",
},
)
# Post refinement step 1 - GLM fitting to remove moving signal
if optiondict["doglmfilt"]:
TimingLGR.info("GLM filtering start")
LGR.info("\n\nGLM filtering")
reportstep = 1000
if (optiondict["gausssigma"] > 0.0) or (optiondict["glmsourcefile"] is not None):
if optiondict["glmsourcefile"] is not None:
LGR.info(f"reading in {optiondict['glmsourcefile']} for GLM filter, please wait")
if optiondict["textio"]:
nim_data = tide_io.readvecs(optiondict["glmsourcefile"])
else:
nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(
optiondict["glmsourcefile"]
)
else:
LGR.info(f"rereading {fmrifilename} for GLM filter, please wait")
if optiondict["textio"]:
nim_data = tide_io.readvecs(fmrifilename)
else:
nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(fmrifilename)
"""meanvalue = np.mean(
nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1],
axis=1,
)"""
fmri_data_valid = (
nim_data.reshape((numspatiallocs, timepoints))[:, validstart : validend + 1]
)[validvoxels, :] + 0.0
# move fmri_data_valid into shared memory
if optiondict["sharedmem"]:
LGR.info("moving fmri data to shared memory")
TimingLGR.info("Start moving fmri_data to shared memory")
numpy2shared_func = addmemprofiling(
numpy2shared,
optiondict["memprofile"],
"before movetoshared (glm)",
)
fmri_data_valid = numpy2shared_func(fmri_data_valid, rt_floatset)
TimingLGR.info("End moving fmri_data to shared memory")
# repo: karttur/geoimagine02-postgresdb
'''
Created on 8 juni 2018
@author: thomasgumbricht
'''
# Package application imports
from geoimagine.postgresdb import PGsession
from geoimagine.postgresdb.compositions import InsertCompDef, InsertCompProd, InsertLayer, SelectComp
from geoimagine.support import Today
class ManageSentinel(PGsession):
'''
DB support for setting up processes
'''
def __init__(self):
""" The constructor connects to the database"""
HOST = 'karttur'
query = self._GetCredentials( HOST )
#Connect to the Postgres Server
self.session = PGsession.__init__(self,query,'ManageSentinel')
def _InsertSentinelMODISTile(self, query):
self.cursor.execute("SELECT * FROM sentinel.regions WHERE mgrs = '%(mgrs)s' AND regiontype = '%(regiontype)s' AND regionid = '%(regionid)s';" %query)
record = self.cursor.fetchone()
if record == None and not query['delete']:
##print aD['senssat'],aD['typeid'],aD['subtype'], filecat, tD['pattern'], tD['folder'], tD['band'], tD['prefix'],suffix, tD['celltype'], tD['fileext']
self.cursor.execute("INSERT INTO sentinel.regions (regionid, regiontype, mgrs, mgrsid, utm) VALUES (%s, %s, %s, %s, %s)",
(query['regionid'], query['regiontype'],query['mgrs'], query['mgrsid'], query['utmzone']))
self.conn.commit()
elif record and query['delete']:
self.cursor.execute("DELETE FROM sentinel.regions WHERE mgrs = '%(mgrs)s' AND regiontype = '%(regiontype)s' AND regionid = '%(regionid)s';" %query)
self.conn.commit()
def _InsertSentinelRegionTile(self, query):
self.cursor.execute("SELECT * FROM %(system)s.regions WHERE regionid = '%(regionid)s';" %query)
record = self.cursor.fetchone()
if record == None:
#print "SELECT * FROM regions WHERE regions.regionid = '%(regid)s' AND regioncat = '%(cat)s' AND type = '%(typ)s';" %query
warnstr = 'WARNING can not add tile to region %(regionid)s, no such region at category %(category)s and type %(type)s' %query
print (warnstr)
return
self.cursor.execute("SELECT * FROM sentinel.regions WHERE mgrs = '%(mgrs)s' AND regiontype = '%(regiontype)s' AND regionid = '%(regionid)s';" %query)
record = self.cursor.fetchone()
if record == None and not query['delete']:
##print aD['senssat'],aD['typeid'],aD['subtype'], filecat, tD['pattern'], tD['folder'], tD['band'], tD['prefix'],suffix, tD['celltype'], tD['fileext']
self.cursor.execute("INSERT INTO sentinel.regions (regionid, regiontype, mgrs, mgrsid, utm) VALUES (%s, %s, %s, %s, %s)",
(query['regionid'], query['regiontype'],query['mgrs'], query['mgrsid'], query['utmzone']))
self.conn.commit()
elif record and query['delete']:
self.cursor.execute("DELETE FROM sentinel.regions WHERE mgrs = '%(mgrs)s' AND regiontype = '%(regiontype)s' AND regionid = '%(regionid)s';" %query)
self.conn.commit()
def _SelectSentinelRegionTiles(self,query):
#print ("SELECT path, row from regions.sentinel WHERE regionid = '%(regionid)s'" %query)
self.cursor.execute("SELECT path, row from regions.sentinel WHERE regionid = '%(regionid)s'" %query)
records = self.cursor.fetchall()
return records
def _GetMetaTranslator(self):
#print (self.name)
self.cursor.execute("SELECT * FROM sentinel.metatranslate")
records = self.cursor.fetchall()
recD = {}
for row in records:
recD[row[0]] = {'dst':row[1],'tab':row[2], 'typ':row[3]}
return recD
def _SelectComp(self,system,comp):
comp['system'] = system
return SelectComp(self, comp)
def _SelectLayer(self,system,queryD,paramL):
return self._SingleSearch(queryD,paramL,system,'layers',True)
def _SelectLayerOnLocus(self,system,queryD,paramL):
rec = self._SingleSearch(queryD, paramL, system, 'layers')
return dict(zip(paramL,rec))
def _InstertTileMeta(self,queryD):
rec = self._CheckInsertSingleRecord(queryD,'sentinel', 'tilemeta', [('tileid',)])
def _InsertGranuleMeta(self,queryD):
rec = self._CheckInsertSingleRecord(queryD,'sentinel', 'granulemeta', [('granuleid',)])
def _InstertTile(self,queryD):
rec = self._CheckInsertSingleRecord(queryD,'sentinel', 'tiles', [('tileid',)])
if rec != None:
if rec[2] != queryD['mgrs']:
print (rec)
print (queryD)
print (queryD['mgrs'],rec[2])
raise ValueError('mgrs mismatch between existing tile record and query: %s vs %s' % (rec[2], queryD['mgrs']))
def _InstertGranule(self,queryD):
rec = self._CheckInsertSingleRecord(queryD,'sentinel', 'granules', [('granuleid',)])
def _InsertVectorSearch(self,queryD):
self._CheckInsertSingleRecord(queryD,'sentinel', 'vectorsearches')
def _SelectVectorSearch(self,queryD,paramL):
rec = self._SingleSearch(queryD,paramL,'sentinel', 'vectorsearches')
return rec
def _UpdateTileStatus(self, queryD):
query = "UPDATE sentinel.tiles SET %(column)s = '%(status)s' WHERE tileid = '%(tileid)s'" %queryD
self.cursor.execute(query)
self.conn.commit()
def _UpdateGranuleStatus(self, queryD):
query = "UPDATE sentinel.granules SET %(column)s = '%(status)s' WHERE granuleid = '%(granuleid)s'" %queryD
self.cursor.execute(query)
self.conn.commit()
def _SelectSentinelGranules(self,params, period, statusD):
queryD = {}
queryD['platformname'] = {'val':params.platformname, 'op':'=' }
queryD['product'] = {'val':params.prodtype, 'op':'=' }
if 'cloudcover' in statusD:
queryD['cloudcover'] = {'val':params.cloudmax, 'op':'<=' }
for status in statusD:
queryD[status] = {'val':statusD[status], 'op':'=' }
if period:
datumkey = period.datumL[0]
startdate = period.datumD[datumkey]['startdate']
queryD['acqdate'] = {'val':startdate, 'op':'>=' }
enddate = period.datumD[datumkey]['enddate']
queryD['#acqdate'] = {'val':enddate, 'op':'<=' }
if period.datumD[datumkey]['enddoy'] > 0:
startdoy = period.datumD[datumkey]['startdoy']
queryD['doy'] = {'val':startdoy, 'op':'>=' }
enddoy = period.datumD[datumkey]['enddoy']
queryD['#doy'] = {'val':enddoy, 'op':'<=' }
#if params.orbitdirection.upper() != 'B':
# pass
wherestr = self._DictToSelect(queryD)
query = "SELECT uuid, granuleid, source, product, folder, acqdate, orbitid FROM sentinel.granulemeta \
JOIN sentinel.granules USING (granuleid, product) \
%s;" %(wherestr)
print (query)
self.cursor.execute(query)
return self.cursor.fetchall()
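# Illustration (comment only): the query dictionaries built above keep one entry per
# column with a value and a comparison operator, which _DictToSelect (inherited from
# PGsession, not shown in this file) presumably turns into a WHERE clause, e.g.
#   {'product': {'val': 'S2MSI1C', 'op': '='}, 'cloudcover': {'val': 30, 'op': '<='}}
# would correspond to something like
#   WHERE product = 'S2MSI1C' AND cloudcover <= 30
# and keys prefixed with '#' (such as '#acqdate') apparently let the same column appear
# twice, for range queries.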
def _SelectSentinelTiles(self,params, period, statusD):
queryD = {}
queryD['m.platformname'] = {'val':params.platformname, 'op':'=' }
queryD['t.product'] = {'val':params.prodtype, 'op':'=' }
for status in statusD:
queryD[status] = {'val':statusD[status], 'op':'=' }
if 'cloudcover' in statusD:
#overwrites and cloudcover from above
queryD['t.cloudcover'] = {'val':params.cloudmax, 'op':'<=' }
datumkey = period.datumL[0]
startdate = period.datumD[datumkey]['startdate']
queryD['t.acqdate'] = {'val':startdate, 'op':'>=' }
enddate = period.datumD[datumkey]['enddate']
queryD['#t.acqdate'] = {'val':enddate, 'op':'<=' }
if period.datumD[datumkey]['enddoy'] > 0:
startdoy = period.datumD[datumkey]['startdoy']
queryD['t.doy'] = {'val':startdoy, 'op':'>=' }
enddoy = period.datumD[datumkey]['enddoy']
queryD['#t.doy'] = {'val':enddoy, 'op':'<=' }
if params.orbitdirection.upper() != 'B':
raise NotImplementedError('filtering on a single orbit direction is not implemented')
wherestr = self._DictToSelect(queryD)
query = "SELECT DISTINCT ON (m.uuid) m.uuid, t.tileid, t.source, t.product, t.folder, t.acqdate, t.orbitid, t.utm, t.mgrsid, t.mgrs \
FROM sentinel.tilemeta AS M \
INNER JOIN sentinel.tiles AS T ON (M.tileid = T.tileid) "
if 'r.regionid' in statusD:
query += "INNER JOIN sentinel.regions AS R ON (T.mgrs = R.mgrs) "
query += wherestr
self.cursor.execute(query)
return self.cursor.fetchall()
def _SelectSentinelTemplate(self,queryD,paramL):
#return self._SingleSearch(queryD,'modis','template',paramL)
return self._MultiSearch(queryD,paramL,'sentinel','template')
def _InsertLayer(self,layer, overwrite = False, delete = False):
InsertLayer(self,layer,overwrite, delete)
def _InsertTileCoords(self,query):
'''
#rec = self._SingleSearch(query,'sentinel', 'vectorsearches')
'''
self.cursor.execute("SELECT * FROM sentinel.tilecoords WHERE mgrs = '%(mgrs)s';" %query)
record = self.cursor.fetchone()
if record == None:
self._InsertRecord(query, 'sentinel', 'tilecoords')
else:
search = {'mgrs':query['mgrs']}
query.pop('mgrs')
self._UpdateRecord(query, 'sentinel', 'tilecoords', search)
def _SelectSentinelTile(self,query):
self.cursor.execute("SELECT * FROM sentinel.tilecoords WHERE mgrs = '%(mgrs)s';" %query)
return self.cursor.fetchone()
def _SelectSentinelTileCoords(self, searchD):
#construct where statement - LATER
query = {}
self.cursor.execute("SELECT epsg, mgrs,utmzone,mgrsid,minx,miny,maxx,maxy,ullat,ullon,lrlat,lrlon,urlat,urlon,lllat,lllon FROM sentinel.tilecoords;" %query)
records = self.cursor.fetchall()
return records
def _SelectAllDefRegionsOld(self,wherestatement):
print ('wherestatement',wherestatement)
return SelectAllDefRegions(self,'sentinel','regions',wherestatement)
def _SelectAllDefRegions(self, wherestatement = '' ):
query = {'schema': 'sentinel', 'table':'regions', 'where':wherestatement}
if wherestatement == '':
self.cursor.execute("SELECT regioncat, regionid FROM system.defregions;" %query)
else:
#print ("SELECT DISTINCT R.regioncat, R.regionid FROM system.defregions R LEFT JOIN %(schema)s.%(table)s M ON (R.regionid = M.regionid) WHERE %(where)s;" %query)
print ("SELECT DISTINCT R.regioncat, R.regionid FROM system.defregions R LEFT JOIN %(schema)s.%(table)s M ON (R.regionid = M.regionid) %(where)s;" %query)
self.cursor.execute("SELECT DISTINCT R.regioncat, R.regionid FROM system.defregions R LEFT JOIN %(schema)s.%(table)s M ON (R.regionid = M.regionid) %(where)s;" %query)
return self.cursor.fetchall()
#return SelectAllDefRegions(self,'modis','regions',wherestatement)
def _InsertMGRSCoords(self,query):
#rec = self._SingleSearch(query,'sentinel', 'vectorsearches')
self.cursor.execute("SELECT * FROM sentinel.mgrscoords WHERE mgrs = '%(mgrs)s';" %query)
record = self.cursor.fetchone()
if record == None:
self._InsertRecord(query, 'sentinel', 'mgrscoords')
else:
search = {'mgrs':query['mgrs']}
query.pop('mgrs')
self._UpdateRecord(query, 'sentinel', 'mgrscoords', search)
def _InsertSentinelTileCoordOld(self,hvtile,h,v,ulxsin,ulysin,lrxsin,lrysin,ullat,ullon,lrlon,lrlat,urlon,urlat,lllon,lllat):
query = {'hvtile':hvtile}
#source, product, folder, band, prefix, suffix, fileext, celltype, dataunit, compid, hdfgrid, hdffolder, scalefactor, offsetadd, cellnull, retrieve, ecode
self.cursor.execute("SELECT * FROM sentinel.tilecoords WHERE hvtile = '%(hvtile)s';" %query)
record = self.cursor.fetchone()
if record == None:
self.cursor.execute("INSERT INTO sentinel.tilecoords (hvtile,h,v,minxsin,maxysin,maxxsin,minysin,ullat,ullon,lrlon,lrlat,urlon,urlat,lllon,lllat) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ;",
(hvtile,h,v,ulxsin,ulysin,lrxsin,lrysin,ullat,ullon,lrlon,lrlat,urlon,urlat,lllon,lllat))
self.conn.commit()
def _SearchMGRSfromCentroid(self,lon,lat):
query = {'lon':lon, 'lat':lat}
self.cursor.execute("SELECT mgrs,west,south,east,north,ullon,ullat,urlon,urlat,lrlon,lrlat,lllon,lllat FROM sentinel.tilecoords WHERE %(lon)s > west AND %(lon)s < east AND %(lat)s > south and %(lat)s < north;" %query)
records = self.cursor.fetchall()
return records
def _SearchTilesFromWSEN(self, west, south, east, north):
query = {'west':west, 'south':south,'east':east,'north':north}
#self.cursor.execute("SELECT mgrs,west,south,east,north,ullon,ullat,urlon,urlat,lrlon,lrlat,lllon,lllat, minx, miny, maxx, maxy FROM sentinel.tilecoords WHERE centerlon > %(west)s AND centerlon < %(east)s AND centerlat > %(south)s AND centerlat < %(north)s;" %query)
self.cursor.execute("SELECT mgrs,west,south,east,north,ullon,ullat,urlon,urlat,lrlon,lrlat,lllon,lllat, minx, miny, maxx, maxy FROM sentinel.tilecoords WHERE east > %(west)s AND west < %(east)s AND north > %(south)s AND south < %(north)s;" %query)
#self.cursor.execute("SELECT epsg, mgrs,utmzone,mgrsid,minx,miny,maxx,maxy,ullat,ullon,lrlat,lrlon,urlat,urlon,lllat,lllon FROM sentinel.tilecoords;" %query)
records = self.cursor.fetchall()
return records
def _SearchMGRSFromWSEN(self, west, south, east, north, sentinel):
query = {'west':west, 'south':south,'east':east,'north':north,'sentinel':sentinel}
if sentinel:
self.cursor.execute("SELECT mgrs,west,south,east,north FROM sentinel.mgrscoords WHERE east > %(west)s AND west < %(east)s AND north > %(south)s AND south < %(north)s AND sentinel = '%(sentinel)s';" %query)
else:
self.cursor.execute("SELECT mgrs,west,south,east,north FROM sentinel.mgrscoords WHERE east > %(west)s AND west < %(east)s AND north > %(south)s AND south < %(north)s;" %query)
records = self.cursor.fetchall()
return records
def _InsertGranuleTiles(self, granuleid, tileD):
query = {'granuleid':granuleid}
self.cursor.execute("SELECT mgrs FROM sentinel.granuletiles WHERE granuleid = '%(granuleid)s';" %query)
records = self.cursor.fetchall()
mgrsL = [item[0] for item in records]
for tile in tileD:
if tile not in mgrsL:
query['tile'] = tile
query['overlap'] = tileD[tile]
self.cursor.execute("INSERT INTO sentinel.granuletiles (granuleid, mgrs, overlap) VALUES ('%(granuleid)s', '%(tile)s', %(overlap)s)" %query)
self.conn.commit()
def _SelectGranuleTiles(self, granuleid,overlap):
#query = {'granuleid':granuleid}
query = {'granuleid':granuleid,'overlap':overlap}
self.cursor.execute("SELECT mgrs FROM sentinel.granuletiles WHERE granuleid = '%(granuleid)s' and overlap >= %(overlap)s;" %query)
records = self.cursor.fetchall()
mgrsL = [item[0] for item in records]
return mgrsL
def _GetGranuleMeta(self, granuleid):
query = {'granuleid':granuleid}
#print ("SELECT product, proclevel, orbitnr, orbitdir, cloudcover, sensopmode, s2datatakeid, procbase, platformid, platformname, instrument \
#FROM sentinel.granulemeta WHERE granuleid = '%(granuleid)s';" %query)
self.cursor.execute("SELECT product, proclevel, orbitnr, orbitdir, cloudcover, sensopmode, s2datatakeid, procbase, platformid, platformname, instrument \
FROM sentinel.granulemeta WHERE granuleid = '%(granuleid)s';" %query)
record = self.cursor.fetchone()
return record
def _GetGranuleTile(self, granuleid):
query = {'granuleid':granuleid}
#print ("SELECT orbitid, acqdate, acqtime, sunazimuth, sunelevation, doy, source, product, folder, filetype, filename, downloaded, organized, exploded, deleted, declouded, maskstatus, metacheck, tgnote \
#FROM sentinel.granules WHERE granuleid = '%(granuleid)s';" %query)
# KWsearch.pyw
#-*- coding: utf-8 -*-
"""KWsearch v0.1 by <NAME> (<EMAIL>)
The program provides fast keyword search within a directory tree.
Keywords are entered in the file class.pykb (utf-8 encoding),
which is located in that directory and contains, for example:
kw("книга", "book", "python")
It can also run as a Total Commander plugin.
To do so, drag the module onto the Total Commander button bar and add a parameter:
?%P -k or %P -k to add keywords
?%P -i or %P -i to index the current directory
?%P -s or %P -s to search in the current directory
Tested on Windows XP (Russian) and Python 2.6"""
# Added case-insensitive matching of letters
# Fixed duplication of the paths shown in the window during a search
# Added a keyword-editing mode
# Make it possible to run the knowledge-base code in the interpreter (execfile).
# Add a menu
import os,sys
import Tkinter
import tkMessageBox
import pickle
import re
def makeCode(dr):
"""Returns python code generated from the root directory dr (unicode string)
Also adds the directory name and the names of the directory's files to the keywords"""
# all strings must be unicode !!!
fileTypes=['.pdf','.djvu','.djv','.doc','.xls','.url','.txt']
print "Please wait"
KBcode=[]
i=0 # progress counter when processing large directory trees
for root, dirs, files in os.walk(dr):
KBcode.append(u'this=r"""'+root+'"""')#root.decode('mbcs')
# add only the names of files of the given types
KBcode.append(u'files='+'['+u','.join(['"'+f+'"' for f in files if os.path.splitext(f)[1].lower() in fileTypes])+']')
#KBcode.append(u'files=[]') # files can also be left out
if "class.pykb" in files: # if a class.pykb file is present
s=open(os.path.join(root,"class.pykb")).read() # read it
try:
s=s.decode('utf-8') # try to convert utf-8 to unicode
except:
s=u"kw()" # if that failed, use an empty keyword list
KBcode.append(s) # append it
else: # if there is no class.pykb file
KBcode.append(u"kw()") # append an empty keyword list
i+=1
if not i%1000: # every 1000 directories
print i # report progress
print i," directories"
# beginning of the generated code
code1=u"""#-*- coding: utf-8 -*-
import os
def kw(*w):
'Adds the keywords for the given directory to the index list'
index.append([this]+list(w)+files+[os.path.basename(this)])
"""
code=u"\n".join(KBcode)
code=code1+code
code=code.encode('utf-8')
writeCode(code)
return code
def writeCode(code):
f=open('allKBcode.py','w')
f.write(code)
f.close()
def run(dr):
"""Builds the index database for the directory dr (unicode string)"""
ns={'index':index} # namespace
code=makeCode(dr) # get the full code
exec(code, ns) # execute the code in the namespace
def index2Unicode(index):
return [[w.decode('utf-8') for w in r] for r in index]
def getIndexW(lst):
"""Returns a sorted list of elements (for fast lookup)
[keyword, directory index]
lst - list of elements [directory path, keyword1, keyword2,...]"""
i=0 # current index into lst
indexW=[]
for ws in lst: # for every element of lst
for w in ws[1:]: # for every keyword
indexW.append([w.lower(),i]) # append [keyword, directory index]
i+=1
return sorted(indexW) # sorted by keyword
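# Data-model note (illustration only): 'index' holds one list per directory,
# [path, keyword1, keyword2, ..., file names..., directory basename], while the sorted
# 'indexW' produced above holds [lower-cased keyword, position in index] pairs, e.g.
#   index  = [[u'd:/books', u'book', u'python', u'intro.pdf', u'books']]
#   indexW = [[u'book', 0], [u'books', 0], [u'intro.pdf', 0], [u'python', 0]]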
def loadIndex():
"""Loads the indexes from file
Returns the plain and the sorted keyword lists
If the file does not exist, returns [],[]"""
fn=os.path.join(rootdir,'kwindex.pkl')
if not os.path.isfile(fn): return [],[]
f = open(fn, 'rb')
index=pickle.load(f)
indexW=pickle.load(f)
f.close()
return index,indexW
def saveIndex():
"""Saves the indexes to file"""
fn=os.path.join(rootdir,'kwindex.pkl')
f = open(fn, 'wb')
pickle.dump(index, f) # plain list
pickle.dump(indexW, f) # sorted list
f.close()
def findSlice(lst,word):
"""Finds the index range within the sorted list lst
whose words start with word. Used for fast lookup"""
js=0 # index of the start of the range
n=0 # number of elements in the range
i=0 # current index into lst
for w,k in lst: # for every element of lst
if w.startswith(word.lower()): # if w starts with word
if n==0: # if nothing has been found yet
js=i # remember the index of the start of the range
n+=1 # increase the number of elements in the range
elif n!=0: # otherwise, if something was already found
break # stop
i+=1
return js,js+n
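# Usage sketch (not part of the original module): findSlice does a linear scan over the
# sorted [keyword, directory-index] list and returns the half-open range of entries whose
# keyword starts with the typed prefix.
def _demo_findSlice():
    sample = [[u"book", 0], [u"booklet", 2], [u"python", 1]]
    start, end = findSlice(sample, u"boo")
    assert (start, end) == (0, 2)
    return sample[start:end]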
def find(lst,regex):
"""Returns the elements of the list lst whose word matches regex"""
# ignores letter case
po=re.compile(regex,re.IGNORECASE | re.UNICODE) # compile the pattern into a regex object
res=[] # list of results
for w,k in lst: # for every element of lst
mo=po.search(w)# find the first match of the pattern in w
if mo: # if found
res.append([w,k]) # add it to the list
return res
#################################################################
# functions for adding keywords to the class.pykb file
def readKW(rootdir):
"""Returns the list of keywords from the class.pykb file"""
fl=os.path.join(rootdir,"class.pykb") # path to class.pykb
if not os.path.isfile(fl): # if there is no such file
open(fl,'w').close() # create an empty one
return []
KWlst=[] # list of keywords
i,lns=findKWline(fl) # find the number of the line with the keywords
if i==None: return [] # if there is no keyword line
ln=lns[i].rstrip() # the line with the keywords
for kw in ln[3:-1].split(","):
KWlst.append(kw.strip()[1:-1]) # add the keyword
if KWlst==[""]: KWlst=[] # if it contains an empty word, use an empty list
return KWlst
def findKWline(fl):
"""Returns the number of the line with the keywords and the list of lines"""
f=open(fl,'r') # open for reading
lns=f.readlines() # list of lines
f.close()
for i,ln in enumerate(lns): # for every line
ln=ln.rstrip() # without trailing spaces and '\n'
if ln.startswith("kw(") and ln.endswith(")"): # if it starts with "kw("
return i,lns
return None,lns # if not found
def writeKW(rootdir, kw):
"""Writes the list of keywords to the class.pykb file"""
fl=os.path.join(rootdir,"class.pykb") # path to class.pykb
kw=['"'+w+'"' for w in kw] # add quotes around every keyword
newline="kw("+", ".join(kw)+")\n" # build the line with the keywords
i,lns=findKWline(fl) # find the number of the line with the keywords
if i==None: # if not found
lns=[newline]+lns # add it as the first line
else: # otherwise
lns[i]=newline # put it in place of line i
f=open(fl,'w')
f.writelines(lns) # save
f.close()
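# Usage sketch (comments only, not part of the original module): readKW/writeKW
# round-trip the single kw(...) line of a directory's class.pykb file, so adding a
# keyword amounts to:
#   words = readKW(some_dir)        # e.g. ['книга', 'book', 'python']
#   words.append('tkinter')
#   writeKW(some_dir, words)        # rewrites the line as kw("книга", "book", ...)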
####################################################################
class MainWindow(Tkinter.Tk):
'''Main window class of the GUI application'''
def __init__(self, kw=None):
'''Constructor. kw - list of keywords'''
Tkinter.Tk.__init__(self) # call the base-class constructor
sizex, sizey = self.wm_maxsize() # get the max size
sizex, sizey=sizex, sizey-60
self.wm_geometry("%dx%d+0+0"%(sizex,sizey)) # set the size
font = 'times 14 bold' # font
self.s1 = Tkinter.StringVar() # variable that stores the text
self.entry=Tkinter.Entry(self, textvariable=self.s1, font=font,) #width=sizex-4
self.entry.pack(side='top', fill=Tkinter.X, pady=20)# place it
self.frame2 = Tkinter.Frame(self) # frame with the Listbox
self.listbox = Tkinter.Listbox(self.frame2, selectmode=Tkinter.SINGLE,exportselection=0)
scrollbar_y = Tkinter.Scrollbar(self.frame2)
scrollbar_x = Tkinter.Scrollbar(self.frame2, orient=Tkinter.HORIZONTAL)
scrollbar_y.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
scrollbar_x.pack(side=Tkinter.BOTTOM, fill=Tkinter.X)
self.listbox.pack(side=Tkinter.TOP, fill=Tkinter.Y, expand=1)
scrollbar_y['command'] = self.listbox.yview #scrollbar_y.config(command=listbox.yview)
scrollbar_x['command'] = self.listbox.xview
self.listbox['yscrollcommand'] = scrollbar_y.set
self.listbox['xscrollcommand'] = scrollbar_x.set
self.listbox['width']=140
self.frame2.pack(side=Tkinter.TOP, fill=Tkinter.Y, expand=1)
if kw==None: # if in search mode
self.title("KWsearch v0.1 - search mode") # title
self.entry.bind("<KeyRelease>", self.keyRelease)
self.listbox.bind('<Double-ButtonRelease-1>', self.dblClicked1)
self.totalcmd=r"c:\totalcmd\TOTALCMD.EXE" # path to Total Commander
if not os.path.isfile(self.totalcmd): # if the file does not exist
tkMessageBox._show('Warning',self.totalcmd+'\n - incorrect Total Commander path!',icon=tkMessageBox.WARNING,type=tkMessageBox.OK)
# current list of elements [keyword, directory index]
# needed for fast lookup
self.indexWcur=indexW
else: # if in add-keyword mode
self.title("KWsearch v0.1 - add keyword mode") # title
self.entry.bind("<Return>", self.keyReturn)
self.listbox.bind('<Double-ButtonRelease-1>', self.dblClicked2)
self.kw=kw
self.buildListKW()
#############################################################
# functions for search mode
def buildList(self, lst):
"""Fills self.listbox with the elements of lst"""
drs=set() # set of directories (to avoid duplicates)
for kw,i in lst: # for every item in the list
drs.add(index[i][0]) # add the directory name to the set
for dr in drs:
self.listbox.insert(Tkinter.END, dr) # add it to the listbox
def keyRelease(self,event):
"""Key release while typing text in the entry field
Shows the results immediately as keys are pressed.
After pressing Enter, shows the results matching a regular expression"""
self.listbox.delete("0", Tkinter.END) # clear the listbox
text= self.s1.get() # get the text from the input field
if text=="":
self.indexWcur=indexW
return
#http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
if event.keysym=="Return": # if Enter was pressed
ind=find(indexW, text) # search by regular expression
self.buildList(ind) # show the results
return
sl=findSlice(self.indexWcur,text) # find the indexes of the range
if sl[1]!=0: # if something was found
self.indexWcur=self.indexWcur[sl[0]:sl[1]] # range of [keyword, directory index] elements
self.buildList(self.indexWcur) # show the results
def dblClicked1(self,event=None):
"""Double mouse click. Opens the directory in Total Commander"""
selIndex=self.listbox.curselection()[0] # index of the selected item
path=self.listbox.get(selIndex) # text of the selected item
if os.path.isdir(path): # if the directory exists
# open it in totalcmd (which must already be running)
os.system(self.totalcmd+' /O "'+path.encode('mbcs')+'"') # path in quotes!
#############################################################
# functions for add-keyword mode
def keyReturn(self,event):
"""Enter pressed. Adds a new keyword"""
text= self.s1.get().encode('utf-8') # get the text from the input field
if text and text not in self.kw: # check that the keyword is valid
self.kw.append(text) # add the keyword
writeKW(rootdir, self.kw) # write the keywords
self.buildListKW() # rebuild the list
def buildListKW(self):
"""Builds the list of keywords"""
self.listbox.delete("0", Tkinter.END) # clear the listbox
for kw in self.kw: # for every keyword
self.listbox.insert(Tkinter.END, kw) # add it to the listbox
def dblClicked2(self,event=None):
"""Double mouse click. Removes a keyword"""
selIndex=self.listbox.curselection()[0] # index of the selected item
text=self.listbox.get(selIndex) # text of the selected item
self.kw.remove(text.encode('utf-8')) # remove the keyword
writeKW(rootdir, self.kw) # write the keywords
self.buildListKW() # rebuild the list
##################################################################
# global variables
rootdir=None # root directory (unicode)
index=[] # list of unicode elements [directory path, keyword1, keyword2,...]
indexW=[] # sorted list of elements [keyword, directory index]
if __name__ == '__main__':
#sys.argv.append(ur"d:\!Music_unsaved") # !!