#Import all necessary libraries
#Data File Libraries
import csv
import pandas as pd
import glob
import os
#Math Function Libraries
import math
import statistics
#3D Graphing Libraries
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
#Define Necessary Functions
#Define function to calculate mode
#Returns the mode of the inputted data
def calculate_mode(input_list):
    count_dict = {}
    found_mode = []
    max_count = 0
    for value in input_list:
        if value not in count_dict:
            count_dict[value] = 0
        count_dict[value] += 1
        if count_dict[value] > max_count:
            max_count = count_dict[value]
    #If several values tie for the highest count, the last one encountered is kept
    for value, count in count_dict.items():
        if count == max_count:
            found_mode = value
    return found_mode, max_count
#Define function to calculate mean
#Returns the mean of the inputted data
def calculate_mean(input_list):
    total = 0
    count = 0
    for value in input_list:
        #Skip NaN entries so they affect neither the total nor the count
        if not math.isnan(value):
            total += value
            count += 1
    found_mean = total / count
    return found_mean
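#Example for calculate_mean (hypothetical values): calculate_mean([2.0, 4.0, float("nan")])
#returns 3.0, because the NaN entry is excluded from both the total and the count.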
#Define function to calculate median
#Returns the median of the inputted data
def calculate_median(input_list):
    found_median = statistics.median_low(input_list)
    return found_median
#Define function to calculate range
#Returns the range of the inputted data
def calculate_range(input_list):
    sorted_input_list = sorted(input_list)
    lowest_value = sorted_input_list[0]
    highest_value = sorted_input_list[-1]
    found_range = highest_value - lowest_value
    return found_range
#Define function to perform all calculations at once
#Returns final values from above 4 functions
def calculation_processor(input_list):
    found_mode, max_count = calculate_mode(input_list)
    found_mean = calculate_mean(input_list)
    found_median = calculate_median(input_list)
    found_range = calculate_range(input_list)
    return found_mode, found_mean, found_median, found_range
#Define function to present processed data
#Returns processed data in easy-to-read manner
def data_return(found_mode, found_mean, found_median, found_range, data_metric, data_file):
    print("\nYou analyzed the metric {data_metric} from the file {data_file}.".format(data_metric = data_metric, data_file = data_file))
    print("\nThe mode was {found_mode}".format(found_mode = found_mode))
    print("\nThe mean was {found_mean}".format(found_mean = found_mean))
    print("\nThe median was {found_median}".format(found_median = found_median))
    print("\nThe range was {found_range}".format(found_range = found_range))
#Define function to gather a list for a specific metric from all files in a folder (ASK ABOUT GLOB + PANDAS)
#Returns a list that serves as input for future functions
def multiple_file_panda_read(data_folder, data_metric):
    input_list = []
    os.chdir("/" + data_folder)
    filenames = [i for i in glob.glob('*.csv')]
    df_collection = (pd.read_csv(f) for f in filenames)
    concatenated_df = pd.concat(df_collection, ignore_index = True, sort = True)
    input_list = concatenated_df[data_metric]
    return input_list
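#Example for multiple_file_panda_read (hypothetical folder and metric names):
#multiple_file_panda_read("sensor_logs", "X") changes into /sensor_logs, reads every
#*.csv found there, and returns the concatenated "X" column as a pandas Series.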
#Define function to gather a list for a specific metric from a single file
#Returns a list that serves as input for future functions
def single_file_panda_read(data_folder, data_file, data_metric):
    file_storage_value = ''
    input_list = []
    os.chdir("/" + data_folder)
    filenames = [i for i in glob.glob('*.csv')]
    if data_file in filenames:
        file_storage_value = data_file
    df = pd.read_csv(file_storage_value)
    input_list = df[data_metric]
    return input_list
#Define function to return a plot of the XYZ scatter plot graph
#Returns a 3D scatter plot graph
def X_Y_Z_plot(data_folder, data_file, graph_parameters, pathname, save_graph):
    coordinate_dictionary = {}
    file_storage_value = ''
    os.chdir("/" + data_folder)
    filenames = [i for i in glob.glob('*.csv')]
    if (data_file == 'all.csv'):
        df_collection = (pd.read_csv(f) for f in filenames)
        concatenated_df = pd.concat(df_collection, ignore_index=True)
        dataframe = concatenated_df
    else:
        if data_file in filenames:
            file_storage_value = data_file
        dataframe = pd.read_csv(file_storage_value)
    fig = plt.figure()
    ax = plt.axes(projection="3d")
    ax.scatter3D(dataframe["X"], dataframe["Y"], dataframe["Z"], c = 'r', marker = 'o')
    ax.set_xlabel('X Axis')
    ax.set_ylabel('Y Axis')
    ax.set_zlabel('Z Axis')
    if save_graph == 1:
        plt.savefig(os.path.join(pathname, r'3D_Graph.png'), bbox_inches='tight')
        plt.show()
    else:
        plt.show()
#Define function to compose a 2Dimensional plot of X vs Y graphs
#Returns a 2D Scatter plot graph
def X_Y_plot(data_folder, data_file, graph_parameters, pathname, save_graph):
    coordinate_dictionary = {}
    file_storage_value = ''
    os.chdir("/" + data_folder)
    filenames = [i for i in glob.glob('*.csv')]
    if (data_file == 'all.csv'):
        df_collection = (pd.read_csv(f) for f in filenames)
        concatenated_df = pd.concat(df_collection, ignore_index = True, sort = True)
        dataframe = concatenated_df
    else:
        if data_file in filenames:
            file_storage_value = data_file
        dataframe = pd.read_csv(file_storage_value)
    x_list = list(dataframe["X"])
    y_list = list(dataframe["Y"])
    current_axis = plt.gca()
    current_axis.add_patch(Rectangle((calculate_mean(x_list) - .4, calculate_mean(y_list) - .4), (graph_parameters[1] - graph_parameters[0]), (graph_parameters[3] - graph_parameters[2]), facecolor = 'grey'))
    plt.plot(x_list, y_list, 'ro')
    x_list_sorted = sorted(x_list)
    y_list_sorted = sorted(y_list)
    parameters = [(x_list_sorted[0] - 1), (x_list_sorted[-1] + 1), (y_list_sorted[0] - 1), (y_list_sorted[-1] + 1)]
    plt.axis(parameters)
    plt.grid(axis = 'both')
    if save_graph == 1:
        #Saved under its own filename so it does not overwrite the 3D graph
        plt.savefig(os.path.join(pathname, r'2D_Graph.png'), bbox_inches='tight')
        plt.show()
    else:
        plt.show()
#Define If-Else function to determine whether to use single file function or multiple file function
#Returns a list that serves as input for future functions
def single_or_multiple_panda_reader(data_folder, data_file, data_metric):
    if (data_file == "all.csv"):
        input_list = multiple_file_panda_read(data_folder, data_metric)
        return input_list
    else:
        input_list = single_file_panda_read(data_folder, data_file, data_metric)
        return input_list
#Define a function to clean the raw data into processable data
#Returns a clean input_list
def data_list_cleaner(input_list):
    #Build a new list instead of popping elements while iterating, which would
    #skip any value that immediately follows a removed NaN
    input_list_clean = [float(value) for value in input_list if not math.isnan(float(value))]
    return input_list_clean
#Define a function to translate a dictionary into a csv file at a predestined location
def dictionary_to_csv(input_dictionary, pathname):
    df = pd.DataFrame(input_dictionary, index = ["Mode", "Mean", "Median", "Range", "Data Range [Lower, Upper]", "Most Popular Option", "Most Popular Count"])
    df.to_csv(os.path.join(pathname, r'Processed_Data.csv'))
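#The dictionary passed to dictionary_to_csv is expected to hold one column per metric,
#with entries lining up with the seven index labels above, e.g. (hypothetical values)
#{"X": [2.0, 2.5, 2.0, 6.0, "[1.1, 3.9]", "N/A", "N/A"]}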
#Define a function to calculate where the data is most dense(Coordinate System)
#Returns a list of the upper and lower bounds
def variable_density(input_list):
    select_mad = np.mean(np.absolute(input_list - np.mean(input_list)))
    found_mean = calculate_mean(input_list)
    coordinate_limits = [(found_mean - select_mad), (found_mean + select_mad)]
    return coordinate_limits # lower bound then upper bound
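#Worked example for variable_density (hypothetical data): for [1, 2, 3, 9] the mean is
#3.75 and the mean absolute deviation is (2.75 + 1.75 + 0.75 + 5.25) / 4 = 2.625, so the
#returned bounds are [1.125, 6.375].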
#Define function to determine whether or not to continue the while loop based on input
#Returns a variable that stops while loop or continues it
def continue_loop(data_folder, data_file, data_metric):
    print("\n----------------------------------------------------------------------------------------------------------------------------------------------------")
    #Determines if user wants to analyze anything else (Gateways act as checkpoints)
    metric_gateway = input("\nWould you like to analyze another metric from the same data file(s)(y/n)? ")
    #Determines if user wants to analyze another file
    if (metric_gateway.lower() == "n"):
        file_gateway = input("\nWould you like to analyze data from a different data file(s)(y/n)? ")
        #Determines if user wants to access a different folder
        if (file_gateway.lower() == "n"):
            folder_gateway = input("\nWould you like to analyze data from another folder(y/n)? ")
            if (folder_gateway.lower() == "n"):
                placeholder = 0
            #Asks user to enter new data based on previous answer
            else:
                data_folder = input("\nWhat folder would you like to analyze data from? ")
                #".csv" is appended so the new name matches the filenames found by glob,
                #mirroring specific_introduction_interface
                data_file = input("\nWhat data file would you like to analyze? ") + ".csv"
                data_metric = input("\nWhich metric would you like to analyze from the above file(s)? ")
                print("\n----------------------------------------------------------------------------------------------------------------------------------------------------")
        else:
            data_file = input("\nWhat data file would you like to analyze? ") + ".csv"
            data_metric = input("\nWhich metric would you like to analyze from the above file(s)? ")
            print("\n----------------------------------------------------------------------------------------------------------------------------------------------------")
    else:
        data_metric = input("\nWhich metric would you like to analyze from the above file(s)? ")
        print("\n----------------------------------------------------------------------------------------------------------------------------------------------------")
    #Determines if loop should stop; "and" short-circuits, so file_gateway and
    #folder_gateway are only checked once they have actually been asked
    if (metric_gateway.lower() == "n") and (file_gateway.lower() == "n") and (folder_gateway.lower() == "n"):
        stop = 1
    else:
        stop = 0
    return stop, data_folder, data_file, data_metric
#Define function to gather data_folder, data_file, and data_metric to be used from user
#Returns file(s) and metric to analyze
def specific_introduction_interface():
    print("\n----------------------------------------------------------------------------------------------------------------------------------------------------")
    data_folder = input("\nWhat is the name of the folder the file(s) are stored in? ")
    print("\nTo analyze all data files enter the keyword \"all\".")
    data_file = input("Which data file(s) would you like to analyze? ")
    print("\nTo create an XY and XYZ graph enter the keyword \"graph\".")
    data_metric = input("Which metric would you like to analyze from the above file(s)? ")
    print("\n----------------------------------------------------------------------------------------------------------------------------------------------------")
    data_file_correct = data_file + ".csv"
    return data_folder, data_file_correct, data_metric
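#Example for specific_introduction_interface (hypothetical answers): entering "run_data",
#"all" and "X" returns ("run_data", "all.csv", "X"); the "all.csv" name later routes
#reads through multiple_file_panda_read rather than single_file_panda_read.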
#Specific Data Collection function, contains everything
def specific_file_and_metric():
    #Defining initial variables
    stop = 0
    graph_parameters = []
    save_graph = 0
    pathname = "N/A"
    #Find initial data acquisition area
    data_folder, data_file, data_metric = specific_introduction_interface()
    #While loop to repeat function as necessary
    while (stop == 0):
        #If data_metric = "graph", creates and returns a plot of the XYZ 3D graph and XY 2D graph, else normal data collection(0 = no parameters, 1 = special parameters)
        if data_metric == "graph":
            for letter in "XYZ":
                input_list = multiple_file_panda_read(data_folder, letter)
                data_range = variable_density(input_list)
                for value in data_range:
                    graph_parameters.append(value)
            X_Y_plot(data_folder, data_file, graph_parameters, pathname, save_graph)
            X_Y_Z_plot(data_folder, data_file, graph_parameters, pathname, save_graph)
        else:
            if (data_metric == "Button_active") or (data_metric == "Screen_mode"):
                #Gathers data to analyze
                input_list = multiple_file_panda_read(data_folder, data_metric)
                #Calculates all possible results given the available metric
                most_popular_option, popular_option_count = calculate_mode(input_list)
                #Returns the processed data to the user
                print("\nYou analyzed the metric {data_metric} from the file
'Liston'},
'61745005':{'en': 'Coondarra'},
'61745004':{'en': 'Chinchilla'},
'61745007':{'en': '<NAME>'},
'61745006':{'en': 'Dalby'},
'61745001':{'en': 'Brigalow'},
'61745000':{'en': 'Bowenville'},
'61745003':{'en': 'Cecil Plains'},
'61745002':{'en': 'Bunya Mountains'},
'61380994':{'en': 'Sunbury'},
'61380995':{'en': 'Sunbury'},
'61380996':{'en': 'Sunbury'},
'61380997':{'en': 'Sunbury'},
'61745832':{'en': 'Paroo'},
'61745785':{'en': 'Roma'},
'61380998':{'en': 'Sunbury'},
'61380999':{'en': 'Sunbury'},
'61734788':{'en': 'Dunwich'},
'61734789':{'en': 'Bribie Island'},
'61734782':{'en': 'Kooringal'},
'61734783':{'en': 'Dunwich'},
'61743180':{'en': 'Gaeta'},
'61734781':{'en': 'Russell Island'},
'61734786':{'en': 'Dunwich'},
'61734787':{'en': 'Kooringal'},
'61734784':{'en': 'Bribie Island'},
'61734785':{'en': 'Dunwich'},
'61741632':{'en': 'Nanango'},
'61741633':{'en': 'Nanango'},
'61741630':{'en': 'Blackbutt'},
'61741395':{'en': 'Mulgildie'},
'61741631':{'en': 'Nanango'},
'61741636':{'en': 'Kingaroy'},
'61741637':{'en': 'Nanango'},
'61740239':{'en': 'Weipa'},
'61740238':{'en': 'Walsh River'},
'61740989':{'en': 'Daintree'},
'61740988':{'en': 'Mossman'},
'61740235':{'en': 'Thursday Island'},
'61740234':{'en': 'South Johnstone'},
'61740237':{'en': 'Tully'},
'61740236':{'en': 'Torres'},
'61740231':{'en': 'Peninsula'},
'61740230':{'en': 'Mutchilba'},
'61740233':{'en': 'Silkwood'},
'61740232':{'en': 'Ravenshoe'},
'61742585':{'en': 'Gordonvale'},
'61742584':{'en': 'Gordonvale'},
'61742587':{'en': 'Mossman'},
'61742586':{'en': 'Gordonvale'},
'61742589':{'en': 'Atherton'},
'61742588':{'en': 'Mossman'},
'61744309':{'en': 'Townsville'},
'61742073':{'en': 'Bloomfield'},
'61744308':{'en': 'Townsville'},
'61742072':{'en': 'Bloomfield'},
'61747025':{'en': 'Bowen'},
'61747024':{'en': 'Bowen'},
'61747027':{'en': 'Briaba'},
'61747026':{'en': 'Briaba'},
'6174068':{'en': 'Tully'},
'6174067':{'en': 'Babinda'},
'6174066':{'en': 'Euramo'},
'6174061':{'en': 'Innisfail'},
'61742078':{'en': 'Minnamoolka'},
'61743407':{'en': 'Booyal'},
'61747023':{'en': 'Bowen'},
'61747022':{'en': 'Boulia'},
'61743405':{'en': 'Blackbutt'},
'61743404':{'en': 'Biggenden'},
'61741142':{'en': 'Maryborough'},
'61741634':{'en': 'Nanango'},
'61741143':{'en': 'Tansey'},
'61388998':{'en': 'Melbourne'},
'61388999':{'en': 'Melbourne'},
'61388996':{'en': 'Melbourne'},
'61388997':{'en': 'Melbourne'},
'61741809':{'en': 'Brooklands'},
'61741808':{'en': 'Blackbutt'},
'61741635':{'en': 'Johnstown West'},
'61741801':{'en': 'Gooroolba'},
'61741148':{'en': 'Tiaro'},
'61741803':{'en': 'Monto'},
'61741802':{'en': 'Monogorilby'},
'61741805':{'en': 'Mulgildie'},
'61741804':{'en': 'Moonford'},
'61741807':{'en': 'Tandora'},
'61741149':{'en': 'Monto'},
'61746468':{'en': 'Paroo'},
'61746469':{'en': 'Wallumbilla'},
'61746462':{'en': 'Toowoomba'},
'61746463':{'en': 'Toowoomba'},
'61746460':{'en': 'Toowoomba'},
'61746461':{'en': 'Toowoomba'},
'61746466':{'en': 'Charleville'},
'61746467':{'en': 'Cunnamulla'},
'61746464':{'en': 'Toowoomba'},
'61746465':{'en': 'Augathella'},
'61745160':{'en': 'Brigalow/Bimbadeen'},
'61746691':{'en': 'Chinchilla'},
'61361362':{'en': 'Geeveston'},
'61361363':{'en': 'Gretna'},
'61361360':{'en': 'Dover'},
'61361361':{'en': 'Dunalley'},
'61361366':{'en': 'Huonville'},
'61361367':{'en': 'Kempton'},
'61361364':{'en': 'Hermitage'},
'61361365':{'en': 'Hobart'},
'6173922':{'en': 'Brisbane'},
'61361368':{'en': 'Little Swanport'},
'61361369':{'en': 'Margate'},
'6139925':{'en': 'Melbourne'},
'6139292':{'en': 'Melbourne'},
'6139927':{'en': 'Melbourne'},
'6139926':{'en': 'Melbourne'},
'6139921':{'en': 'Melbourne'},
'6139920':{'en': 'Melbourne'},
'6139923':{'en': 'Melbourne'},
'61741650':{'en': 'Tandora'},
'6139922':{'en': 'Melbourne'},
'61738278':{'en': 'Beenleigh'},
'61738279':{'en': 'Beenleigh'},
'61738272':{'en': 'Cleveland'},
'61738273':{'en': 'Cleveland'},
'61738270':{'en': 'Cleveland'},
'61738271':{'en': 'Cleveland'},
'61738276':{'en': 'Beenleigh'},
'61738277':{'en': 'Beenleigh'},
'61738274':{'en': 'Cleveland'},
'61738275':{'en': 'Beenleigh'},
'61741651':{'en': 'Eidsvold'},
'61362918':{'en': 'Little Swanport'},
'61362949':{'en': 'New Norfolk'},
'61362948':{'en': 'Margate'},
'61362943':{'en': 'South Bruny'},
'61362942':{'en': 'Geeveston'},
'61362941':{'en': 'Dover'},
'61362940':{'en': 'Hobart'},
'61362947':{'en': 'Huonville'},
'61362946':{'en': 'Hobart'},
'61362945':{'en': 'Brighton'},
'61362944':{'en': 'Southwest'},
'61745966':{'en': 'Toowoomba'},
'61745967':{'en': 'Tara'},
'61745964':{'en': 'Goombungee'},
'61745965':{'en': 'Aramac'},
'61745962':{'en': 'Goombungee'},
'61745963':{'en': 'Goombungee'},
'61745960':{'en': 'Cambooya'},
'61745961':{'en': 'Toowoomba'},
'61745968':{'en': 'Dalby'},
'61745969':{'en': 'Dalby'},
'6139572':{'en': 'Melbourne'},
'6139573':{'en': 'Melbourne'},
'6139570':{'en': 'Melbourne'},
'6139571':{'en': 'Melbourne'},
'6139576':{'en': 'Melbourne'},
'6139577':{'en': 'Melbourne'},
'6139574':{'en': 'Clayton'},
'6139575':{'en': 'Melbourne'},
'6139578':{'en': 'Melbourne'},
'6139579':{'en': 'Melbourne'},
'61741653':{'en': 'Mundubbera'},
'61741654':{'en': 'Mundubbera'},
'6174661':{'en': 'Warwick'},
'61744169':{'en': 'Prairie'},
'61741655':{'en': 'Mundubbera'},
'61744168':{'en': 'Pentland'},
'6138190':{'en': 'Kalkallo'},
'6138199':{'en': 'Melbourne'},
'61744164':{'en': 'Long Pocket'},
'61744166':{'en': 'Mount Fox'},
'61744161':{'en': 'Karumba'},
'61744160':{'en': 'Kalkadoon'},
'61364089':{'en': 'Yambacoona'},
'61746066':{'en': 'Eschol'},
'61745618':{'en': 'Isisford'},
'61745619':{'en': 'Jericho'},
'61745610':{'en': 'Diamantina'},
'61745611':{'en': 'Dirranbandi'},
'61745612':{'en': 'Dirranbandi'},
'61745613':{'en': 'Haddon'},
'61745614':{'en': 'Haddon'},
'61745615':{'en': 'Injune'},
'61745616':{'en': 'Injune'},
'61745617':{'en': 'Isisford'},
'61746067':{'en': 'Freestone'},
'61734004':{'en': 'Dayboro'},
'61745820':{'en': 'Lynd Range'},
'61746064':{'en': '<NAME>'},
'61741129':{'en': 'Monogorilby'},
'61745809':{'en': 'Jericho'},
'61746065':{'en': 'Elbow Valley'},
'61746760':{'en': 'Wyaga'},
'61745511':{'en': 'Inglewood'},
'61746062':{'en': 'Dulacca'},
'61742178':{'en': 'South Johnstone'},
'61742179':{'en': 'Mutchilba'},
'61746129':{'en': 'Helidon'},
'61746128':{'en': 'Haden'},
'61742174':{'en': 'Innot Hot Springs'},
'61742175':{'en': '<NAME>'},
'61742176':{'en': 'Herberton'},
'61742177':{'en': 'Babinda'},
'6138510':{'en': 'Clayton'},
'61742171':{'en': 'Hopevale'},
'61742172':{'en': 'Malanda'},
'61742173':{'en': 'Georgetown'},
'61741470':{'en': 'Burnett'},
'61741471':{'en': 'Eidsvold'},
'61741472':{'en': 'Tandora'},
'61741473':{'en': 'Chahpingah'},
'61741474':{'en': '<NAME>'},
'61741475':{'en': 'Kingaroy'},
'61741476':{'en': 'Kumbia'},
'61741477':{'en': 'Maidenwell'},
'61741478':{'en': 'Nanango'},
'61741479':{'en': 'Yarraman'},
'61731509':{'en': 'Brisbane'},
'61731508':{'en': 'Brisbane'},
'61731503':{'en': 'Sandgate'},
'61731502':{'en': 'Sandgate'},
'61731501':{'en': 'Brisbane'},
'61731500':{'en': 'Brisbane'},
'61731507':{'en': 'Brisbane'},
'61731506':{'en': 'Sandgate'},
'61731504':{'en': 'Sandgate'},
'61746060':{'en': 'Diamondy'},
'6136230':{'en': 'Hobart'},
'6136231':{'en': 'Hobart'},
'6136232':{'en': 'Hobart'},
'6136233':{'en': 'Hobart'},
'6136234':{'en': 'Hobart'},
'6136235':{'en': 'Hobart'},
'6136236':{'en': 'Hobart'},
'6136237':{'en': 'Hobart'},
'6136238':{'en': 'Hobart'},
'6136239':{'en': 'Hobart'},
'61746699':{'en': 'Dalby'},
'61746698':{'en': 'Dalby'},
'61746061':{'en': 'Dirranbandi'},
'61741614':{'en': 'Gayndah'},
'61741615':{'en': 'Gayndah'},
'61741616':{'en': 'Gooroolba'},
'61741617':{'en': 'Burnett'},
'6139019':{'en': 'Melbourne'},
'6139018':{'en': 'Melbourne'},
'61741612':{'en': 'Gayndah'},
'61741613':{'en': 'Gayndah'},
'6139015':{'en': 'Melbourne'},
'6139014':{'en': 'Melbourne'},
'6139017':{'en': 'Melbourne'},
'6139016':{'en': 'Melbourne'},
'6139011':{'en': 'Melbourne'},
'6139010':{'en': 'Melbourne'},
'6139013':{'en': 'Melbourne'},
'6139012':{'en': 'Melbourne'},
'61745804':{'en': 'Toowoomba'},
'6138514':{'en': 'Clayton'},
'6139909':{'en': 'Melbourne'},
'6139908':{'en': 'Melbourne'},
'6139903':{'en': 'Melbourne'},
'6139902':{'en': 'Clayton'},
'6139901':{'en': 'Melbourne'},
'6139900':{'en': 'Melbourne'},
'6139907':{'en': 'Melbourne'},
'6139906':{'en': 'Melbourne'},
'6139905':{'en': 'Clayton'},
'6139904':{'en': 'Dandenong'},
'61387447':{'en': 'Werribee'},
'61387446':{'en': 'Werribee'},
'61387445':{'en': 'Werribee'},
'61387444':{'en': 'Werribee'},
'61387443':{'en': 'Werribee/Point Cook'},
'61387442':{'en': 'Werribee'},
'61387441':{'en': 'Werribee'},
'61387440':{'en': 'Werribee'},
'6173026':{'en': 'Brisbane'},
'6173027':{'en': 'Brisbane'},
'6173024':{'en': 'Brisbane'},
'6173025':{'en': 'Brisbane'},
'6173022':{'en': 'Brisbane'},
'6173023':{'en': 'Brisbane'},
'61387449':{'en': 'Werribee'},
'6173021':{'en': 'Brisbane'},
'6138768':{'en': 'Dandenong'},
'6138769':{'en': 'Dandenong'},
'6138765':{'en': 'Dandenong'},
'6138766':{'en': 'Dandenong'},
'6138767':{'en': 'Whittlesea'},
'6138761':{'en': 'Croydon'},
'6138762':{'en': 'Dandenong'},
'61746686':{'en': 'Diamondy'},
'61742203':{'en': 'Cooktown'},
'61742207':{'en': 'Aurukun'},
'61742447':{'en': 'Cairns'},
'61746687':{'en': 'Dunmore'},
'61742204':{'en': 'South Johnstone'},
'61742205':{'en': 'Tully'},
'6138508':{'en': 'Melbourne'},
'6138509':{'en': 'Melbourne'},
'61364019':{'en': 'Smithton'},
'61364018':{'en': 'Rosebery'},
'61364017':{'en': 'Rosebery'},
'61364016':{'en': 'Rosebery'},
'6138502':{'en': 'Melbourne'},
'6138503':{'en': 'Melbourne'},
'6138504':{'en': 'Melbourne'},
'6138505':{'en': 'Clayton'},
'61364011':{'en': 'Marrawah'},
'61364010':{'en': 'Marrawah'},
'61745575':{'en': 'Tipton'},
'61741351':{'en': 'Burnett'},
'61745577':{'en': 'Warra'},
'61745576':{'en': 'Warra'},
'61745571':{'en': 'Kumbarilla'},
'61745570':{'en': 'Kumbarilla'},
'61745573':{'en': 'Tara'},
'61741350':{'en': 'Tandora'},
'61745579':{'en': '<NAME>'},
'61741353':{'en': 'Chahpingah'},
'61741352':{'en': 'Eidsvold'},
'61743336':{'en': 'Mulgildie'},
'61741357':{'en': 'Maidenwell'},
'61741356':{'en': 'Kumbia'},
'61741358':{'en': 'Nanango'},
'61746705':{'en': 'Jondaryan'},
'61740439':{'en': 'Tully'},
'61740438':{'en': 'Innisfail'},
'61740437':{'en': 'Bloomfield'},
'61740436':{'en': 'Malanda'},
'61740435':{'en': 'Ravenshoe'},
'61740434':{'en': 'Mareeba'},
'61740433':{'en': 'Gordonvale'},
'61740432':{'en': 'Babinda'},
'61740431':{'en': 'Peninsula'},
'61740430':{'en': 'Cooktown'},
'61743160':{'en': 'Childers'},
'61743161':{'en': 'Monogorilby'},
'61743162':{'en': 'Murgon'},
'61743163':{'en': 'Windera'},
'61743164':{'en': 'Wondai'},
'61743165':{'en': 'Bunker'},
'61743166':{'en': 'Maidenwell'},
'61743167':{'en': 'Boondooma'},
'61743168':{'en': 'Mundubbera'},
'61743169':{'en': 'Eidsvold'},
'61746688':{'en': '<NAME>'},
'61746105':{'en': 'Muttaburra'},
'61744226':{'en': 'Giru'},
'61744227':{'en': 'Townsville'},
'61744224':{'en': 'Yabulu'},
'61744225':{'en': 'Giru'},
'61744222':{'en': 'Clarke'},
'61744223':{'en': 'Yabulu'},
'61383629':{'en': 'Craigieburn'},
'61383628':{'en': 'Sydenham'},
'61383627':{'en': 'Craigieburn'},
'61383626':{'en': 'Sydenham'},
'61383625':{'en': 'Sydenham'},
'61383624':{'en': 'Point Cook'},
'61383623':{'en': 'Melbourne'},
'61383622':{'en': 'Point Cook'},
'61383621':{'en': 'Craigieburn'},
'61383620':{'en': 'Craigieburn'},
'61743146':{'en': 'Nanango'},
'61743147':{'en': 'Kingaroy'},
'61743144':{'en': 'Childers'},
'61743145':{'en': 'Manumbar'},
'61743142':{'en': 'Johnstown West'},
'61746107':{'en': 'North Star'},
'61743143':{'en': 'Maidenwell'},
'61363096':{'en': 'Whitemark'},
'61363097':{'en': 'Whitemark'},
'61363094':{'en': 'Westbury'},
'61363095':{'en': 'Westbury'},
'61363092':{'en': 'Waterhouse'},
'61363093':{'en': 'Westbury'},
'61363090':{'en': 'Waterhouse'},
'61363091':{'en': 'Waterhouse'},
'61743141':{'en': 'Tiaro'},
'61363098':{'en': 'Whitemark'},
'61363099':{'en': 'Exeter'},
'61746106':{'en': 'Nobby'},
'61743148':{'en': 'Brooklands'},
'61746101':{'en': 'Mitchell'},
'61743149':{'en': 'Bunker'},
'6174041':{'en': 'Cairns'},
'6174040':{'en': 'Cairns'},
'6174042':{'en': 'Cairns'},
'6174045':{'en': 'Cairns'},
'6174044':{'en': 'Cairns'},
'6174046':{'en': 'Cairns'},
'61746100':{'en': 'Millmerran'},
'61741180':{'en': 'Chahpingah'},
'61746103':{'en': 'Morven'},
'61746927':{'en': 'Brymaroo'},
'61741869':{'en': 'Bundaberg'},
'61741868':{'en': 'Howard'},
'61741867':{'en': 'Howard'},
'61741866':{'en': 'Farnsfield'},
'61741865':{'en': 'Farnsfield'},
'61741864':{'en': 'Childers'},
'61741181':{'en': '<NAME>'},
'61741862':{'en': 'Brooweena'},
'61741861':{'en': 'Brooweena'},
'61741860':{'en': 'Booyal'},
'61746102':{'en': 'Moonie'},
'61746924':{'en': 'Jondaryan'},
'61746923':{'en': 'Jondaryan'},
'61746922':{'en': 'Jondaryan'},
'61746480':{'en': 'Arcadia Valley'},
'61746481':{'en': 'Thomson'},
'61746482':{'en': 'Winton'},
'61746483':{'en': 'Bimbadeen'},
'61746484':{'en': 'Cockatoo'},
'61746485':{'en': 'Condamine'},
'61746486':{'en': 'Barcaldine'},
'61746487':{'en': 'Barcoo'},
'61746488':{'en': 'Blackall'},
'61746489':{'en': 'Diamantina'},
'61741182':{'en': 'Kingaroy'},
'61746920':{'en': 'Oakey'},
'61741183':{'en': 'Kumbia'},
'61746211':{'en': 'Augathella'},
'6139268':{'en': 'Melbourne'},
'61741184':{'en': 'Maidenwell'},
'61746210':{'en': 'Augathella'},
'6139269':{'en': 'Melbourne'},
'61746213':{'en': 'Cunnamulla'},
'61744241':{'en': 'Woodstock'},
'61746212':{'en': 'Charleville'},
'61746215':{'en': 'Quilpie'},
'61746214':{'en': 'Morven'},
'61367092':{'en': 'Whitemark'},
'61741185':{'en': 'Nanango'},
'61746217':{'en': 'Tambo'},
'61367093':{'en': 'Avoca'},
'61746216':{'en': 'Tambo'},
'61367090':{'en': 'Westbury'},
'61367091':{'en': 'Pyengana'},
'6139260':{'en': 'Melbourne'},
'61367096':{'en': 'Waterhouse'},
'6139261':{'en': 'Melbourne'},
'61367097':{'en': 'Moltema'},
'61745276':{'en': 'Toowoomba'},
'61741186':{'en': 'Yarraman'},
'61367094':{'en': 'Ringarooma'},
'61745277':{'en': 'Toowoomba'},
'61367095':{'en': 'Campbell Town'},
'61745274':{'en': 'Warwick/Bollon'},
'61741131':{'en': 'Redridge'},
'61741130':{'en': 'Redridge'},
'61741133':{'en': 'Rosedale'},
'61741132':{'en': 'Redridge'},
'61741135':{'en': 'Rosedale'},
'61741134':{'en': 'Rosedale'},
'61741137':{'en': 'Tandora'},
'61741136':{'en': 'Tandora'},
'61741139':{'en': 'Maryborough'},
'61741138':{'en': 'Tandora'},
'61745273':{'en': 'Legume/Blackall'},
'61741187':{'en': 'Biggenden'},
'61745270':{'en': 'Elbow Valley/Bell'},
'61745271':{'en': 'Freestone/Billa Billa'},
'61362961':{'en': 'Southwest'},
'61362960':{'en': 'Southwest'},
'61362962':{'en': 'Southwest'},
'6139598':{'en': 'Melbourne'},
'6139599':{'en': 'Melbourne'},
'6139594':{'en': 'Clayton'},
'6139595':{'en': 'Melbourne'},
'6139596':{'en': 'Melbourne'},
'6139597':{'en': 'Melbourne'},
'6139590':{'en': 'Clayton'},
'6139591':{'en': 'Melbourne'},
'6139592':{'en': 'Melbourne'},
'6139593':{'en': 'Melbourne'},
'61745908':{'en': 'Westmar/Winton/Wyaga/Yelarbon/Yetman/Yuleba/Yetman/Yuleba'},
'61745904':{'en': 'Moonie/Morven/Mount Tyson/Muttaburra/Nobby/North Star/Oakey/Omanama/Paroo/Pikedale'},
'61745905':{'en': 'Pittsworth/Quilpie/Ravensbourne/Robinson Gorge/Roma/Southwood/St George/Stanthorpe/Surat/Tabers'},
'61745906':{'en': 'Talwood/Tambo/Tara/Taroom/Teelba/Texas/Thallon/Thargomindah/The Gums/Thomson'},
'61745907':{'en': 'Tipton/Toobeah/Toowoomba/Valley Downs/Wallumbilla/Wandoan/Warra/Warrego/Warwick/Westgrove'},
'61745900':{'en': 'Dulacca/Dunmore/Durham Downs/Elbow Valley/Eschol/Freestone/Galilee/Glenhope/Goombi/Goombungee'},
'61745901':{'en': 'Goondiwindi/Greenmount/Guluguba/Haddon/Haden/Helidon/Inglewood/Injune/Isisford/Jandowae'},
'61745902':{'en': 'Jericho/Jimbour/Jondaryan/Jundah/Kilbeggan/Killarney/Kumbarilla/Kupunn/Legume/Leyburn'},
'61745903':{'en': 'Liston/Longreach/Lynd Range/Macalister/Maranoa/Meandarra/Miamba/Miles/Millmerran/Mitchell'},
'61743443':{'en': 'Tansey'},
'61745802':{'en': 'Toowoomba'},
'61364608':{'en': 'Waratah'},
'61364609':{'en': 'Wynyard'},
'61364602':{'en': 'Rosebery'},
'61364603':{'en': 'Savage River'},
'61364600':{'en': 'Marrawah'},
'61364601':{'en': 'Queenstown'},
'61364606':{'en': 'Stanley'},
'61364607':{'en': 'Ulverstone'},
'61364604':{'en': 'Sheffield'},
'61364605':{'en': 'Smithton'},
'61745638':{'en': 'Arcadia Valley'},
'61745639':{'en': 'Tambo'},
'61745636':{'en': 'Thargomindah'},
'6173178':{'en': 'Brisbane'},
'61745634':{'en': 'Thallon'},
'61745635':{'en': 'Thargomindah'},
'61745632':{'en': 'Tambo'},
'61745633':{'en': 'Thallon'},
'61745630':{'en': 'Talwood'},
'61745631':{'en': 'Tambo'},
'61730815':{'en': 'Beenleigh'},
'6173172':{'en': 'Brisbane'},
'6173173':{'en': 'Brisbane'},
'61745189':{'en': 'Dirranbandi'},
'61383380':{'en': 'Craigieburn'},
'61383381':{'en': 'Craigieburn'},
'61383382':{'en': 'Point Cook'},
'61383383':{'en': 'Craigieburn'},
'61383384':{'en': 'Sydenham'},
'61383385':{'en': 'Craigieburn'},
'61383386':{'en': 'Sydenham'},
'61383387':{'en': 'Point Cook'},
'61383388':{'en': 'Sydenham'},
'61383389':{'en': 'Point Cook'},
'61743399':{'en': 'Bundaberg'},
'61743398':{'en': 'Bundaberg'},
'61743393':{'en': 'Bundaberg'},
'61743392':{'en': 'Bundaberg'},
'61743391':{'en': 'Bundaberg'},
'6174121':{'en': 'Maryborough'},
'61743397':{'en': 'Bundaberg'},
'61743396':{'en': 'Bundaberg'},
'61743395':{'en': 'Bundaberg'},
'61743394':{'en': 'Bundaberg'},
'61745187':{'en': 'Bollon/Diamantina'},
'61742152':{'en': 'Kidston'},
'61742153':{'en': 'Peninsula'},
'61742150':{'en': 'Molloy'},
'61742151':{'en': 'Weipa'},
'61738849':{'en': 'Beenleigh'},
'61738848':{'en': 'Beenleigh'},
'61742154':{'en': 'Innisfail'},
'61742155':{'en': '<NAME>'},
'61738845':{'en': 'Ipswich'},
'61738844':{'en': 'Cleveland'},
'61738847':{'en': 'Beenleigh'},
'61738846':{'en': 'Ipswich'},
'61738841':{'en': 'Redcliffe'},
'61738840':{'en': 'Samford'},
'61738843':{'en': 'Cleveland'},
'61738842':{'en': 'Beenleigh'},
'61741456':{'en': 'Bunker'},
'61385665':{'en': 'Clayton'},
'61741454':{'en': 'Wondai'},
'61741455':{'en': 'Bundaberg'},
'61741452':{'en': 'Tansey'},
'61741453':{'en': 'Windera'},
'61741450':{'en': 'Murgon'},
'61385664':{'en': 'Clayton'},
'61741458':{'en': '<NAME>'},
'61741459':{'en': 'Lowmead'},
'61731521':{'en': 'Brisbane'},
'61731520':{'en': 'Brisbane'},
'61731523':{'en': 'Brisbane'},
'61731522':{'en': 'Ipswich'},
'61731525':{'en': 'Brisbane'},
'61731524':{'en': 'Brisbane'},
'61731527':{'en': 'Brisbane'},
'61731526':{'en': 'Brisbane'},
'61731529':{'en': 'Brisbane'},
'61731528':{'en': 'Brisbane'},
'61746043':{'en': 'Cambooya'},
'6136218':{'en': 'Hobart'},
'6136212':{'en': 'Hobart'},
'6136213':{'en': 'Hobart'},
'6136210':{'en': 'Hobart'},
'6136211':{'en': 'Hobart'},
'6136216':{'en': 'Hobart'},
'6136214':{'en': 'Hobart'},
'6136215':{'en': 'Hobart'},
'61385669':{'en': 'Melbourne'},
'61385668':{'en': 'Melbourne'},
'61382031':{'en': 'Croydon'},
'61382030':{'en': 'Croydon'},
'61382032':{'en': 'Croydon'},
'61382034':{'en': 'Croydon'},
'61746542':{'en': 'Charleville'},
'61387609':{'en': 'Werribee'},
'61387608':{'en': 'Sunbury'},
'61387601':{'en': 'Dandenong'},
'61387600':{'en': 'Dandenong'},
'61387603':{'en': 'Sunbury'},
'61387602':{'en': 'Kalkallo'},
'61387605':{'en': 'Whittlesea'},
'61387604':{'en': 'Werribee'},
'61387607':{'en': 'Kalkallo'},
'61387606':{'en': 'Dandenong'},
'6173228':{'en': 'Brisbane'},
'6173229':{'en': 'Brisbane'},
'6173224':{'en': 'Brisbane'},
'6173225':{'en': 'Brisbane'},
'6173226':{'en': 'Brisbane'},
'6173227':{'en': 'Brisbane'},
'6173220':{'en': 'Brisbane'},
'6173221':{'en': 'Brisbane'},
'6173222':{'en': 'Brisbane'},
'6173223':{'en': 'Brisbane'},
'61745935':{'en': 'Chinchilla'},
'61745041':{'en': 'Tipton'},
'6173048':{'en': 'Redcliffe'},
'6173049':{'en': 'Redcliffe'},
'61741638':{'en': 'Yarraman'},
'61741639':{'en': 'Yarraman'},
'6173040':{'en': 'Brisbane'},
'6173041':{'en': 'Brisbane'},
'6173042':{'en': 'Cleveland'},
'6173043':{'en': 'Beenleigh'},
'6173044':{'en': 'Ipswich'},
'6173045':{'en': 'Redcliffe'},
'6173046':{'en':
loaded. To load it, do `{ctx.prefix}load mod`. You can also install/load the WarnSystem cog.")
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}ban {user.id}"
else:
msg.content = f"{ctx.prefix}ban {user.id} {reason}"
ctx.bot.dispatch("message", msg)
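# The block above (and each action branch below) applies the sanction by copying
# ctx.message, rewriting its content to the matching command (ban/warn/softban/kick/
# mute/...), and re-dispatching it as a "message" event, so the bot's regular command
# handler runs the command as if the invoking moderator had typed it.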
else:
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}warn 5 {user.id}"
else:
msg.content = f"{ctx.prefix}warn 5 {user.id} {reason}"
ctx.bot.dispatch("message", msg)
reactions = ["✅"]
start_adding_reactions(message, reactions)
return
elif action == 4:
try:
reason = await utils.reason_ask(ctx, reason, actual_reason_required, "Sanctioning a member - :repeat_one: SoftBan", f"Why do you want to softban {user}? (Set `cancel` to cancel or `not` to none)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
confirmation = await utils.confirmation_ask(ctx, confirmation, "Sanctioning a member - :repeat_one: SoftBan", f"Do you really want to softban {user}?", actual_color, user, reason, None, actual_timeout)
except Timeout_or_Cancel:
return
message = await utils.finish_message(ctx, finish_message, "Sanctioning a member - :repeat_one: SoftBan", f"The user {user} has been softbanned!", actual_thumbnail, actual_color, user, show_author, None, reason)
if not fake_action:
if not warn_system_exist:
if not ctx.bot.get_cog("Mod"):
await ctx.send(f"The cog Mod is not loaded. To load it, do `{ctx.prefix}load mod`. You can also install/load the WarnSystem cog.")
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}softban {user.id}"
else:
msg.content = f"{ctx.prefix}softban {user.id} {reason}"
ctx.bot.dispatch("message", msg)
else:
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}warn 4 {user.id}"
else:
msg.content = f"{ctx.prefix}warn 4 {user.id} {reason}"
ctx.bot.dispatch("message", msg)
reactions = ["✅"]
start_adding_reactions(message, reactions)
return
elif action == 5:
try:
duration = await utils.duration_ask(ctx, duration, "Sanctioning a member - :dash: TempBan", f"How long do you want to tempban {user}? (Set `cancel` to cancel)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
reason = await utils.reason_ask(ctx, reason, actual_reason_required, "Sanctioning a member - :dash: TempBan", f"Why do you want to tempban {user}? (Set `cancel` to cancel or `not` to none)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
confirmation = await utils.confirmation_ask(ctx, confirmation, "Sanctioning a member - :dash: TempBan", f"Do you really want to tempban {user}?", actual_color, user, reason, duration, actual_timeout)
except Timeout_or_Cancel:
return
message = await utils.finish_message(ctx, finish_message, "Sanctioning a member - :dash: TempBan", f"The user {user} has been tempbanned!", actual_thumbnail, actual_color, user, show_author, duration, reason)
if not fake_action:
if not warn_system_exist:
if not ctx.bot.get_cog("Mod"):
await ctx.send(f"The cog Mod is not loaded. To load it, do `{ctx.prefix}load mod`. You can also install/load the WarnSystem cog.")
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}tempban {user.id} {duration}"
else:
msg.content = f"{ctx.prefix}tempban {user.id} {duration} {reason}"
await ctx.send(f"{ctx.prefix}tempban {user.id} {duration} {reason}")
ctx.bot.dispatch("message", msg)
else:
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}warn 5 {user.id} {duration}"
else:
msg.content = f"{ctx.prefix}warn 5 {user.id} {duration} {reason}"
ctx.bot.dispatch("message", msg)
reactions = ["✅"]
start_adding_reactions(message, reactions)
return
elif action == 6:
try:
reason = await utils.reason_ask(ctx, reason, actual_reason_required, "Sanctioning a member - :boot: Kick", f"Why do you want to kick {user}? (Set `cancel` to cancel or `not` to none)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
confirmation = await utils.confirmation_ask(ctx, confirmation, "Sanctioning a member - :boot: Kick", f"Do you really want to kick {user}?", actual_color, user, reason, None, actual_timeout)
except Timeout_or_Cancel:
return
message = await utils.finish_message(ctx, finish_message, "Sanctioning a member - :boot: Kick", f"The user {user} has been kicked!", actual_thumbnail, actual_color, user, show_author, None, reason)
if not fake_action:
if not warn_system_exist:
if not ctx.bot.get_cog("Mod"):
await ctx.send(f"The cog Mod is not loaded. To load it, do `{ctx.prefix}load mod`. You can also install/load the WarnSystem cog.")
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}kick {user.id}"
else:
msg.content = f"{ctx.prefix}kick {user.id} {reason}"
ctx.bot.dispatch("message", msg)
else:
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}warn 3 {user.id}"
else:
msg.content = f"{ctx.prefix}warn 3 {user.id} {reason}"
ctx.bot.dispatch("message", msg)
reactions = ["✅"]
start_adding_reactions(message, reactions)
return
elif action == 7:
try:
reason = await utils.reason_ask(ctx, reason, actual_reason_required, "Sanctioning a member - :mute: Mute", f"Why do you want to mute {user}? (Set `cancel` to cancel or `not` to none)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
confirmation = await utils.confirmation_ask(ctx, confirmation, "Sanctioning a member - :mute: Mute", f"Do you really want to mute {user}?", actual_color, user, reason, None, actual_timeout)
except Timeout_or_Cancel:
return
message = await utils.finish_message(ctx, finish_message, "Sanctioning a member - :mute: Mute", f"The user {user} has been muted!", actual_thumbnail, actual_color, user, show_author, None, reason)
if not fake_action:
if not warn_system_exist:
if not ctx.bot.get_cog("Mutes"):
await ctx.send(f"The cog Mutes is not loaded. To load it, do `{ctx.prefix}load mutes`. You can also install/load the WarnSystem cog.")
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}mute {user.id}"
else:
msg.content = f"{ctx.prefix}mute {user.id}"
ctx.bot.dispatch("message", msg)
else:
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}warn 2 {user.id}"
else:
msg.content = f"{ctx.prefix}warn 2 {user.id} {reason}"
ctx.bot.dispatch("message", msg)
reactions = ["✅"]
start_adding_reactions(message, reactions)
return
elif action == 8:
try:
reason = await utils.reason_ask(ctx, reason, actual_reason_required, "Sanctioning a member - :punch: MuteChannel", f"Why do you want to mute {user} in {ctx.channel.mention}? (Set `cancel` to cancel or `not` to none)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
confirmation = await utils.confirmation_ask(ctx, confirmation, "Sanctioning a member - :punch: MuteChannel", f"Do you really want to mute {user} in {ctx.channel.mention}?", actual_color, user, reason, None, actual_timeout)
except Timeout_or_Cancel:
return
message = await utils.finish_message(ctx, finish_message, "Sanctioning a member - :punch: MuteChannel", f"The user {user} has been muted in #{ctx.channel.name}!", actual_thumbnail, actual_color, user, show_author, None, reason)
if not fake_action:
if not ctx.bot.get_cog("Mutes"):
await ctx.send(f"The cog Mutes is not loaded. To load it, do `{ctx.prefix}load mutes`.")
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}mutechannel {user.id}"
else:
msg.content = f"{ctx.prefix}mutechannel {user.id}"
ctx.bot.dispatch("message", msg)
reactions = ["✅"]
start_adding_reactions(message, reactions)
return
elif action == 9:
try:
duration = await utils.duration_ask(ctx, duration, "Sanctioning a member - :hourglass_flowing_sand: TempMute", f"How long do you want to tempmute {user}? (Set `cancel` to cancel)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
reason = await utils.reason_ask(ctx, reason, actual_reason_required, "Sanctioning a member - :hourglass_flowing_sand: TempMute", f"Why do you want to tempmute {user}? (Set `cancel` to cancel or `not` to none)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
confirmation = await utils.confirmation_ask(ctx, confirmation, "Sanctioning a member - :hourglass_flowing_sand: TempMute", f"Do you really want to tempmute {user}?", actual_color, user, reason, duration, actual_timeout)
except Timeout_or_Cancel:
return
message = await utils.finish_message(ctx, finish_message, "Sanctioning a member - :hourglass_flowing_sand: TempMute", f"The user {user} has been tempmuted!", actual_thumbnail, actual_color, user, show_author, duration, reason)
if not fake_action:
if not warn_system_exist:
if not ctx.bot.get_cog("Mutes"):
await ctx.send(f"The cog Mutes is not loaded. To load it, do `{ctx.prefix}load mutes`. You can also install/load the WarnSystem cog.")
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}mute {user.id} {duration}"
else:
msg.content = f"{ctx.prefix}mute {user.id} {duration} {reason}"
ctx.bot.dispatch("message", msg)
else:
msg = copy(ctx.message)
msg.author = ctx.author
msg.channel = ctx.channel
if reason == "not":
msg.content = f"{ctx.prefix}warn 2 {user.id} {duration}"
else:
msg.content = f"{ctx.prefix}warn 2 {user.id} {duration} {reason}"
ctx.bot.dispatch("message", msg)
reactions = ["✅"]
start_adding_reactions(message, reactions)
return
elif action == 10:
try:
duration = await utils.duration_ask(ctx, duration, "Sanctioning a member - :hourglass: TempMuteChannel", f"How long do you want to tempmute {user} in {ctx.channel.mention}? (Set `cancel` to cancel)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
reason = await utils.reason_ask(ctx, reason, actual_reason_required, "Sanctioning a member - :hourglass: TempMuteChannel", f"Why do you want to tempmute {user} in {ctx.channel.mention}? (Set `cancel` to cancel or `not` to none)", actual_color, user, actual_timeout)
except Timeout_or_Cancel:
return
try:
confirmation = await utils.confirmation_ask(ctx, confirmation, "Sanctioning a member - :hourglass: TempMuteChannel", f"Do you really want to tempmute {user} in {ctx.channel.mention}?", actual_color, user, reason, duration, actual_timeout)
except Timeout_or_Cancel:
return
message = await
'y', 'z'], description='Slice plane', disabled=False, layout= widgets.Layout(display='flex', flex_flow='row'))
left_widgets = widgets.VBox([dump, slice_index, quantity, direction], layout=widgets.Layout(width="30%"))
# center widgets in graph settings
value_label = widgets.Label(value="Value Range to Display:")
value_min = widgets.BoundedFloatText(value=-85., min=-1000., max=84.9999999, step=1e-7, readout_format=".7f", description="Min Value", layout=widgets.Layout(width='335px'))
value_max = widgets.BoundedFloatText(value=85, min=-84.9999999, max=1000., step=1e-7, readout_format=".7f", description="Max Value", layout=widgets.Layout(width='335px'))
log_label = widgets.Label(value="Log Value Range to Display (Applied symmetrically):")
log_min = widgets.BoundedFloatText(value=1e-5, min=1e-7, max=9.99999e-2, step=1e-7, readout_format=".7f", description="Log Min", layout=widgets.Layout(width='335px'))
log_max = widgets.BoundedFloatText(value=1e-1, min=1.0001e-5, max=2e4, step=1e-7, readout_format=".7f", description="Log Max", layout=widgets.Layout(width='335px'))
value_range = widgets.VBox([value_label, value_min, value_max], layout=widgets.Layout(width='100%'))
value_range.layout.display = 'none' # hide these controls initially as log == True on init
log_range = widgets.VBox([log_label, log_min, log_max], layout=widgets.Layout(width='100%'))
log = widgets.Checkbox(value=True, description='Log Values', disabled=False, indent=False)
center_widgets = widgets.VBox([value_range, log_range, log], layout=widgets.Layout(width='30%'))
# right-most widgets in graphs settings
save_load_label = widgets.Label(value="Save/Load Configuration (.pickle):")
save_filename = widgets.Text(value='', placeholder='Filename (w/out extension)', disabled=False, layout=widgets.Layout(width='200px'))
save_button = widgets.Button(description='Save Config', tooltip='Save Config', disabled=False)
save_combo = widgets.HBox([save_button, save_filename], layout=widgets.Layout(margin="0px 0px 0px 20px"))
load_filename = widgets.Text(value='', placeholder='Filename (w/out extension)', disabled=False, layout=widgets.Layout(width='200px'))
load_button = widgets.Button(description='Load Config', tooltip='Load Config', disabled=False)
load_combo = widgets.HBox([load_button, load_filename], layout=widgets.Layout(margin="0px 0px 0px 20px"))
right_widgets = widgets.VBox([save_load_label, save_combo, load_combo], layout=widgets.Layout(width='30%'))
colours, ranges, colour_select, colourpicker = self.get_colourpicker(6)
graph_settings = widgets.HBox([left_widgets, center_widgets, right_widgets])
ui = widgets.Tab(children=[graph_settings, colourpicker])
ui.set_title(0, "Graph Settings")
ui.set_title(1, "Colour Picker")
def plot(dump, quantity, direction, log, slice_index, vmin, vmax, log_min, log_max, colour0, colour1, colour2, colour3, colour4, colour5, colour6, \
colour7, colour8, colour9, colour10, colour11, range0, range1, range2, range3, range4, range5, range6, range7, range8, range9, range10, range11, colour_select):
if log == True:
self.slice_plot(dump, quantity, direction, log_min, log_max, log, slice_index, [colour0, colour1, colour2, colour3, colour4, colour5, colour6, colour7, \
colour8, colour9, colour10, colour11], [range0, range1, range2, range3, range4, range5, range6, range7, range8, range9, range10, range11], colour_select, \
size, ifig, interpolation)
else:
max_min = self.slice_plot(dump, quantity, direction, vmin, vmax, log, slice_index, [colour0, colour1, colour2, colour3, colour4, colour5, colour6, \
colour7, colour8, colour9, colour10, colour11], [range0, range1, range2, range3, range4, range5, range6, range7, range8, range9, range10, range11], \
colour_select, size, ifig, interpolation)
if self.slice_gui_range_update == True:
value_min.value = max_min[0]
value_max.value = max_min[1]
self.slice_gui_range_update = False
def on_click_save(b):
pickle_info = {
'dump': dump.value,
'slice_index': slice_index.value,
'value_min': value_min.value,
'value_max': value_max.value,
'log_min': log_min.value,
'log_max': log_max.value,
'quantity': quantity.value,
'direction': direction.value,
'log': log.value,
'colour_select': colour_select.value
}
pickle_info['colours'] = []
pickle_info['ranges'] = []
for index in range(0, 12):
pickle_info['colours'].append((colours[index].value, colours[index].disabled))
pickle_info['ranges'].append((ranges[index].value, ranges[index].disabled))
try:
if save_filename.value != '':
f = open('%s.pickle' % save_filename.value, 'wb')
else:
f = open('%s.pickle' % date.today(), 'wb')
pickle.dump(pickle_info, f)
f.close()
except:
print('Failed to save file')
save_button.on_click(on_click_save)
def on_click_load(b):
if load_filename.value != '':
try:
f = open('%s.pickle' % load_filename.value, 'rb')
pickle_info = pickle.load(f)
dump.value = pickle_info['dump']
slice_index.value = pickle_info['slice_index']
value_min.value = pickle_info['value_min']
value_max.value = pickle_info['value_max']
log_min.value = pickle_info['log_min']
log_max.value = pickle_info['log_max']
quantity.value = pickle_info['quantity']
direction.value = pickle_info['direction']
log.value = pickle_info['log']
colour_select.value = pickle_info['colour_select']
saved_colours = pickle_info['colours']
saved_ranges = pickle_info['ranges']
for i in range(0, 12):
colours[i].value, colours[i].disabled = saved_colours[i][0], saved_colours[i][1]
ranges[i].value, ranges[i].disabled = saved_ranges[i][0], saved_ranges[i][1]
f.close()
except:
print('Failed to load file')
load_button.on_click(on_click_load)
def min_max_link(change):
if change['owner'].description == "Min Value":
value_max.min = value_min.value + 1e-7
elif change['owner'].description == "Log Min":
log_max.min = log_min.value + 1e-7
elif change['owner'].description == "Max Value":
value_min.max = value_max.value - 1e-7
elif change['owner'].description == "Log Max":
log_min.max = log_max.value - 1e-7
value_min.observe(min_max_link)
value_max.observe(min_max_link)
log_min.observe(min_max_link)
log_max.observe(min_max_link)
def on_ddqs_change(change):
if change['name'] == 'value' and (change['new'] != change['old']):
self.slice_gui_range_update = True
dump.observe(on_ddqs_change)
direction.observe(on_ddqs_change)
quantity.observe(on_ddqs_change)
slice_index.observe(on_ddqs_change)
def on_log_change(change):
if change['name'] == 'value' and (change['new'] != change['old']):
self.slice_gui_range_update = True
if change['new'] == True:
log_range.layout.display = 'block'
value_range.layout.display = 'none'
else:
value_range.layout.display = 'block'
log_range.layout.display = 'none'
log.observe(on_log_change)
output = widgets.interactive_output(plot, {'dump': dump, 'quantity': quantity, 'direction': direction, 'log': log, \
'slice_index': slice_index, 'vmin': value_min, 'vmax': value_max, 'log_min': log_min, 'log_max': log_max, \
'colour0': colours[0], 'colour1': colours[1], 'colour2': colours[2], 'colour3': colours[3], 'colour4': colours[4], \
'colour5': colours[5], 'colour6': colours[6], 'colour7': colours[7], 'colour8': colours[8], 'colour9': colours[9], \
'colour10': colours[10], 'colour11': colours[11], 'range0': ranges[0], 'range1': ranges[1], 'range2': ranges[2], \
'range3': ranges[3], 'range4': ranges[4], 'range5': ranges[5], 'range6': ranges[6], 'range7': ranges[7], \
'range8': ranges[8], 'range9': ranges[9], 'range10': ranges[10], 'range11': ranges[11], 'colour_select': colour_select})
display(ui, output)
def get_mollweide_data(self, dump, radius, quantity):
constants = {
'atomicnoH': 1.,
'atomicnocld': self._rprofset.get('atomicnocld', fname=0),
'fkcld': self._rprofset.get('fkcld', fname=0),
'airmu': self._rprofset.get('airmu', fname=0),
'cldmu': self._rprofset.get('cldmu', fname=0)
}
npoints = self.sphericalHarmonics_lmax(radius)[-1]
ux = self.get(1, dump)
uy = self.get(2, dump)
uz = self.get(3, dump)
ur, utheta, uphi = self.get_spherical_components(ux, uy, uz)
ur_r, utheta_r, uphi_r = self.get_spherical_interpolation(ur, radius, npoints=npoints, plot_mollweide=True)
plot_val = []
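# quantity codes follow the dropdown in mollweide_gui: 0 = u_r, 1 = u_t, 2 = fv,
# 3 = X_H, 4 = relative rho fluctuation, 5 = |w| (called omega in the code below)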
if quantity == 0:
plot_val = ur_r
elif quantity == 1:
u_t = self.get(4, dump)
plot_val = self.get_spherical_interpolation(u_t, radius, npoints=npoints)
elif quantity == 2 or quantity == 3:
fv = self.get(9, dump)
if quantity == 2:
plot_val = self.get_spherical_interpolation(fv, radius, npoints=npoints)
else:
Xcld = fv/((1. - fv)*(constants['airmu']/constants['cldmu']) + fv)
XH = constants['atomicnoH']*(constants['fkcld']/constants['atomicnocld'])*Xcld
plot_val = self.get_spherical_interpolation(XH, radius, npoints=npoints)
elif quantity == 4:
rho = self.get(8, dump)
rho_trilinear_r = self.get_spherical_interpolation(rho, radius, npoints=npoints, method='trilinear')
avg_rho_trilinear = rho_trilinear_r.mean()
plot_val = (rho_trilinear_r - avg_rho_trilinear) / avg_rho_trilinear
elif quantity == 5:
omega = self.get(6, dump)
plot_val = self.get_spherical_interpolation(omega, radius, npoints=npoints)
return {
'utheta_r': utheta_r,
'uphi_r': uphi_r,
'npoints': npoints,
'plot_val': plot_val
}
def mollweide_plot(self, quantity, log, vmin, vmax, colour_select, colours, ranges, ifig):
plot_val = self.mollweide_data['plot_val']
mollweide_plot = self.mollweide_fig.add_axes([0.1, 0.2, 0.88, 0.88], projection='mollweide')
mollweide_plot.grid("True")
cax = self.mollweide_fig.add_axes([0.12, 0.2, 0.84, 0.02])
cmap = self.build_cmap(colours, ranges, colour_select)
if log == True:
log_min = np.log10(vmin)
plot_bool = plot_val > 0
for index, val in enumerate(plot_val):
if plot_bool[index] == True:
if vmin <= val and val <= vmax:
plot_val[index] = np.log10(val) - log_min if log_min < 0 else np.log10(val) + log_min
elif val > vmax:
plot_val[index] = np.log10(vmax) - log_min if log_min < 0 else np.log10(vmax) + log_min
else:
plot_val[index] = 0
else:
if val != 0:
if -vmax <= val and val <= -vmin:
plot_val[index] = -1*np.log10(-val) + log_min if log_min < 0 else -1*np.log10(-val) - log_min
elif val < -vmax:
plot_val[index] = -1*np.log10(vmax) + log_min if log_min < 0 else -1*np.log10(vmax) - log_min
else:
plot_val[index] = 0
mollweide_plot.scatter(self.mollweide_data['uphi_r'], self.mollweide_data['utheta_r'], s=(72./self.mollweide_fig.dpi)**2, marker=',', c=plot_val, cmap=cmap, vmin=plot_val.min(), vmax=plot_val.max())
else:
mollweide_plot.scatter(self.mollweide_data['uphi_r'], self.mollweide_data['utheta_r'], s=(72./self.mollweide_fig.dpi)**2, marker=',', c=plot_val, cmap=cmap, vmin=vmin, vmax=vmax)
cbar1 = self.mollweide_fig.colorbar(mollweide_plot.collections[0], cax=cax, orientation='horizontal')
pl.show(ifig)
def mollweide_gui(self, rad_def=-1, rad_range=[0,-1], size=10, ifig=124):
self.mollweide_data_update = False
dump_min, dump_max = self.get_dump_list()[0], self.get_dump_list()[-1]
dump_mean = int(2*(-dump_min + dump_max)/3.)
# left-most widgets in graph settings
dump = widgets.IntSlider(value=dump_mean, min=dump_min, max=dump_max, step=1, description="Dump", disabled=False, continuous_update=False, orientation="horizontal", layout=widgets.Layout(width='350px'))
#Fix max radius bug:
radii = self._rprofset.get('R',fname=dump.value)
rad_max = max(radii)
rad_range = [0,rad_max] if rad_range[-1] < 0 else rad_range
#Set default to the median radius:
rad_med = np.median(radii)
rad_def = rad_med if rad_def < 0 else rad_def
radius = widgets.FloatSlider(value=rad_def, min=rad_range[0], max=rad_range[1], step=0.1, description="Radius", disabled=False, continuous_update=False, layout=widgets.Layout(width='350px'))
quantity = widgets.Dropdown(options=[('u_r', 0), ('u_t', 1), ('fv', 2), ('X_H', 3), ('rho', 4), ('|w|', 5)], value=0, description="Quantity", layout=widgets.Layout(width='200px'))
log = widgets.Checkbox(value=True, description="Log Values", disabled=False, indent=True)
quant_log = widgets.HBox([quantity, log], layout=widgets.Layout(margin='0px 0px 0px 10px'))
plot_button = widgets.Button(description="Render Plot", disabled=False, layout=widgets.Layout(margin="10px 0px 0px 20px"))
left_widgets = widgets.VBox([dump, radius, quant_log, plot_button], layout=widgets.Layout(width='30%'))
# center widgets in graph settings
value_label = widgets.Label(value="Value Range to Display:")
value_min = widgets.BoundedFloatText(value=-85., min=-1000., max=84.9999999, step=1e-7, readout_format=".7f", description="Min Value", layout=widgets.Layout(width='335px'))
value_max = widgets.BoundedFloatText(value=85, min=-84.9999999, max=1000., step=1e-7, readout_format=".7f", description="Max Value", layout=widgets.Layout(width='335px'))
log_label = widgets.Label(value="Log Value Range to Display (Applied symmetrically):")
log_min = widgets.BoundedFloatText(value=1e-3, min=1e-7, max=9.99999e-2, step=1e-7, readout_format=".7f", description="Log Min", layout=widgets.Layout(width='335px'))
log_max = widgets.BoundedFloatText(value=1e-1, min=1.0001e-3, max=2e4, step=1e-7, readout_format=".7f", description="Log Max", layout=widgets.Layout(width='335px'))
value_range = widgets.VBox([value_label, value_min, value_max], layout=widgets.Layout(width='100%'))
value_range.layout.display = 'none' # hide these controls initially as log == True on init
log_range = widgets.VBox([log_label, log_min, log_max], layout=widgets.Layout(width='100%'))
center_widgets = widgets.VBox([value_range, log_range], layout=widgets.Layout(width='30%'))
# right-most widgets in graph settings
save_load_label = widgets.Label(value="Save/Load Configuration (.pickle):")
save_button = widgets.Button(description="Save Config", disabled=False)
save_filename = widgets.Text(placeholder="Enter name w/out file extension", disabled=False, layout=widgets.Layout(width='250px'))
save_combo = widgets.HBox([save_button, save_filename], layout=widgets.Layout(margin="0px 0px 0px 20px"))
load_button = widgets.Button(description="Load Config", disabled=False)
load_filename = widgets.Text(placeholder="Enter filename w/out file extension", disabled=False, layout=widgets.Layout(width='250px'))
load_combo = widgets.HBox([load_button, load_filename], layout=widgets.Layout(margin="0px 0px 0px 20px"))
right_widgets = widgets.VBox([save_load_label, save_combo, load_combo], layout=widgets.Layout(margin="0px 0px 0px 20px", width='30%'))
# Layout of all widgets and tabs
graph_settings = widgets.HBox([left_widgets, center_widgets, right_widgets])
colours, ranges, colour_select, colourpicker = self.get_colourpicker(3)
gui = widgets.Tab(children=[graph_settings, colourpicker])
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# add python path of PadleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
if parent_path not in sys.path:
sys.path.append(parent_path)
import argparse
import time
import yaml
import ast
from functools import reduce
import cv2
import numpy as np
import paddle
import paddle.fluid as fluid
from preprocess import preprocess, Resize, Normalize, Permute, PadStride
from visualize import visualize_box_mask, lmk2out
# Global dictionary
SUPPORT_MODELS = {
'YOLO',
'SSD',
'RetinaNet',
'EfficientDet',
'RCNN',
'Face',
'TTF',
'FCOS',
'SOLOv2',
}
class Detector(object):
"""
Args:
config (object): config of model, defined by `Config(model_dir)`
model_dir (str): root path of __model__, __params__ and infer_cfg.yml
        device (str): device to run on, one of CPU/GPU/XPU; default is CPU
        run_mode (str): mode of running (fluid/trt_fp32/trt_fp16)
        threshold (float): score threshold below which detections are discarded.
        enable_mkldnn (bool): whether to use mkldnn with CPU.
        enable_mkldnn_bfloat16 (bool): whether to use mkldnn bfloat16 with CPU.
"""
def __init__(self,
config,
model_dir,
device='CPU',
run_mode='fluid',
threshold=0.5,
trt_calib_mode=False,
enable_mkldnn=False,
enable_mkldnn_bfloat16=False):
self.config = config
if self.config.use_python_inference:
            self.executor, self.program, self.fetch_targets = load_executor(
model_dir, device=device)
else:
self.predictor = load_predictor(
model_dir,
run_mode=run_mode,
min_subgraph_size=self.config.min_subgraph_size,
device=device,
trt_calib_mode=trt_calib_mode,
enable_mkldnn=enable_mkldnn,
enable_mkldnn_bfloat16=enable_mkldnn_bfloat16)
def preprocess(self, im):
preprocess_ops = []
for op_info in self.config.preprocess_infos:
new_op_info = op_info.copy()
op_type = new_op_info.pop('type')
if op_type == 'Resize':
new_op_info['arch'] = self.config.arch
preprocess_ops.append(eval(op_type)(**new_op_info))
im, im_info = preprocess(im, preprocess_ops)
inputs = create_inputs(im, im_info, self.config.arch)
return inputs, im_info
def postprocess(self, np_boxes, np_masks, np_lmk, im_info, threshold=0.5):
# postprocess output of predictor
results = {}
if np_lmk is not None:
results['landmark'] = lmk2out(np_boxes, np_lmk, im_info, threshold)
if self.config.arch in ['SSD', 'Face']:
w, h = im_info['origin_shape']
np_boxes[:, 2] *= h
np_boxes[:, 3] *= w
np_boxes[:, 4] *= h
np_boxes[:, 5] *= w
expect_boxes = (np_boxes[:, 1] > threshold) & (np_boxes[:, 0] > -1)
np_boxes = np_boxes[expect_boxes, :]
for box in np_boxes:
print('class_id:{:d}, confidence:{:.4f},'
'left_top:[{:.2f},{:.2f}],'
' right_bottom:[{:.2f},{:.2f}]'.format(
int(box[0]), box[1], box[2], box[3], box[4], box[5]))
results['boxes'] = np_boxes
if np_masks is not None:
np_masks = np_masks[expect_boxes, :, :, :]
results['masks'] = np_masks
return results
def predict(self,
image,
threshold=0.5,
warmup=0,
repeats=1,
run_benchmark=False):
'''
Args:
image (str/np.ndarray): path of image/ np.ndarray read by cv2
            threshold (float): threshold of the predicted box's score
        Returns:
            results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of boxes,
                            matrix element:[class, score, x_min, y_min, x_max, y_max]
MaskRCNN's results include 'masks': np.ndarray:
shape:[N, class_num, mask_resolution, mask_resolution]
'''
inputs, im_info = self.preprocess(image)
np_boxes, np_masks, np_lmk = None, None, None
if self.config.use_python_inference:
for i in range(warmup):
outs = self.executor.run(self.program,
feed=inputs,
                                         fetch_list=self.fetch_targets,
return_numpy=False)
t1 = time.time()
for i in range(repeats):
outs = self.executor.run(self.program,
feed=inputs,
                                         fetch_list=self.fetch_targets,
return_numpy=False)
t2 = time.time()
ms = (t2 - t1) * 1000.0 / repeats
print("Inference: {} ms per batch image".format(ms))
np_boxes = np.array(outs[0])
if self.config.mask_resolution is not None:
np_masks = np.array(outs[1])
else:
input_names = self.predictor.get_input_names()
for i in range(len(input_names)):
input_tensor = self.predictor.get_input_tensor(input_names[i])
input_tensor.copy_from_cpu(inputs[input_names[i]])
for i in range(warmup):
self.predictor.zero_copy_run()
output_names = self.predictor.get_output_names()
boxes_tensor = self.predictor.get_output_tensor(output_names[0])
np_boxes = boxes_tensor.copy_to_cpu()
if self.config.mask_resolution is not None:
masks_tensor = self.predictor.get_output_tensor(
output_names[1])
np_masks = masks_tensor.copy_to_cpu()
if self.config.with_lmk is not None and self.config.with_lmk == True:
face_index = self.predictor.get_output_tensor(output_names[
1])
landmark = self.predictor.get_output_tensor(output_names[2])
prior_boxes = self.predictor.get_output_tensor(output_names[
3])
np_face_index = face_index.copy_to_cpu()
np_prior_boxes = prior_boxes.copy_to_cpu()
np_landmark = landmark.copy_to_cpu()
np_lmk = [np_face_index, np_landmark, np_prior_boxes]
t1 = time.time()
for i in range(repeats):
self.predictor.zero_copy_run()
output_names = self.predictor.get_output_names()
boxes_tensor = self.predictor.get_output_tensor(output_names[0])
np_boxes = boxes_tensor.copy_to_cpu()
if self.config.mask_resolution is not None:
masks_tensor = self.predictor.get_output_tensor(
output_names[1])
np_masks = masks_tensor.copy_to_cpu()
if self.config.with_lmk is not None and self.config.with_lmk == True:
face_index = self.predictor.get_output_tensor(output_names[
1])
landmark = self.predictor.get_output_tensor(output_names[2])
prior_boxes = self.predictor.get_output_tensor(output_names[
3])
np_face_index = face_index.copy_to_cpu()
np_prior_boxes = prior_boxes.copy_to_cpu()
np_landmark = landmark.copy_to_cpu()
np_lmk = [np_face_index, np_landmark, np_prior_boxes]
t2 = time.time()
ms = (t2 - t1) * 1000.0 / repeats
print("Inference: {} ms per batch image".format(ms))
# do not perform postprocess in benchmark mode
results = []
if not run_benchmark:
if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
                print('[WARNING] No object detected.')
results = {'boxes': np.array([])}
else:
results = self.postprocess(
np_boxes, np_masks, np_lmk, im_info, threshold=threshold)
return results
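# --- Illustrative usage sketch (added example, not part of the original script) ---
# Shows how the Config and Detector classes in this module might be wired together.
# The model directory and image path below are placeholders, not real paths.
def _example_detect(model_dir='output/yolov3_darknet', image_path='demo.jpg'):
    """Run a single prediction with the Detector defined above (illustrative only)."""
    config = Config(model_dir)                  # parses infer_cfg.yml under model_dir
    detector = Detector(config, model_dir, device='CPU', run_mode='fluid')
    return detector.predict(image_path, threshold=0.5)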
class DetectorSOLOv2(Detector):
def __init__(self,
config,
model_dir,
device='CPU',
run_mode='fluid',
threshold=0.5,
trt_calib_mode=False,
enable_mkldnn=False,
enable_mkldnn_bfloat16=False):
super(DetectorSOLOv2, self).__init__(
config=config,
model_dir=model_dir,
device=device,
run_mode=run_mode,
threshold=threshold,
trt_calib_mode=trt_calib_mode,
            enable_mkldnn=enable_mkldnn,
enable_mkldnn_bfloat16=enable_mkldnn_bfloat16)
def predict(self,
image,
threshold=0.5,
warmup=0,
repeats=1,
run_benchmark=False):
inputs, im_info = self.preprocess(image)
np_label, np_score, np_segms = None, None, None
if self.config.use_python_inference:
for i in range(warmup):
outs = self.executor.run(self.program,
feed=inputs,
                                         fetch_list=self.fetch_targets,
return_numpy=False)
t1 = time.time()
for i in range(repeats):
outs = self.executor.run(self.program,
feed=inputs,
                                         fetch_list=self.fetch_targets,
return_numpy=False)
t2 = time.time()
ms = (t2 - t1) * 1000.0 / repeats
print("Inference: {} ms per batch image".format(ms))
np_label, np_score, np_segms = np.array(outs[0]), np.array(outs[
1]), np.array(outs[2])
else:
input_names = self.predictor.get_input_names()
for i in range(len(input_names)):
input_tensor = self.predictor.get_input_tensor(input_names[i])
input_tensor.copy_from_cpu(inputs[input_names[i]])
for i in range(warmup):
self.predictor.zero_copy_run()
output_names = self.predictor.get_output_names()
np_label = self.predictor.get_output_tensor(output_names[
0]).copy_to_cpu()
np_score = self.predictor.get_output_tensor(output_names[
1]).copy_to_cpu()
np_segms = self.predictor.get_output_tensor(output_names[
2]).copy_to_cpu()
t1 = time.time()
for i in range(repeats):
self.predictor.zero_copy_run()
output_names = self.predictor.get_output_names()
np_label = self.predictor.get_output_tensor(output_names[
0]).copy_to_cpu()
np_score = self.predictor.get_output_tensor(output_names[
1]).copy_to_cpu()
np_segms = self.predictor.get_output_tensor(output_names[
2]).copy_to_cpu()
t2 = time.time()
ms = (t2 - t1) * 1000.0 / repeats
print("Inference: {} ms per batch image".format(ms))
# do not perform postprocess in benchmark mode
results = []
if not run_benchmark:
return dict(segm=np_segms, label=np_label, score=np_score)
return results
def create_inputs(im, im_info, model_arch='YOLO'):
"""generate input for different model type
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
model_arch (str): model type
Returns:
inputs (dict): input of model
"""
inputs = {}
inputs['image'] = im
origin_shape = list(im_info['origin_shape'])
resize_shape = list(im_info['resize_shape'])
pad_shape = list(im_info['pad_shape']) if im_info[
'pad_shape'] is not None else list(im_info['resize_shape'])
scale_x, scale_y = im_info['scale']
if 'YOLO' in model_arch:
im_size = np.array([origin_shape]).astype('int32')
inputs['im_size'] = im_size
elif 'RetinaNet' in model_arch or 'EfficientDet' in model_arch:
scale = scale_x
im_info = np.array([pad_shape + [scale]]).astype('float32')
inputs['im_info'] = im_info
elif ('RCNN' in model_arch) or ('FCOS' in model_arch):
scale = scale_x
im_info = np.array([pad_shape + [scale]]).astype('float32')
im_shape = np.array([origin_shape + [1.]]).astype('float32')
inputs['im_info'] = im_info
inputs['im_shape'] = im_shape
elif 'TTF' in model_arch:
scale_factor = np.array([scale_x, scale_y] * 2).astype('float32')
inputs['scale_factor'] = scale_factor
elif 'SOLOv2' in model_arch:
scale = scale_x
im_info = np.array([resize_shape + [scale]]).astype('float32')
inputs['im_info'] = im_info
return inputs
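# --- Illustrative example (added, not part of the original script) ---
# For a YOLO-style model, create_inputs() only needs the image tensor and the
# original image size; the shapes and scales below are made-up sample values.
def _example_yolo_inputs():
    dummy_im = np.zeros((1, 3, 608, 608), dtype='float32')
    dummy_info = {'origin_shape': [480, 640],
                  'resize_shape': [608, 608],
                  'pad_shape': None,
                  'scale': [0.95, 1.27]}
    return create_inputs(dummy_im, dummy_info, model_arch='YOLO')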
class Config():
"""set config of preprocess, postprocess and visualize
Args:
model_dir (str): root path of model.yml
"""
def __init__(self, model_dir):
# parsing Yaml config for Preprocess
deploy_file = os.path.join(model_dir, 'infer_cfg.yml')
with open(deploy_file) as f:
yml_conf = yaml.safe_load(f)
self.check_model(yml_conf)
self.arch = yml_conf['arch']
self.preprocess_infos = yml_conf['Preprocess']
self.use_python_inference = yml_conf['use_python_inference']
self.min_subgraph_size = yml_conf['min_subgraph_size']
self.labels = yml_conf['label_list']
self.mask_resolution = None
if 'mask_resolution' in yml_conf:
self.mask_resolution = yml_conf['mask_resolution']
self.with_lmk = None
if 'with_lmk' in yml_conf:
self.with_lmk = yml_conf['with_lmk']
self.print_config()
def check_model(self, yml_conf):
"""
Raises:
ValueError: loaded model not in supported model type
"""
for support_model in SUPPORT_MODELS:
if support_model in yml_conf['arch']:
return True
raise ValueError("Unsupported arch: {}, expect {}".format(yml_conf[
'arch'], SUPPORT_MODELS))
def print_config(self):
print('----------- Model Configuration -----------')
print('%s: %s' % ('Model Arch', self.arch))
print('%s: %s' % ('Use Paddle Executor', self.use_python_inference))
print('%s: ' % ('Transform Order'))
for op_info in self.preprocess_infos:
print('--%s: %s' % ('transform op', op_info['type']))
print('--------------------------------------------')
def load_predictor(model_dir,
run_mode='fluid',
batch_size=1,
device='CPU',
min_subgraph_size=3,
trt_calib_mode=False,
enable_mkldnn=False,
enable_mkldnn_bfloat16=False):
"""set AnalysisConfig, generate AnalysisPredictor
Args:
model_dir (str): root path of __model__ and __params__
        device (str): device to run on, one of CPU/GPU/XPU; default is CPU
        trt_calib_mode (bool): if the model is produced by TRT offline quantitative
            calibration, trt_calib_mode needs to be set to True
        enable_mkldnn (bool): Whether to use mkldnn with CPU, default is False
        enable_mkldnn_bfloat16 (bool): Whether to use mkldnn bfloat16 with CPU, default is False
Returns:
predictor (PaddlePredictor): AnalysisPredictor
Raises:
ValueError: predict by TensorRT need device == GPU.
"""
    if device != 'GPU' and run_mode != 'fluid':
        raise ValueError(
            "Predict by TensorRT mode: {}, expect device==GPU, but device == {}"
            .format(run_mode, device))
("The work of the authors cited in this review has been supported by FONDECYT grant numbers: 1110352 and 1150200 to MA; 1141088 to JF; DIPOG grant 391340281 to JF; FONDECYT Postdoctoral fellow 3170497 to JC and 3190843 to AE.",
{"entities": []}),
#130
("An earlier onset of OCD symptoms is observed in men compared with women (Mathis et al., 2011), with women showing more prevalence of contamination and cleaning symptoms (Labad et al., 2008).",
{"entities": [(73, 79, PER), (88, 92, "DATE"), (170, 175, PER), (184, 188, "DATE")]}),
("Second, we entered the gene list, selected identifiers as official gene symbols, selected list types as gene lists, and submitted lists.",
{"entities": []}),
("For given DEGs identified between the isotonic NP cells and hypertonic NP cells by the above analysis (see the “Identification of DEGs” section), function and pathway enrichment analysis was carried out with the following ontology sources",
{"entities": []}),
("Enrichment analysis of gene ontology and KEGG by gene set enrichment analysis (GSEA)\nThe gene sequences were obtained from isotonic and hypertonic NP cells.",
{"entities": []}),
("In this study, we performed GSEA analysis on gene sequences of nucleus pulposus under isotonic and hypertonic NP cells as follows.",
{"entities": [(63, 79, LABEL)]}),
("The R language was used to perform the clustering analysis of significant genes based on the gene expression level. ",
{"entities": []}),
("To perform the Gene Ontology and KEGG analysis of DEGs, the DAVID online tool was implemented.",
{"entities": []}),
("However, the role of CXCL13 in pain was contrary to our expectation.",
{"entities": []}),
("Chapman reported that somatostatin has a marked antinociceptive function",
{"entities": [(0, 7, PER), (22, 34, NT)]}),
("Finally, the enrichment results of Gene Ontology and KEGG are presented.",
{"entities": []}),
("Through gene enrichment analysis of DEGs, the influence of osmotic pressure on gene expression in NP cells was comprehensively summarized: regulation of extracellular matrix organization and the JAK/STAT signalling pathway.",
{"entities": [(195, 203, PHYS)]}),
("Metascape (http://metascape.org/gp/index.html#/main/step1)74 was used to construct the PPI network and screen the significant module.",
{"entities": []}),
("Moreover, the Search Tool for the Retrieval of Interacting Genes (STRING, http://string.embl.de/) was also applied to construct the PPI network, and Cytoscape was used to present the network",
{"entities": []}),
("A Venn diagram was delineated to identify significant common genes among “Metascape_MCODE”, “Cytoscape_MCODE”, and “Cytoscape_cytoHubba” by FunRich software (http://www.funrich.org).",
{"entities": []}),
("The comparative toxicogenomics database (http://ctdbase.org/) is a web-based tool that provides information about interactions between chemicals and gene products and their relationships to diseases",
{"entities": []}),
("There was also a positive association between the relative expression of OPRL1 and the relative expression of CCL5.",
{"entities": [(73, 78, PHYS), (110, 114, PHYS)]}),
("The miRNAs that regulate the four significant genes were screened out with TargetScan",
{"entities": [(4, 10, PHYS)]}),
("However, the expression of CXCL13 was negatively related to the expression of CCL5.",
{"entities": [(27, 33, PHYS), (78, 82, PHYS)]}),
("Enrichment plot: Gene Ontology_LYSOSOME_LOCALIZATION.",
{"entities": []}),
("Ontology enrichment analysis by GSEA indicated that 1274/4081 genes were upregulated in hypertonic NP cells.",
{"entities": []}),
#150
("Two of three available reviewers (TH, JB, or NH) independently screened the titles/abstracts for eligibility.",
{"entities": []}),
("The content is solely the responsibility of the authors and does not necessarily represent the official views of the NIH, the Food and Drug Administration, or the American Heart Association.",
{"entities": []}),
("While other disciplines (e.g. economics, [12] moral reasoning, [13] social psychology [14]) have explored the conditions under which hypothetical decisions accurately reflect real-world decisions.",
{"entities": []}),
("Table 1 presents participant characteristics by tobacco use status. Of the total sample, 65.4% were never users (n\xa0=\xa0743), 4.7% were cigarette only users (n\xa0=\xa053), 3.3% were smokeless only users (n\xa0=\xa038), 5.6% were e-cigarette only users (n\xa0=\xa064), and 21.0% were polytobacco users (n\xa0=\xa0238).",
{"entities": []}),
("In a US regional adolescent sample (11-18\u2009years of age), over 80% of those engaged in opioid PDM endorsed pain relief as a motive, though other motives could be selected as well.",
{"entities": []}),
("In particular, the links between major depressive disorder, depressive symptoms and/or suicidality and opioid PDM are robust and well-established in adolescents.",
{"entities": []}),
("Limited evidence from the NSDUH suggests that links between opioid PDM and other substance use vary by age group,101 with evidence both that past-year benzodiazepine PDM is less likely with aging among those engaged in opioid PDM but that frequency of past-year benzodiazepine PDM is somewhat higher in those 26 and older.",
{"entities": []}),
("Notably, while the pattern of stimulant PDM prevalence mirrors other medication classes, nationally representative US data and multi-national administrative pharmacy data indicate that stimulant use patterns differ, with increases through childhood and adolescence to a peak in young adulthood, followed by declines.",
{"entities": []}),
("The presence of antinociceptive activity at a low level of NO suggests the importance of NO in the modulation of nociceptive transmission.",
{"entities": [(59, 61, NT), (89, 91, NT)]}),
("Based on the results obtained, it is plausible to suggest that modulation of cGMP synthesis plays a role in the nociceptive transmission.",
{"entities": [(77, 80, PHYS)]}),
#160
("However, the inhibition of guanylyl cyclase neither helps to enhance nor play a role in the antinociceptive activity of MEDL.",
{"entities": [(27, 43, PHYS), (120, 124, PHYS), (92, 116, FUNC)]}),
("Finally, no association was found between housing status (using the categories No Housing Problem, Housing Problem, No Fixed Abode), χ2 (2) = 2.05, p > .05.",
{"entities": []}),
("Functional annotation for DEGs with database for annotation, visualization and integrated discovery (DAVID)\nThe DAVID (https://david.ncifcrf.gov/home.jsp) (version 6.8)71 was one online analysis tool suite with the functional annotation for Gene Ontology72 and Kyoto Encyclopedia of Genes and Genomes (KEGG)73.",
{"entities": []}),
("As a fully functional package, the limma R package included the original data input and preprocessing capabilities of complementary DNA (cDNA) chips extracted from NP cells, as well as a linear model for analysing differentially expressed genes.",
{"entities": []}),
("Oh found that subcutaneous injection of CCL5 in mice produced allodynia 32.",
{"entities": [(0, 2, PER), (41, 45, PHYS), (62, 71, FUNC)]}),
("In addition, the analgesic activity of morphine could be attenuated by chemokines, especially CCL5 and CXCL1236.",
{"entities": [(17, 35, FUNC), (94, 98, PHYS), (103, 111, PHYS)]}),
("KEGG enrichment analysis by GSEA indicated that 70/168 gene sets were upregulated in hypertonic NP cells compared to isotonic NP cells, while 98/168 gene sets were downregulated.",
{"entities": []}),
("First, while our search strategy sought to encompass as many synonyms for ‘hypothetical’ and ‘real-world’ decisions as possible, there are likely studies touching on this issue that were not captured by our search.",
{"entities": []}),
("Five distinct subtypes of muscarinic acetylcholine receptors (M1–M5) have been identified in the human genome [1].",
{"entities": [(26, 60, PHYS)]}),
#170
("M5 muscarinic receptors are expressed solely in the substantia nigra and ventral tegmental area (VTA) [7].",
{"entities": [(0, 23, PHYS), (52, 68, LABEL), (73, 95, LABEL), (97, 100, LABEL)]}),
("Cholinergic neurons in the nucleus accumbens (NAcc) project to VTA, where they stimulate dopaminergic neurons.",
{"entities": [(0, 9, NT), (27, 44, LABEL), (46, 50, LABEL), (63, 66, LABEL), (79, 101, FUNC)]}),
("The VTA dopaminergic neurons project back to NAcc, where they stimulate cholinergic neurons.",
{"entities": [(4, 7, LABEL), (8, 20, NT), (45, 49, LABEL), (62, 83, FUNC)]}),
("This positive feedback works as the reward circuit [87].",
{"entities": []}),
("Gastro-oesophageal reflux18 (17.3)Food allergy/intolerance/sensitivity17 (16.4)Dysbiosis or parasites8 (7.7)Liver and biliary dysfunction and disease6 (5.8)",
{"entities": []}),
("Sociodemographic characteristicsn (%)Patient Sex (n = 851) Female618 (72.6) Male233 (27.4)Patient Age (n = 835) Up to 5 years21 (2.5) 6–12 years21 (2.5) 13–17 years10 (1.2) 18–25 years56 (6.7) 26–35 years129 (15.5) 36–45 years169 (20.2) 46–55 years163 (19.5) 56–65 years161 (19.3) 66–75 years68 (8.1) 76–85 years28 (3.4) 86 years and over9 (1.1)",
{"entities": []}),
("The study rigorously followed | |
import pandas as pd
import numpy as np
import geopandas as gpd
import math
import fiona
import rasterio
import glob
import os
import pickle
import affine6p
import matplotlib
import time
import datetime
from matplotlib import pyplot as plt
import rasterio.mask as rmask
from rasterio.merge import merge
from rasterio.plot import show
from rasterio.transform import Affine
from shapely.geometry import Polygon, mapping
from config import config_cls
config = config_cls[os.getenv('ENV', 'default')]
################################# Preprocessing Functions #################################
bucket = config.DATA_ROOT / 'interim' / 'sugar_files_FLI'
def preprocess_pngs():
"""Preprocess geo data and save intermediate results on harddisk. Run once (if files are generated, no need to run)
"""
_ = get_corners()
pngS_to_geotifS(png_folder=f"TCI_tiles", affine_option='4corners')
return
################################ This is the main function ################################
def ROI_tifs(ROI):
"""Based on input ROI and preprocess files, find relevant satellite images and clip them with ROI.
    Save the clipped images to the hard drive and return path information and timestamp information
Parameters
----------
ROI : dict (json), need to contain the below information (as required by front end)
{"geometry": {"type": "Polygon",
"coordinates": [[148.60709030303114, -20.540043246963264],
[148.69607543743531, -20.539590412428996],
[148.6865658493269, -20.595756032466892],
[148.6275658455197,-20.606209452942387]]}}
"""
# Prepare shared datasets
corners_geo, _, tif_list, xy_w_geo_wo_tif = load_preprocessed()
ROI = Polygon(ROI["geometry"]["coordinates"])
ROI_tiles_XY = pair_ROI_to_tiles(ROI, corners_geo)
ROI_tiles_XY = list(set(ROI_tiles_XY) - xy_w_geo_wo_tif)
tasks = tif_list.loc[ROI_tiles_XY].reset_index()
dates_unix = pd.to_datetime(tasks['date']).sort_values().unique().astype(np.int64) // 10**6
if len(ROI_tiles_XY)==0:
print("No tiles matched to the ROI. Please select another region.")
# FIXME: what is the API if no corresponding tile is found?
return
tasks = tif_list[tif_list.index.isin(ROI_tiles_XY)]
if len(ROI_tiles_XY)==1:
tif_infos = ROI_one_tile([mapping(ROI)], ROI_tiles_XY[0][0], ROI_tiles_XY[0][1], tasks['date'].values, corners_geo, save_format='png')
else:
print("Loading satellite images for the selected zone...")
tif_infos = {}
# For each tile, clip the tile with ROI and save as TIF
for xy in ROI_tiles_XY:
task = tasks.loc[xy]
_ = ROI_one_tile([mapping(ROI)], xy[0], xy[1], task['date'].values, corners_geo)
for unix_x in dates_unix:
merged_array = merge_tiles(ROI_tiles_XY, unix_x, tif_folder = f"{bucket}/results/single")
tif_infos[unix_x] = f"{bucket}/results/png/{unix_x}.png"
save_png(merged_array, tif_infos[unix_x])
print("Finished!")
return {"png_path":tif_infos, "start_date":dates_unix[0], "end_date":dates_unix[-1], "all_dates":dates_unix}
def load_preprocessed():
corners_geo = gpd.read_file(f"{bucket}/intermediate/tile_geo").set_index(['X','Y'])
with open(f"{bucket}/intermediate/tile_corners.pkl", "rb") as f:
corners_coords = pickle.load(f).set_index(['X','Y'])
tif_list = ls_images(f"{bucket}/intermediate/geotifs/*.tif").sort_values(['x','y','date']).set_index(['x','y'])
xy_w_geo_wo_tif = set(corners_geo.index.unique().values) - set(tif_list.index.unique().values)
return corners_geo, corners_coords, tif_list, xy_w_geo_wo_tif
################################# Preprocessing Functions #################################
def get_corners(folder="geometries"):
"""List all geojson files in the folder of GCS, return the four corners for all files in a pd.DataFrame indexed by tile X&Y."""
geofiles = glob.glob(f"{bucket}/{folder}/*.geojson")
Xs = pd.Series([path.split("geo-x")[1].split("-")[0] for path in geofiles]).astype(int)
Ys = pd.Series([path.split("geo-x")[1].split("-y")[1].split(".")[0] for path in geofiles]).astype(int)
all_corners = pd.Series([get_corner(geofile) for geofile in geofiles])
all_corners = pd.concat([Xs, Ys, all_corners], axis=1)
all_corners.columns = ["X", "Y", "corners"]
all_corners['geometry'] = all_corners.apply(lambda row:Polygon(row["corners"]), axis=1)
all_corners = gpd.GeoDataFrame(all_corners, geometry=all_corners["geometry"], crs={'init': 'epsg:4326'})
all_corners[["X","Y","geometry"]].to_file(f"{bucket}/intermediate/tile_geo")
with open(f"{bucket}/intermediate/tile_corners.pkl", "wb") as f:
pickle.dump(all_corners[["X","Y","corners"]], f)
return all_corners
def get_corner(geofile:str):
""" Open geojson file from GCS corresponding to the xy of one tile, return the long/lat of the corners of tile
Parameters
----------
geofile : str
geofile path
Returns
-------
list of tuples
Each tuple is the long/lat of TL, TR, BR and BL corner
"""
with fiona.open(geofile, "r") as shapefile:
tile_corner = [x["geometry"] for x in shapefile][0]["coordinates"][0]
return tile_corner
def pngS_to_geotifS(png_folder="TCI_tiles", affine_option='4corners', mask_folder="masks"):
"""Convert all PNGs to geoTIFs with sugarcane field mask applied to the original png"""
# Load preprocessed inputs and list pngs to convert
with open(f"{bucket}/intermediate/tile_corners.pkl", "rb") as f:
all_corners = pickle.load(f).set_index(["X", "Y"])
tasks = ls_images(f"{bucket}/TCI_tiles/*.png")
for index, row in tasks.iterrows():
try:
# Prepare inputs
tile_x, tile_y, tile_date = row['x'], row['y'], row['date']
png_name = f"{tile_x}-{tile_y}-TCI-{tile_date}.png"
save_name = f"{tile_x}-{tile_y}-TCI-{tile_date}.tif"
field_mask_path = f"{bucket}/{mask_folder}/mask-x{tile_x}-y{tile_y}.png"
# Open png file corresponding to the xy of tiles, return the array of raster values (3*512*512) with sugarcane field mask applied
with rasterio.open(f"{bucket}/{png_folder}/{png_name}", "r") as src:
                array_data = src.read() * load_field_mask(field_mask_path)
# Save array as geotif
array_to_geotif(array_data, save_name, all_corners.loc[(tile_x, tile_y), "corners"][:4], affine_option=affine_option)
        except Exception as exc:
            print([index, row["x"], row["y"], row["date"]], exc)
return
################################# Geo Functions #################################
def ROI_one_tile(ROI, tile_x, tile_y, dates, corners_geo, source_folder="intermediate/geotifs", mask_folder="masks", save_format='tif'):
"""Clip all geoTIFs (timesteps) for an tile by the ROI polygon.
Save the clipped raster arrays as geotif.
"""
# Clip ROI with tile square to get the intersection polygon bounds and corresponding rectangular corners
#FIXME: Only single polygon is supported, complex polygon not supported yet
inter_poly_bounds = Polygon(ROI[0]["coordinates"][0]).intersection(corners_geo.loc[(tile_x, tile_y)]).bounds
intersection_corner = [[inter_poly_bounds[0], inter_poly_bounds[3]],
[inter_poly_bounds[2], inter_poly_bounds[3]],
[inter_poly_bounds[2], inter_poly_bounds[1]],
[inter_poly_bounds[0], inter_poly_bounds[1]]
]
# Use the mask and indices to select rasters of all TIFs
results = {}
for x in dates:
unix_x = int(time.mktime(datetime.datetime.strptime(x, "%Y-%m-%d").timetuple()) * 1000)
source_path = f"{bucket}/{source_folder}/{tile_x}-{tile_y}-TCI-{x}.tif"
polygon_array = ROI_on_geotif(ROI, source_path, True)[0]
if save_format=='tif':
array_to_geotif(polygon_array, f"{tile_x}-{tile_y}-{unix_x}.tif", intersection_corner,
save_folder=f"results/single", affine_option='4corners')
elif save_format=='png':
save_png(polygon_array, f"{bucket}/results/png/{unix_x}.png")
else:
print("Save format not supported, aborting.")
return
results[unix_x] = f"results/png/{unix_x}.png"
return results
def ROI_on_geotif(ROI, geotif_path, crop):
"""Clip one tile(geoTIF) by the ROI polygon. Return the cropped raster array and tif meta.
Parameters
----------
ROI : list of geojson polygons, e.g.:
[{'type': 'Polygon',
'coordinates': [[(148.1126437690792, -20.0084977141844666),
(148.13147206605388, -20.004663808059437),
(148.131814713494, -20.010831583258326),
(148.11297164191616, -20.01114679490517)]]}]
geotif_path : str
full GCS path of the tif
crop : boolean
If true, return the clipped array; or else, return the whole array but with regions outside ROI set to nodata.
Returns
-------
np.array
        array of the clipped image
"""
# Open TIF and add mask using ROI polygon
with rasterio.open(geotif_path, "r") as src:
out_image, out_transform = rasterio.mask.mask(src, ROI, crop=crop)
out_meta = src.meta
# Update TIF value
out_meta.update({"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform})
return out_image, out_meta
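# --- Illustrative usage (added example, not part of the original module) ---
# Clips a single geoTIF with a polygon in the same format as the docstring
# example above; the tif filename is a placeholder following the naming
# convention used elsewhere in this module.
def _example_clip(tif_path=f"{bucket}/intermediate/geotifs/10-20-TCI-2020-01-01.tif"):
    roi = [{'type': 'Polygon',
            'coordinates': [[(148.1126, -20.0085), (148.1315, -20.0047),
                             (148.1318, -20.0108), (148.1130, -20.0111)]]}]
    return ROI_on_geotif(roi, tif_path, crop=True)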
def pair_ROI_to_tiles(ROI, gdf):
"""For a given ROI, find its intersected tiles by sjoin with tile bounding box polygons.
Return an np array of tile x&y indices.
Parameters
----------
ROI : shapely.geometry.Polygon
gdf : geopandas.dataframe
"""
precise_matched_gdf = gdf[gdf.intersects(ROI)]
return precise_matched_gdf.index.values
def array_to_geotif(array_data, tif_name, tile_corner, save_folder=f"intermediate/geotifs", affine_option='4corners'):
"""Convert an array into a geoTIF, which is a format required to intersect with ROI polygon.
"""
if affine_option=='4corners':
origin = [[1,1], [1, array_data.shape[2]], [array_data.shape[1], array_data.shape[2]], [array_data.shape[1], 1]]
convert = tile_corner
trans_matrix = affine6p.estimate(origin, convert).get_matrix()
transform = Affine(trans_matrix[0][1], trans_matrix[0][0], trans_matrix[0][2],
trans_matrix[1][1], trans_matrix[1][0], trans_matrix[1][2])
elif affine_option=='4bounds':
west = min(tile_corner[0][0], tile_corner[3][0])
south = min(tile_corner[2][1], tile_corner[3][1])
east = max(tile_corner[1][0], tile_corner[2][0])
north = max(tile_corner[0][1], tile_corner[1][1])
bearing = GetBearing(tile_corner[0], tile_corner[1])
transform = rasterio.transform.from_bounds(west, south, east, north, array_data.shape[1], array_data.shape[2]) #* Affine.rotation(bearing)
#FIXME: to be further tested
else:
print(f"Affine option {affine_option} not supported...Aborting")
return
# Save png as geoTIF
with rasterio.open(f"{bucket}/{save_folder}/{tif_name}", 'w', driver='GTiff', height=array_data.shape[1],
width=array_data.shape[2], count=3, dtype=array_data.dtype, crs='EPSG:4326',
transform=transform) as dst:
dst.write(array_data)
return transform
# Missing data not handled
def merge_tiles(ROI_tiles_XY, unix_date, tif_folder = f"{bucket}/results/single"):
src_files_to_mosaic = []
for xy in ROI_tiles_XY:
tifpath = f"{tif_folder}/{xy[0]}-{xy[1]}-{unix_date}.tif"
src = rasterio.open(tifpath)
src_files_to_mosaic.append(src)
mosaic, _ = merge(src_files_to_mosaic)
return mosaic
################################# Utility Functions #################################
def ls_images(path, flag="TCI-"):
"""List images in a gcs/local path"""
imgS = glob.glob(path)
imgS = [x.split("/")[-1].split(".")[0] for x in imgS]
Xs = pd.Series([x.split("-")[0] for x in imgS]).astype(int)
Ys = pd.Series([x.split("-")[1] for x in imgS]).astype(int)
Dates = pd.Series([x.split(flag)[1] for x in imgS])
tasks = pd.concat([Xs, Ys, Dates], axis=1)
tasks.columns = ["x","y","date"]
return tasks
def bbox2(np_array):
"""Return the indices of the bounding box of non-zero elements in a np array"""
rows = np.any(np_array, axis=1)
cols = np.any(np_array, axis=0)
ymin, ymax = np.where(rows)[0][[0, -1]]
xmin, xmax = np.where(cols)[0][[0, -1]]
return [ymin, ymax+1, xmin, xmax+1]
def load_field_mask(path):
with rasterio.open(path, "r") as src:
masked_array = src.read()[3]
masked_array[masked_array>0] = 1
return masked_array
def plot_an_array(x):
fig, ax = plt.subplots()
plt.imshow(np.transpose(x,(1,2,0)))
return
def GetBearing(pointA, pointB):
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
* math.cos(lat2) * math.cos(diffLong))
initial_bearing = math.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
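# --- Illustrative example (added, not part of the original module) ---
# GetBearing expects (lat, lon) tuples; swapLatLon (defined just below) converts
# the (lon, lat) ordering used by the tile corner lists. Coordinates are sample values.
def _example_bearing():
    top_left = (-20.540, 148.607)    # (lat, lon)
    top_right = (-20.539, 148.696)
    return GetBearing(top_left, top_right)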
def swapLatLon(coord):
return (coord[1],coord[0])
def save_png(input_array, save_path):
"""Transpose a (3 or 4, x, y) array into (x, y, 3 or 4) array and save it as png.
Parameters
----------
    input_array : np.ndarray
        Raster array in (bands, x, y) order.
    save_path : str
"""Entity/extent unit tests."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
import datetime
import random
from schevo.constant import UNASSIGNED
from schevo import error
from schevo import test
from schevo.test import CreatesSchema, raises
from schevo.transaction import Transaction
class BaseEntityExtent(CreatesSchema):
body = '''
from schevo.test import raises
class Avatar(E.Entity):
realm = f.entity('Realm')
user = f.entity('User')
name = f.string()
_key(user, realm, name)
def __unicode__(self):
return u'%s (%s in %s)' % (self.name, self.user, self.realm)
class Batch_Job(E.Entity):
name = f.string()
priority = f.integer(label='Pri.')
_key(name)
_key(priority)
@extentmethod
def t_multiple_keys_create(extent):
return T.Multiple_Keys_Create()
@extentmethod
def t_multiple_keys_update(extent):
return T.Multiple_Keys_Update()
def __unicode__(self):
return u'%s :: %i' % (self.name, self.priority)
class Realm(E.Entity):
name = f.string()
_key(name)
class _Create(T.Create):
def _undo(self):
return None
class User(E.Entity):
name = f.string()
age = f.integer(required=False)
_key(name)
_index(age)
_index(age, name)
_index(name, age)
@extentmethod
def t_create_foo_and_bar(extent):
return T.Create_Foo_And_Bar()
@extentmethod
def t_create_name_only(extent):
tx = E.User._Create()
del tx.f.age
return tx
@extentmethod
def t_trigger_key_collision(extent):
return T.Trigger_Key_Collision()
class Account(E.Entity):
"""Bank account."""
owner = f.entity('Person')
name = f.string()
balance = f.money()
overdraft_protection = f.boolean(default=False) # XXX
suspended = f.boolean(default=False) # XXX
_key(owner, name)
def t_suspend(self):
"""Suspend this account."""
tx = T.Suspend()
tx.account = self
tx.f.account.readonly = True
return tx
@with_label('Transfer Funds From This Account')
def t_transfer(self):
"""Transfer funds from this account."""
tx = T.Transfer()
tx.from_account = self
tx.f.from_account.readonly = True
tx._label = 'Transfer Funds From %s' % self
return tx
_sample_unittest = [
(('<NAME>', ), 'Personal', 204.52, False, False),
(('<NAME>', ), 'Business', 29142.75, True, False),
(('<NAME>', ), 'Family', 291.00, False, True),
(('<NAME>', ), 'Savings', 2816.50, False, False),
]
class Foo(E.Entity):
name = f.string()
user = f.entity('User', required=False)
_key(name, user)
class Gender(E.Entity):
"""Gender of a person."""
code = f.string()
name = f.string()
@f.integer()
def count(self):
return self.s.count('Person', 'gender')
_key(code)
_key(name)
class Person(E.Entity):
"""Bank account owner."""
name = f.string()
gender = f.entity('Gender', required=False)
_key(name)
_plural = 'People'
_sample_unittest = [
('<NAME>', UNASSIGNED),
('<NAME>', UNASSIGNED),
]
class Suspend(T.Transaction):
"""Suspend an account."""
account = f.entity('Account')
def _execute(self, db):
tx = self.account.t.update(suspended=True)
db.execute(tx)
class Transfer(T.Transaction):
"""Transfer money from one account to another."""
from_account = f.entity('Account')
to_account = f.entity('Account')
amount = f.money(min_value=0.00)
def _execute(self, db):
from_account = self.from_account
to_account = self.to_account
amount = self.amount
has_overdraft_protection = from_account.overdraft_protection
new_balance = from_account.balance - amount
# Validate.
if from_account.suspended or to_account.suspended:
raise Exception('An account is suspended.')
if not has_overdraft_protection and new_balance < 0.00:
raise Exception('Insufficient funds.')
# Transfer.
tx_withdraw = from_account.t.update()
tx_withdraw.balance -= amount
tx_deposit = to_account.t.update()
tx_deposit.balance += amount
db.execute(tx_withdraw, tx_deposit)
class Event(E.Entity):
"""An event must have a unique date and datetime, but none are
required."""
date = f.date(required=False)
datetime = f.datetime(required=False)
_key(date)
_key(datetime)
class Multiple_Keys_Create(T.Transaction):
def _execute(self, db):
# Whitespace is due to lack of syntax highlighting.
Batch_Job = db.Batch_Job
# Create an entity successfully.
tx = Batch_Job.t.create(name='foo', priority=1)
result = db.execute(tx)
# Creating these should fail because of collisions.
tx = Batch_Job.t.create(name='bar', priority=1)
try:
db.execute(tx)
except schevo.error.KeyCollision, e:
assert e.extent_name == 'Batch_Job'
assert e.key_spec == ('priority',)
assert e.field_values == (1,)
tx = Batch_Job.t.create(name='foo', priority=2)
try:
db.execute(tx)
except schevo.error.KeyCollision, e:
assert e.extent_name == 'Batch_Job'
assert e.key_spec == ('name',)
assert e.field_values == (u'foo',)
# Creating this should succeed as no side-effects should be
# left behind from the previous failure.
tx = Batch_Job.t.create(name='bar', priority=2)
result = db.execute(tx)
class Multiple_Keys_Update(T.Transaction):
def _execute(self, db):
Batch_Job = db.Batch_Job
# Create an entity successfully.
tx = Batch_Job.t.create(name='foo', priority=1)
result_foo = db.execute(tx)
# Create another entity successfully.
tx = Batch_Job.t.create(name='bar', priority=2)
result_bar = db.execute(tx)
# Updating the second one should fail because of collisions.
tx = result_bar.t.update(name='foo', priority=3)
try:
db.execute(tx)
except schevo.error.KeyCollision, e:
assert e.extent_name == 'Batch_Job'
assert e.key_spec == ('name',)
assert e.field_values == (u'foo',)
tx = result_bar.t.update(name='baz', priority=1)
try:
db.execute(tx)
except schevo.error.KeyCollision, e:
assert e.extent_name == 'Batch_Job'
assert e.key_spec == ('priority',)
assert e.field_values == (1,)
# Creating this should succeed as no side-effects should be
# left behind from the previous failure.
tx = Batch_Job.t.create(name='baz', priority=3)
result = db.execute(tx)
class UserRealmAvatar(T.Transaction):
def _execute(self, db):
# Create a user using attribute-setting syntax.
tx = db.User.t.create()
tx.name = 'foo'
user = db.execute(tx)
# Create a realm using attribute-setting syntax.
tx = db.Realm.t.create()
tx.name = 'bar'
realm = db.execute(tx)
# Create an avatar using keyword convenience syntax.
tx = db.Avatar.t.create(
name='baz',
user=user,
realm=realm,
)
avatar = db.execute(tx)
# Return the three.
return user, realm, avatar
def t_user_realm_avatar():
return UserRealmAvatar()
class LotsOfUsers(T.Transaction):
def _execute(self, db):
from random import randint
def randname():
name = []
for x in xrange(randint(5, 15)):
name.append(randint(ord('a'), ord('z')))
return ''.join(chr(c) for c in name)
for x in xrange(100):
name = randname()
if not db.User.find(db.User.f.name == name):
name = randname()
# Make sure that there is some overlap in 'age' to
# trigger faulty key collisions.
age = randint(20, 25)
tx = db.User.t.create(name=name, age=age)
db.execute(tx)
def t_lots_of_users():
return LotsOfUsers()
'''
def test_key_conflicts_on_create(self):
extent = db.User
# Create an entity.
tx = extent.t.create(name='foo')
user_foo = db.execute(tx)
# Attempting to create another user named 'foo' results in a
# KeyError.
self.reopen()
extent = db.User
tx = extent.t.create(name='foo')
try:
db.execute(tx)
except error.KeyCollision, e:
assert e.extent_name == 'User'
assert e.key_spec == ('name',)
assert e.field_values == (u'foo',)
def test_no_key_conflicts_on_create_if_necessary(self):
# Create an entity.
tx = db.User.t.create(name='foo')
user_foo = db.execute(tx)
# Attempting to create-if-necessary another user named 'foo'
# results in the original user.
tx = db.User.t.create_if_necessary(name='foo')
user_foo2 = db.execute(tx)
assert user_foo == user_foo2
assert user_foo2.s.rev == 0
def test_key_conflicts_on_update(self):
extent = db.User
# Create an entity.
tx = extent.t.create(name='foo')
user_foo = db.execute(tx)
# Creating another user, then attempting to rename it to 'foo'
# results in a KeyError.
self.reopen()
extent = db.User
tx = extent.t.create(name='bar')
user_bar = db.execute(tx)
tx = user_bar.t.update(name='foo')
try:
db.execute(tx)
except error.KeyCollision, e:
assert e.extent_name == 'User'
assert e.key_spec == ('name',)
assert e.field_values == (u'foo',)
def test_no_key_conflicts_on_delete(self):
extent = db.User
# Create an entity.
tx = extent.t.create(name='foo')
user_foo = db.execute(tx)
# If we delete user_foo, then attempt to recreate another user
# named 'foo', it should succeed.
self.reopen()
extent = db.User
user_foo = extent[1]
tx = user_foo.t.delete()
db.execute(tx)
self.reopen()
extent = db.User
tx = extent.t.create(name='foo')
user_foo = db.execute(tx)
assert user_foo.s.oid == 2
def test_multiple_keys_create(self):
tx = db.Batch_Job.t.multiple_keys_create()
db.execute(tx)
self.reopen()
assert len(db.Batch_Job) == 2
assert db.Batch_Job[1].name == 'foo'
assert db.Batch_Job[1].priority == 1
assert db.Batch_Job[2].name == 'bar'
assert db.Batch_Job[2].priority == 2
def test_multiple_keys_update(self):
tx = db.Batch_Job.t.multiple_keys_update()
db.execute(tx)
self.reopen()
assert len(db.Batch_Job) == 3
assert db.Batch_Job[1].name == 'foo'
assert db.Batch_Job[1].priority == 1
assert db.Batch_Job[2].name == 'bar'
assert db.Batch_Job[2].priority == 2
assert db.Batch_Job[3].name == 'baz'
assert db.Batch_Job[3].priority == 3
def test_date_datetime_keys(self):
dt = datetime.datetime.now()
d = dt.date()
t = dt.time()
# Create and delete events that don't conflict with keys to
# make sure they are persisted correctly.
event1 = db.execute(db.Event.t.create(datetime=dt))
assert event1.datetime == dt
event2 = db.execute(db.Event.t.create(date=d))
assert event2.date == d
db.execute(event1.t.delete())
db.execute(event2.t.delete())
event1 = db.execute(db.Event.t.create(date=d))
assert event1.date == d
event2 = db.execute(db.Event.t.create(datetime=dt))
assert event2.datetime == dt
def test_entity_reference_resolution_create(self):
user, realm, avatar = self.db.execute(db.t.user_realm_avatar())
# Test entity reference equality.
self.reopen()
avatar = db.Avatar[1]
user = db.User[1]
realm = db.Realm[1]
assert avatar.user == user
assert avatar.realm == realm
def test_entity_reference_resolution_update(self):
user1, realm, avatar = self.db.execute(db.t.user_realm_avatar())
# Create another user.
tx = db.User.t.create(name='foo2')
user2 = db.execute(tx)
# Update the avatar.
self.reopen()
avatar = db.Avatar[1]
user1 = db.User[1]
user2 = db.User[2]
tx = avatar.t.update(user=user2)
db.execute(tx)
# Verify that the entity reference got updated.
self.reopen()
avatar = db.Avatar[1]
user1 = db.User[1]
user2 = db.User[2]
assert avatar.user != user1
assert avatar.user == user2
def test_entity_links_create(self):
user, realm, avatar = self.db.execute(db.t.user_realm_avatar())
# No arguments to links().
self.reopen()
user = db.User[1]
realm = db.Realm[1]
avatar = db.Avatar[1]
user_links = user.s.links()
realm_links = realm.s.links()
assert len(user_links) == 1
assert len(realm_links) == 1
assert ('Avatar', 'user') in user_links
assert ('Avatar', 'realm') in realm_links
avatar_user = user_links[('Avatar', 'user')]
assert len(avatar_user) == 1
assert avatar_user[0] == avatar
avatar_realm = realm_links[('Avatar', 'realm')]
assert len(avatar_realm) == 1
assert avatar_realm[0] == avatar
# Argument to links.
user_links = user.s.links('Avatar', 'user')
assert len(user_links) == 1
assert user_links[0] == avatar
realm_links = realm.s.links('Avatar', 'realm')
assert len(realm_links) == 1
assert realm_links[0] == avatar
# Extent name typo.
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
mac: pulumi.Input[str],
allow_existing: Optional[pulumi.Input[bool]] = None,
blocked: Optional[pulumi.Input[bool]] = None,
dev_id_override: Optional[pulumi.Input[int]] = None,
fixed_ip: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_id: Optional[pulumi.Input[str]] = None,
note: Optional[pulumi.Input[str]] = None,
site: Optional[pulumi.Input[str]] = None,
skip_forget_on_destroy: Optional[pulumi.Input[bool]] = None,
user_group_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[str] mac: The MAC address of the user.
:param pulumi.Input[bool] allow_existing: Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
:param pulumi.Input[bool] blocked: Specifies whether this user should be blocked from the network.
:param pulumi.Input[int] dev_id_override: Override the device fingerprint.
:param pulumi.Input[str] fixed_ip: A fixed IPv4 address for this user.
:param pulumi.Input[str] name: The name of the user.
:param pulumi.Input[str] network_id: The network ID for this user.
:param pulumi.Input[str] note: A note with additional information for the user.
:param pulumi.Input[str] site: The name of the site to associate the user with.
:param pulumi.Input[bool] skip_forget_on_destroy: Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
:param pulumi.Input[str] user_group_id: The user group ID for the user.
"""
pulumi.set(__self__, "mac", mac)
if allow_existing is not None:
pulumi.set(__self__, "allow_existing", allow_existing)
if blocked is not None:
pulumi.set(__self__, "blocked", blocked)
if dev_id_override is not None:
pulumi.set(__self__, "dev_id_override", dev_id_override)
if fixed_ip is not None:
pulumi.set(__self__, "fixed_ip", fixed_ip)
if name is not None:
pulumi.set(__self__, "name", name)
if network_id is not None:
pulumi.set(__self__, "network_id", network_id)
if note is not None:
pulumi.set(__self__, "note", note)
if site is not None:
pulumi.set(__self__, "site", site)
if skip_forget_on_destroy is not None:
pulumi.set(__self__, "skip_forget_on_destroy", skip_forget_on_destroy)
if user_group_id is not None:
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter
def mac(self) -> pulumi.Input[str]:
"""
The MAC address of the user.
"""
return pulumi.get(self, "mac")
@mac.setter
def mac(self, value: pulumi.Input[str]):
pulumi.set(self, "mac", value)
@property
@pulumi.getter(name="allowExisting")
def allow_existing(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
"""
return pulumi.get(self, "allow_existing")
@allow_existing.setter
def allow_existing(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_existing", value)
@property
@pulumi.getter
def blocked(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this user should be blocked from the network.
"""
return pulumi.get(self, "blocked")
@blocked.setter
def blocked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "blocked", value)
@property
@pulumi.getter(name="devIdOverride")
def dev_id_override(self) -> Optional[pulumi.Input[int]]:
"""
Override the device fingerprint.
"""
return pulumi.get(self, "dev_id_override")
@dev_id_override.setter
def dev_id_override(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "dev_id_override", value)
@property
@pulumi.getter(name="fixedIp")
def fixed_ip(self) -> Optional[pulumi.Input[str]]:
"""
A fixed IPv4 address for this user.
"""
return pulumi.get(self, "fixed_ip")
@fixed_ip.setter
def fixed_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fixed_ip", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the user.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkId")
def network_id(self) -> Optional[pulumi.Input[str]]:
"""
The network ID for this user.
"""
return pulumi.get(self, "network_id")
@network_id.setter
def network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_id", value)
@property
@pulumi.getter
def note(self) -> Optional[pulumi.Input[str]]:
"""
A note with additional information for the user.
"""
return pulumi.get(self, "note")
@note.setter
def note(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "note", value)
@property
@pulumi.getter
def site(self) -> Optional[pulumi.Input[str]]:
"""
The name of the site to associate the user with.
"""
return pulumi.get(self, "site")
@site.setter
def site(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "site", value)
@property
@pulumi.getter(name="skipForgetOnDestroy")
def skip_forget_on_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
"""
return pulumi.get(self, "skip_forget_on_destroy")
@skip_forget_on_destroy.setter
def skip_forget_on_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_forget_on_destroy", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The user group ID for the user.
"""
return pulumi.get(self, "user_group_id")
@user_group_id.setter
def user_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_group_id", value)
@pulumi.input_type
class _UserState:
def __init__(__self__, *,
allow_existing: Optional[pulumi.Input[bool]] = None,
blocked: Optional[pulumi.Input[bool]] = None,
dev_id_override: Optional[pulumi.Input[int]] = None,
fixed_ip: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
ip: Optional[pulumi.Input[str]] = None,
mac: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_id: Optional[pulumi.Input[str]] = None,
note: Optional[pulumi.Input[str]] = None,
site: Optional[pulumi.Input[str]] = None,
skip_forget_on_destroy: Optional[pulumi.Input[bool]] = None,
user_group_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering User resources.
:param pulumi.Input[bool] allow_existing: Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
:param pulumi.Input[bool] blocked: Specifies whether this user should be blocked from the network.
:param pulumi.Input[int] dev_id_override: Override the device fingerprint.
:param pulumi.Input[str] fixed_ip: A fixed IPv4 address for this user.
:param pulumi.Input[str] hostname: The hostname of the user.
:param pulumi.Input[str] ip: The IP address of the user.
:param pulumi.Input[str] mac: The MAC address of the user.
:param pulumi.Input[str] name: The name of the user.
:param pulumi.Input[str] network_id: The network ID for this user.
:param pulumi.Input[str] note: A note with additional information for the user.
:param pulumi.Input[str] site: The name of the site to associate the user with.
:param pulumi.Input[bool] skip_forget_on_destroy: Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
:param pulumi.Input[str] user_group_id: The user group ID for the user.
"""
if allow_existing is not None:
pulumi.set(__self__, "allow_existing", allow_existing)
if blocked is not None:
pulumi.set(__self__, "blocked", blocked)
if dev_id_override is not None:
pulumi.set(__self__, "dev_id_override", dev_id_override)
if fixed_ip is not None:
pulumi.set(__self__, "fixed_ip", fixed_ip)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if ip is not None:
pulumi.set(__self__, "ip", ip)
if mac is not None:
pulumi.set(__self__, "mac", mac)
if name is not None:
pulumi.set(__self__, "name", name)
if network_id is not None:
pulumi.set(__self__, "network_id", network_id)
if note is not None:
pulumi.set(__self__, "note", note)
if site is not None:
pulumi.set(__self__, "site", site)
if skip_forget_on_destroy is not None:
pulumi.set(__self__, "skip_forget_on_destroy", skip_forget_on_destroy)
if user_group_id is not None:
pulumi.set(__self__, "user_group_id", user_group_id)
@property
@pulumi.getter(name="allowExisting")
def allow_existing(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this resource should just take over control of an existing user. Defaults to `true`.
"""
return pulumi.get(self, "allow_existing")
@allow_existing.setter
def allow_existing(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_existing", value)
@property
@pulumi.getter
def blocked(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this user should be blocked from the network.
"""
return pulumi.get(self, "blocked")
@blocked.setter
def blocked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "blocked", value)
@property
@pulumi.getter(name="devIdOverride")
def dev_id_override(self) -> Optional[pulumi.Input[int]]:
"""
Override the device fingerprint.
"""
return pulumi.get(self, "dev_id_override")
@dev_id_override.setter
def dev_id_override(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "dev_id_override", value)
@property
@pulumi.getter(name="fixedIp")
def fixed_ip(self) -> Optional[pulumi.Input[str]]:
"""
A fixed IPv4 address for this user.
"""
return pulumi.get(self, "fixed_ip")
@fixed_ip.setter
def fixed_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fixed_ip", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
The hostname of the user.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def ip(self) -> Optional[pulumi.Input[str]]:
"""
The IP address of the user.
"""
return pulumi.get(self, "ip")
@ip.setter
def ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip", value)
@property
@pulumi.getter
def mac(self) -> Optional[pulumi.Input[str]]:
"""
The MAC address of the user.
"""
return pulumi.get(self, "mac")
@mac.setter
def mac(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "mac", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the user.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkId")
def network_id(self) -> Optional[pulumi.Input[str]]:
"""
The network ID for this user.
"""
return pulumi.get(self, "network_id")
@network_id.setter
def network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network_id", value)
@property
@pulumi.getter
def note(self) -> Optional[pulumi.Input[str]]:
"""
A note with additional information for the user.
"""
return pulumi.get(self, "note")
@note.setter
def note(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "note", value)
@property
@pulumi.getter
def site(self) -> Optional[pulumi.Input[str]]:
"""
The name of the site to associate the user with.
"""
return pulumi.get(self, "site")
@site.setter
def site(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "site", value)
@property
@pulumi.getter(name="skipForgetOnDestroy")
def skip_forget_on_destroy(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether this resource should tell the controller to "forget" the user on destroy. Defaults to `false`.
"""
return pulumi.get(self, "skip_forget_on_destroy")
@skip_forget_on_destroy.setter
def skip_forget_on_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "skip_forget_on_destroy", value)
@property
@pulumi.getter(name="userGroupId")
def user_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The user group ID for the user.
"""
return pulumi.get(self, "user_group_id")
# chemsep_operation.py
"""ChemSep database operations. Gather serialized XML data,
convert parameters and add functions"""
import math
import numpy as np
import xml.etree.ElementTree as ET
from pickle import load, dump
class Chemical(object):
def __init__(self, name, lib_index):
self.name = name
self.LibraryIndex = lib_index
"""
self.CriticalTemperature = "K"
self.CriticalPressure = "Pa"
self.CriticalVolume = "m3"
self.CriticalCompressibility = ""
self.NormalBoilingPointTemperature = ""
self.NormalMeltingPointTemperature = ""
self.TriplePointTemperature = ""
self.TriplePointPressure = ""
self.MolecularWeight = ""
self.LiquidVolumeAtNormalBoilingPoint = ""
self.AcentricityFactor = ""
self.SolubilityParameter = ""
self.DipoleMoment = ""
self.HeatOfFormation = ""
self.GibbsEnergyOfFormation = ""
self.AbsEntropy = ""
self.HeatOfFusionAtMeltingPoint = ""
self.HeatOfCombustion = ""
self.COSTALDVolume = ""
self.DiameterLJ = ""
self.EnergyLJ = ""
self.RacketParameter = ""
self.FullerVolume = ""
self.Parachor = ""
self.SpecificGravity = ""
self.CostaldAcentricFactor = ""
self.WilsonVolume = ""
self.ChaoSeaderAcentricFactor = ""
self.ChaoSeaderSolubilityParameter = ""
self.ChaoSeaderLiquidVolume = ""
self.MatthiasCopemanC1 = ""
self.MatthiasCopemanC2 = ""
self.MatthiasCopemanC3 = ""
self.UniquacR = ""
self.UniquacQ = ""
self.UniquacQP = ""
self.ApiSrkS1 = ""
self.ApiSrkS2 = ""
self.UnifacVLE = [] #Array
self.UnifacLLE = [] #Array
self.Asog = [] #Array
self.GCmethod = [] #Array
self.Umr = [] #Array
self.ModifiedUnifac = [] #Array """
def eq1 (A):
"""Chemsep equation 1
:param A: Equation parameter"""
return A
def eq2 (A, B, T):
"""Chemsep equation 2
:param A: Equation parameter A
:param B: Equation parameter B
:param T: Temperature in K"""
return A + B*T
def eq3 (A, B, C, T):
"""Chemsep equation 3
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param T: Temperature in K"""
return A + B*T + C*(T**2)
def eq4 (A, B, C, D, T):
"""Chemsep equation 4
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param T: Temperature in K"""
return A + B*T + C*(T**2) + D*(T**3)
def eq5 (A, B, C, D, E, T):
"""Chemsep equation 5
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return A + B*T + C*(T**2) + D*(T**3) + E*(T**4)
def eq10 (A, B, C, T):
"""Chemsep equation 10
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param T: Temperature in K"""
return math.exp( A - B/(T+C) )
def eq11 (A):
"""Chemsep equation 11
:param A: Equation parameter A"""
return math.exp(A)
def eq12 (A, B, T):
"""Chemsep equation 12
:param A: Equation parameter A
:param B: Equation parameter B
:param T: Temperature in K"""
return math.exp(A + B*T)
def eq13 (A, B, C, T):
"""Chemsep equation 13
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param T: Temperature in K"""
return math.exp( A + B*T + C*(T**2) )
def eq14 (A, B, C, D, T):
"""Chemsep equation 14
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param T: Temperature in K"""
return math.exp( A + B*T + C*(T**2) + D*(T**3) )
def eq15 (A, B, C, D, E, T):
"""Chemsep equation 15
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return math.exp( A + B*T + C*(T**2) + D*(T**3) + E*(T**4) )
def eq16 (A, B, C, D, E, T):
"""Chemsep equation 16
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return A + math.exp( B/T + C + D*T + E*(T**2) )
def eq17 (A, B, C, D, E, T):
"""Chemsep equation 17
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return A + math.exp( B + C*T + D*(T**2) + E*(T**3) )
def eq100 (A, B, C, D, E, T):
"""Chemsep equation 100
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return A + B*T + C*(T**2) + D*(T**3) + E*(T**4)
def eq101 (A, B, C, D, E, T):
"""Chemsep equation 101
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return math.exp( A + B/T + C*math.log(T) + D*(T**E) )
def eq102 (A, B, C, D, T):
"""Chemsep equation 102
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param T: Temperature in K"""
return A*(T**B) / (1 + C/T + D/(T**2))
def eq104 (A, B, C, D, E, T):
"""Chemsep equation 104
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return A + B/T + C/(T**3) + D/(T**8) + E/(T**9)
def eq105 (A, B, C, D, T):
"""Chemsep equation 105
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param T: Temperature in K"""
body = 1+(1-T/C)**D
return A/math.pow(B,body)
def eq106 (A, B, C, D, E, Tc, T):
"""Chemsep equation 106
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param Tc: Critical temperature
:param T: Temperature in K"""
Tr = T/Tc #Reduced temperature
body = B + C*Tr + D*(Tr**2) + E*(Tr**3)
return A * math.pow(1-Tr, body)
def eq120 (A, B, C, T):
"""Chemsep equation 120
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param T: Temperature in K"""
return A - B/(T+C)
def eq121 (A, B, C, D, E, T):
"""Chemsep equation 121
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param D: Equation parameter D
:param E: Equation parameter E
:param T: Temperature in K"""
return A + B/T + C*math.log(T) + D*(T**E)
def eq208 (A, B, C, T):
"""Chemsep equation 208, Antoine equation
:param A: Equation parameter A
:param B: Equation parameter B
:param C: Equation parameter C
:param T: Temperature in K"""
body = A - B/(T+C)
return math.pow(10, body)
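#Usage sketch (added for illustration): the correlations above can be called
#directly once their constants are known. The Antoine constants below are
#hypothetical placeholders, not values taken from the ChemSep database.
#Example:
#p_sat = eq208(4.65, 1435.0, -64.0, 350.0) #vapour pressure estimate at 350 K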
#Temperature correlated data graphs
#param = equation_parameters(chemsep_xml,2,2,50)
#equation_to_array(param[0],param[1],param[2],param[3])
def plot_property (eqno, ID, p, data_points):
"""Converts temperature dependent equations
to x and y values. Plot the graph if needed.
:param eqno:[int] Which equation is used?
:param ID: ID[0]=name, ID[1]=CAS-No
:param p: Sequence of equation parameters,
:p[0]=A, [1]=B [2,3...]=C,D...
:param data_points: Tmin, Tmax, data point number
:return A list containing x,y tuples."""
y = [] # f(x) values
Tmin = data_points[0] # Lower temperature limit
Tmax = data_points[1] # Higher temperature limit
data = data_points[2] # Number of points
if eqno == 1:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq1(p[0]) )
elif eqno == 2:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq2(p[0], p[1], x[i]) )
elif eqno == 3:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq3(p[0],p[1],p[2],x[i]) )
elif eqno == 4:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq4(p[0],p[1],p[2],p[3],x[i]) )
elif eqno == 5:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq5(p[0],p[1],p[2],p[3],p[4],x[i]) )
elif eqno == 10:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq10(p[0],p[1],p[2],x[i]) )
elif eqno == 11:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq11(p[0]) )
elif eqno == 12:
x = np.linspace(Tmin, Tmax, data)
for i in range(0, data):
y.append( eq12(p[0], p[1], x[i]) )
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_hidden
prev_decoder = Dense(**dense_spec)
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(prev_decoder(decoder_output))]
decoder_predict_outputs += [decoder_dense(prev_decoder(infr_decoder_output))]
decoder_predict_outputs += [infr_h, infr_C]
else:
dense_spec['units'] = self.n_hidden
prev_decoder = Dense(**dense_spec)
dense_spec['units'] = self.n_bins
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(prev_decoder(decoder_output))
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(prev_decoder(infr_decoder_output))
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_inputs, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs)
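# Note: these backend functions are invoked with learning_phase=True in
# predict(), so dropout remains active at inference time and repeated decoder
# calls produce Monte Carlo samples rather than one deterministic trajectory.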
def fit(self, train_frames, **kwargs):
# IMPORTANT: assumes train_frames is an nparray, which technically
# does not allow for channels with different numbers of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_inputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
callbacks=callbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
samples = []
if inputs.ndim > 3:
encoder_inputs = [inputs[:, :self.lookback, :, i] for i in range(inputs.shape[3])]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1, :, i] for i in range(inputs.shape[3])]
else:
encoder_inputs = [inputs[:, :self.lookback]]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_inputs + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [np.stack(seq, axis=-1).T.squeeze()]
posterior_mean = np.stack(samples).mean(axis=0).squeeze()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_mean.take(i_ch, axis=-1)
ch_samples = [np.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [np.stack(ch_samples, axis=-1)]
else:
drawn_samples += [np.random.choice(self.n_bins, mc_samples, p=posterior_mean[t])
for t in range(predictive_horizon)]
drawn_samples = np.stack(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_mean, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels,
'n_layers': self.n_layers}
if fname is None:
fname = MultilayerMordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert all([k in model_spec for k in MultilayerMordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'multilayer_mordred_{}_bins_{}_hidden_{}_layers_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['n_layers'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
weights_fname = spec.pop('weights_fname', None)
if type(spec['ordinal_bins']) is not int:
spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].split('_')[1:][::2]]
#print(weights_fname)
assert weights_fname is not None, "Provide a valid weights filename to load model."
model = MultilayerMordredStrategy(**spec)
model.set_weights(weights_fname)
return model
@property
def seed_length(self):
return self.lookback + 1
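# Usage sketch (illustrative only; the frame provider and input shapes are
# assumptions, not part of this module). A strategy is constructed with its
# window sizes, fit on a generator factory, and predict() returns the posterior
# over ordinal bins plus sampled trajectories:
#
#   strategy = MultilayerMordredStrategy(ordinal_bins=85, units=64, n_layers=2,
#                                        lookback=100, horizon=100, n_channels=1)
#   strategy.fit(train_frames, epochs=10, batch_size=128)
#   result = strategy.predict(seed_inputs, predictive_horizon=50, mc_samples=100)
#   pdf, draws = result['ordinal_pdf'], result['draws']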
class AttentionMordredStrategy(ModelStrategy):
"""Implements the ordinal sequence-to-sequence time series forecasting strategy."""
required_spec_keys = ['ordinal_bins', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback']
id = 'mordred'
def __init__(self, ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9,
lookback=100, horizon=100, n_channels=1, custom_objs=[]):
# type: (int, int, float, float, int, int, int, list) -> None
self.n_bins = ordinal_bins
self.n_hidden = units
self.dropout_rate = dropout_rate
self.lam = lam
self.lookback = lookback
self.horizon = horizon
self.n_channels = n_channels
self.filename = '{}_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(
self.id, self.n_bins, self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels)
loss = 'categorical_crossentropy'
custom_objs = custom_objs
lstm_spec = {'units': self.n_hidden,
'return_state': True,
'kernel_regularizer': l2(self.lam),
'recurrent_regularizer': l2(self.lam),
'dropout': self.dropout_rate,
'recurrent_dropout': self.dropout_rate}
dense_spec = {'units': self.n_bins,
'activation': 'softmax',
'kernel_regularizer': l2(self.lam)}
infr_init_h = Input(shape=(self.n_hidden,))
infr_init_C = Input(shape=(self.n_hidden,))
if self.n_channels > 1:
all_encoder_inputs = [Input(shape=(None, self.n_bins[i]), name='encoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
all_decoder_inputs = [Input(shape=(None, self.n_bins[i]), name='decoder_channel_{}'.format(i + 1))
for i in range(self.n_channels)]
encoder_input = Concatenate(axis=-1)(all_encoder_inputs)
decoder_input = Concatenate(axis=-1)(all_decoder_inputs)
train_inputs = all_encoder_inputs + all_decoder_inputs
encoder_predict_inputs = all_encoder_inputs + [K.learning_phase()]
decoder_predict_inputs = all_decoder_inputs + [infr_init_h, infr_init_C, K.learning_phase()]
else:
encoder_input = Input(shape=(None, self.n_bins))
decoder_input = Input(shape=(None, self.n_bins))
train_inputs = [encoder_input, decoder_input]
encoder_predict_inputs = [encoder_input, K.learning_phase()]
decoder_predict_inputs = [decoder_input, infr_init_h, infr_init_C, K.learning_phase()]
encoder_fwd = LSTM(**lstm_spec)
lstm_spec['go_backwards'] = True
encoder_bkwd = LSTM(**lstm_spec)
_, h_fwd, C_fwd = encoder_fwd(encoder_input)
_, h_bkwd, C_bkwd = encoder_bkwd(encoder_input)
decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])]
lstm_spec['return_sequences'] = True
lstm_spec['go_backwards'] = False
decoder_lstm = LSTM(**lstm_spec)
decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states)
infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C])
if self.dropout_rate > 0.:
decoder_output = Dropout(self.dropout_rate)(decoder_output)
infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output)
if self.n_channels > 1:
train_outputs = []
decoder_predict_outputs = []
for i in range(self.n_channels):
dense_spec['units'] = self.n_bins[i]
decoder_dense = Dense(**dense_spec)
train_outputs += [decoder_dense(decoder_output)]
decoder_predict_outputs += [decoder_dense(infr_decoder_output)]
decoder_predict_outputs += [infr_h, infr_C]
else:
decoder_dense = Dense(**dense_spec)
decoded_sequence = decoder_dense(decoder_output)
train_outputs = [decoded_sequence]
inferred_sequence = decoder_dense(infr_decoder_output)
decoder_predict_outputs = [inferred_sequence, infr_h, infr_C]
self.__sequence2sequence = Model(train_inputs, train_outputs)
self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs)
self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states) # no learning phase
self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs)
self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs)
self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states)
self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs)
def fit(self, train_frames, **kwargs):
# IMPORTANT: assumes train_frames is an nparray, which technically
# does not allow for channels with different numbers of bins
batch_size = kwargs.get('batch_size', 256)
val_p = kwargs.get('validation_split', 0.15)
epochs = kwargs.get('epochs', 50)
def get_inputs(x):
if x.ndim > 3:
return [x[:, :self.lookback, :, i] for i in range(x.shape[-1])] + \
[x[:, self.lookback:self.lookback + self.horizon, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, :self.lookback], x[:, self.lookback:self.lookback + self.horizon]]
def get_outputs(x):
if x.ndim > 3:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1, :, i]
for i in range(x.shape[-1])]
else:
return [x[:, self.lookback + 1:self.lookback + self.horizon + 1]]
train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs, get_outputs=get_outputs,
batch_size=batch_size, val_p=val_p)
cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)]))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'),
ModelCheckpoint(cp_fname, monitor='val_loss', mode='min',
save_best_only=True,
save_weights_only=True)]
self.__sequence2sequence.fit_generator(train_gen,
steps_per_epoch=tr_steps,
verbose=2,
validation_data=val_gen,
validation_steps=val_steps,
callbacks=callbacks,
epochs=epochs)
self.__sequence2sequence.load_weights(cp_fname)
os.remove(cp_fname)
def predict(self, inputs, predictive_horizon=100, mc_samples=100):
samples = []
if inputs.ndim > 3:
encoder_inputs = [inputs[:, :self.lookback, :, i] for i in range(inputs.shape[3])]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1, :, i] for i in range(inputs.shape[3])]
else:
encoder_inputs = [inputs[:, :self.lookback]]
first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1]]
for i_s in range(mc_samples):
h, c = self.predict_stochastic_encoder(encoder_inputs + [True])
decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True])
seq = [decoder_stochastic_output[:-2]]
for t in range(predictive_horizon-1):
decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True])
seq += [decoder_stochastic_output[:-2]]
samples += [np.stack(seq, axis=-1).T.squeeze()]
posterior_mean = np.stack(samples).mean(axis=0).squeeze()
drawn_samples = []
if self.n_channels > 1:
for i_ch in range(self.n_channels):
ch_posterior = posterior_mean.take(i_ch, axis=-1)
ch_samples = [np.random.choice(self.n_bins[i_ch], mc_samples, p=ch_posterior[t])
for t in range(predictive_horizon)]
drawn_samples += [np.stack(ch_samples, axis=-1)]
else:
drawn_samples += [np.random.choice(self.n_bins, mc_samples, p=posterior_mean[t])
for t in range(predictive_horizon)]
drawn_samples = np.stack(drawn_samples, axis=-1)
return {'ordinal_pdf': posterior_mean, 'draws': drawn_samples}
def save(self, folder, fname=None):
if isinstance(self.n_bins, (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(self.n_bins)])
else:
ord_bins = self.n_bins
save_obj = {'ordinal_bins': ord_bins,
'units': self.n_hidden,
'dropout_rate': self.dropout_rate,
'lam': self.lam,
'lookback': self.lookback,
'horizon': self.horizon,
'n_channels':self.n_channels}
if fname is None:
fname = MordredStrategy.get_filename(save_obj, folder)
fname = folder + fname
weights_fname = fname + '_weights.h5'
save_obj['weights_fname'] = weights_fname
self.__sequence2sequence.save_weights(weights_fname, overwrite=True)
with open(fname, 'wb') as f:
pickle.dump(save_obj, f)
def set_weights(self, weights_fname):
self.__sequence2sequence.load_weights(weights_fname)
@staticmethod
def get_filename(model_spec, folder='.'):
assert all([k in model_spec for k in MordredStrategy.required_spec_keys])
if isinstance(model_spec['ordinal_bins'], (list,)):
ord_bins = '_'.join(['chbins{}_{}'.format(i_ch+1, b) for i_ch, b in enumerate(model_spec['ordinal_bins'])])
else:
ord_bins = model_spec['ordinal_bins']
fname = 'mordred_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format(ord_bins,
model_spec['units'],
model_spec['dropout_rate'],
model_spec['lam'],
model_spec['lookback'],
model_spec['horizon'],
model_spec['n_channels'])
return fname[:MAX_FNAME_LENGTH]
@staticmethod
def load(fname, custom_objs = None):
with open(fname, 'rb') as f:
spec = pickle.load(f)
if custom_objs is not None:
spec['custom_objs'] = custom_objs
if 'lambda' in spec:
l = spec.pop('lambda', 0.)
spec['lam'] = l
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# dualPrint v1.2
# A multi-platform application that generates print sets for multiple pages per sheet, two-sided printing.
# By <NAME> <EMAIL>, http://www.sourceforge.net/projects/dualprint
import pygtk
pygtk.require('2.0')
import gobject
import random
import os
import platform
import sys
import gtk
import webbrowser
#EndImports
# Pyperclip v1.3 (Extract, only copy functions have been implemented to use with dualPrint.)
# A cross-platform clipboard module for Python.
# By <NAME> <EMAIL>
# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
# Copyright (c) 2010, <NAME>
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the pyperclip nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Albert Sweigart "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Albert Sweigart BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Change Log:
# 1.2 Use the platform module to help determine OS.
# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError
def winSetClipboard(text):
GMEM_DDESHARE = 0x2000
ctypes.windll.user32.OpenClipboard(0)
ctypes.windll.user32.EmptyClipboard()
try:
# works on Python 2 (bytes() only takes one argument)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)
except TypeError:
# works on Python 3 (bytes() requires an encoding)
hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)
pchData = ctypes.windll.kernel32.GlobalLock(hCd)
try:
# works on Python 2 (bytes() only takes one argument)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
except TypeError:
# works on Python 3 (bytes() requires an encoding)
ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
ctypes.windll.kernel32.GlobalUnlock(hCd)
ctypes.windll.user32.SetClipboardData(1,hCd)
ctypes.windll.user32.CloseClipboard()
def macSetClipboard(text):
outf = os.popen('pbcopy', 'w')
outf.write(text)
outf.close()
def gtkSetClipboard(text):
cb = gtk.Clipboard()
cb.set_text(text)
cb.store()
def qtSetClipboard(text):
cb.setText(text)
def xclipSetClipboard(text):
outf = os.popen('xclip -selection c', 'w')
outf.write(text)
outf.close()
def xselSetClipboard(text):
outf = os.popen('xsel -i', 'w')
outf.write(text)
outf.close()
if os.name == 'nt' or platform.system() == 'Windows':
import ctypes
setcb = winSetClipboard
elif os.name == 'mac' or platform.system() == 'Darwin':
setcb = macSetClipboard
elif os.name == 'posix' or platform.system() == 'Linux':
xclipExists = os.system('which xclip') == 0
if xclipExists:
setcb = xclipSetClipboard
else:
xselExists = os.system('which xsel') == 0
if xselExists:
setcb = xselSetClipboard
try:
setcb = gtkSetClipboard
except:
try:
import PyQt4.QtCore
import PyQt4.QtGui
app = PyQt4.QtGui.QApplication([])
cb = PyQt4.QtGui.QApplication.clipboard()
setcb = qtSetClipboard
except:
raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')
copy = setcb
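# Usage sketch: after the dispatch above, `copy` points at the platform's
# clipboard setter, e.g.
#copy("1,3,5,7") # place a print range on the system clipboard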
#Continue dualPrint...
class iscApp1:
iscVwImpar = ""
iscVqDifference = 0
iscVqTest = 0
iscVcountText = "countText"
iscVn1 = 1
iscVwrite = "write"
iscVwPar = ""
iscVstartText = "1"
iscVslText = "4"
iscVnText = "12"
iscVcount = 0
iscVstart = 1
iscVsl = 4
iscVn = 12
iscVguion = "-"
iscVcoma = ","
iscVn2 = 2
iscVNotifyOSD_Imp = "notify-send \'dualPrint: Odd Copy\' \'The first print set has been copied to the clipboard. You may paste it in the print dialog.\'"
iscVNotifyOSD_Par = "notify-send \'dualPrint: Even Copy\' \'The second print set has been copied to the clipboard. You may paste it in the print dialog.\'"
iscVlink_web = "http://sourceforge.net/projects/dualprint/"
iscVlink_license = "http://www.opensource.org/licenses/MIT"
iscVwAbout = 0
iscVlink_pHelp = "redirect.html"
iscWindow151main1 = gtk.Window(gtk.WINDOW_TOPLEVEL)
iscWindow151main1Fixed = gtk.Fixed()
iscWindow151nQ0 =gtk.Label("Which would be the last page to print?")
iscWindow151slidesQ0 =gtk.Label("How many slides or pages per side?")
iscWindow151n0 = gtk.Entry()
iscWindow151sl0 = gtk.Entry()
iscWindow151bStart0 = gtk.Button("Generate Print Sets")
iscWindow151inicioQ0 =gtk.Label("Which would be the first page to print?")
iscWindow151start0 = gtk.Entry()
iscWindow151infoImpar0 =gtk.Label("Odd, set of pages to print first.")
iscWindow151wImpar0 = gtk.Entry()
iscWindow151parInfo0 =gtk.Label("Even, set of pages to print on the back.")
iscWindow151wPar0 = gtk.Entry()
iscWindow151CI0 = gtk.Image()
iscWindow151CP0 = gtk.Image()
iscWindow151about0 = gtk.Button("About dualPrint")
iscWindow151paper0 = gtk.Button("Printing help")
iscWindow151header0 = gtk.Image()
iscWindow1about1 = gtk.Window(gtk.WINDOW_TOPLEVEL)
iscWindow1about1Fixed = gtk.Fixed()
iscWindow1icon0 = gtk.Image()
iscWindow1info0 =gtk.Label("dualPrint is a multi-platform application that")
iscWindow1rights0 =gtk.Label("Copyright © 2012 <NAME>")
iscWindow1close0 = gtk.Button("Close")
iscWindow1web0 = gtk.Button("Website")
iscWindow1MIT0 =gtk.Label("This software is under the MIT License")
iscWindow1version0 =gtk.Label("1.2")
iscWindow1dualprint0 =gtk.Label("dualPrint")
iscWindow1license0 = gtk.Button("License")
iscWindow1illumination0 =gtk.Label("Built in Illumination Software Creator")
iscWindow1info10 =gtk.Label("generates print sets for multiple pages per")
iscWindow1info20 =gtk.Label("sheet, two-sided, printing.")
iscWindow1MIT10 =gtk.Label("For more information click License.")
#EndOfGlobalVariables
def main(self):
gtk.main()
def destroy(self, widget, data=None):
gtk.main_quit()
#EndOfClass
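# Illustrative sketch only (this is NOT the ISC-generated algorithm below,
# which also handles several pages per side): for plain duplex printing the
# two print sets are simply the odd and even pages of the requested range.
def simple_duplex_sets(start, last):
    odd = ",".join(str(p) for p in range(start, last + 1) if p % 2 == 1)
    even = ",".join(str(p) for p in range(start, last + 1) if p % 2 == 0)
    return odd, even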
def iscTargetIs3():
iscIf_Linux122()
#iscTargetIs3Python
pass
def iscTargetIs4():
iscIf_Linux113()
#iscTargetIs4Python
pass
def iscIfThen5():
if thisiscApp1.iscVwAbout == thisiscApp1.iscVn1:
#iscIfThen5True
pass
else:
iscSetNumber139()
#iscIfThen5False
pass
def iscTargetIs6():
iscClipboard_Copy118()
#iscTargetIs6Python
pass
def iscTargetIs7():
iscClipboard_Copy118()
#iscTargetIs7Python
pass
def iscTargetIs8():
iscClipboard_Copy120()
#iscTargetIs8Python
pass
def iscPortalDestination9():
iscSetNumber69()
iscAdd132()
iscAdd130()
iscAdd138()
iscDoWhile10()
#iscPortalDestination9Arrived
def iscDoWhile10():
while thisiscApp1.iscVcount < thisiscApp1.iscVn:
iscCombineText97()
iscConvertNumberToText133()
iscCombineText64()
iscAdd132()
iscAdd130()
#iscDoWhile10Loop
iscSetText100()
iscAdd132()
iscAdd130()
iscDoWhile11()
#iscDoWhile10Finished
def iscDoWhile11():
while thisiscApp1.iscVcount < thisiscApp1.iscVn:
iscCombineText97()
iscConvertNumberToText133()
iscCombineText64()
iscAdd132()
iscAdd130()
#iscDoWhile11Loop
iscSetText61()
#iscDoWhile11Finished
def iscPortalDeparture12():
iscPortalDestination31();
#iscPortalDeparture12Done
def iscIfThen13():
if thisiscApp1.iscVqTest > thisiscApp1.iscVn:
iscPortalDeparture12()
#iscIfThen13True
pass
else:
iscPortalDeparture14()
#iscIfThen13False
pass
def iscPortalDeparture14():
iscPortalDestination33();
#iscPortalDeparture14Done
def iscIfThen15():
if thisiscApp1.iscVqTest == thisiscApp1.iscVqDifference:
iscPortalDeparture14()
#iscIfThen15True
pass
else:
iscAdd136()
iscAdd137()
iscIfThen13()
#iscIfThen15False
pass
def iscIfThen16():
if thisiscApp1.iscVqTest < thisiscApp1.iscVqDifference:
iscPortalDeparture14()
#iscIfThen16True
pass
else:
iscIfThen15()
#iscIfThen16False
pass
def iscPortalDeparture17():
iscPortalDestination32();
#iscPortalDeparture17Done
def iscIfThen18():
if thisiscApp1.iscVqTest == thisiscApp1.iscVn:
iscPortalDeparture17()
#iscIfThen18True
pass
else:
iscDivide145()
iscIfThen16()
#iscIfThen18False
pass
def iscPortalDeparture19():
iscPortalDestination9()
#iscPortalDeparture19Done
def iscIfThen20():
if thisiscApp1.iscVsl == thisiscApp1.iscVn1:
iscPortalDeparture19()
#iscIfThen20True
pass
else:
iscIfThen18()
#iscIfThen20False
pass
def iscIfThen21():
if thisiscApp1.iscVqTest > thisiscApp1.iscVn:
iscMessageBox53()
#iscIfThen21True
pass
else:
iscIfThen20()
#iscIfThen21False
pass
def iscIfThen22():
if thisiscApp1.iscVsl < thisiscApp1.iscVn:
iscAdd136()
iscIfThen21()
#iscIfThen22True
pass
else:
iscMessageBox54()
#iscIfThen22False
pass
def iscIfThen23():
if thisiscApp1.iscVn < thisiscApp1.iscVn2:
iscMessageBox58()
#iscIfThen23True
pass
else:
iscIfThen24()
#iscIfThen23False
pass
def iscIfThen24():
if thisiscApp1.iscVstart < thisiscApp1.iscVn1:
iscMessageBox57()
#iscIfThen24True
pass
else:
iscIfThen25()
#iscIfThen24False
pass
def iscIfThen25():
if thisiscApp1.iscVsl < thisiscApp1.iscVn1:
iscMessageBox56()
#iscIfThen25True
pass
else:
iscIfThen26()
#iscIfThen25False
pass
def iscIfThen26():
if thisiscApp1.iscVstart < thisiscApp1.iscVn:
iscIfThen22()
#iscIfThen26True
pass
else:
iscMessageBox55()
#iscIfThen26False
pass
def iscPortalDestination27():
iscAdd127()
iscSubtract128()
iscIfThen142()
#iscPortalDestination27Arrived
def iscPortalDeparture28():
iscPortalDestination27()
#iscPortalDeparture28Done
def iscPortalDestination29():
iscAdd132()
iscAdd130()
iscConvertNumberToText133()
iscCombineText64()
iscPortalDeparture28()
#iscPortalDestination29Arrived
def iscPortalDeparture30():
iscPortalDestination29()
#iscPortalDeparture30Done
def iscPortalDestination31():
iscSetNumber69()
iscCombineText78()
iscPortalDeparture37()
iscSetText100()
iscCombineText78()
iscAdd127()
iscSubtract128()
iscSubtract123()
iscSubtract131()
iscConvertNumberToText133()
iscCombineText64()
iscSetText61()
#iscPortalDestination31Arrived
def iscPortalDestination32():
iscSetNumber69()
iscCombineText78()
iscPortalDeparture37()
iscSetText100()
iscSetText61()
#iscPortalDestination32Arrived
def iscPortalDestination33():
iscSetNumber69()
iscCombineText78()
iscPortalDeparture39()
#iscPortalDestination33Arrived
def iscPortalDestination34():
iscAdd127()
iscAdd126()
iscIfThen141()
#iscPortalDestination34Arrived
def iscPortalDestination35():
iscAdd132()
iscSubtract131()
iscConvertNumberToText133()
iscCombineText64()
iscPortalDeparture36()
#iscPortalDestination35Arrived
def iscPortalDeparture36():
iscPortalDestination34()
#iscPortalDeparture36Done
def iscPortalDeparture37():
iscPortalDestination35()
#iscPortalDeparture37Done
def iscPortalDestination38():
iscPortalDeparture37()
iscSetText100()
iscCombineText78()
iscPortalDeparture37()
iscSetText61()
#iscPortalDestination38Arrived
def iscPortalDeparture39():
iscPortalDestination38()
#iscPortalDeparture39Done
def iscOpen_in_Web_Browser41():
url=(thisiscApp1.iscVlink_license)
webbrowser.open(url)
#iscOpen_in_Web_Browser41Done
def iscOpen_in_Web_Browser43():
url=(thisiscApp1.iscVlink_web)
webbrowser.open(url)
#iscOpen_in_Web_Browser43Done
def iscOpen_in_Web_Browser45():
url=(thisiscApp1.iscVlink_pHelp)
webbrowser.open(url)
#iscOpen_in_Web_Browser45Done
def iscRunShellScript47():
os.system(thisiscApp1.iscVNotifyOSD_Par)
#iscRunShellScript47Done
def iscAdd48():
thisiscApp1.iscVcount = thisiscApp1.iscVstart + thisiscApp1.iscVsl
iscConvertNumberToText49()
#iscAdd48Done
def iscConvertNumberToText49():
thisiscApp1.iscVwrite = str(thisiscApp1.iscVcount)
#iscConvertNumberToText49Done
def iscConvertTextToNumber50():
try:
thisiscApp1.iscVn = int(thisiscApp1.iscVnText)
except ValueError:
thisiscApp1.iscVn = 0
iscConvertTextToNumber51()
#iscConvertTextToNumber50Done
def iscConvertTextToNumber51():
try:
thisiscApp1.iscVsl = int(thisiscApp1.iscVslText)
except ValueError:
thisiscApp1.iscVsl = 0
iscConvertTextToNumber52()
#iscConvertTextToNumber51Done
def iscConvertTextToNumber52():
try:
thisiscApp1.iscVstart = int(thisiscApp1.iscVstartText)
except ValueError:
thisiscApp1.iscVstart = 0
iscIfThen23()
#iscConvertTextToNumber52Done
def iscMessageBox53():
message = "The sum of the starting page and the pages per side should be less than or equal to the total of pages to print."
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_NONE, message)
dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
dialog.run()
#iscMessageBox53Open
dialog.destroy()
#iscMessageBox53Closed
def iscMessageBox54():
message = "The number of pages per side should be less than the total of pages to print."
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_NONE, message)
dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
dialog.run()
#iscMessageBox54Open
dialog.destroy()
#iscMessageBox54Closed
def iscMessageBox55():
message = "The starting page should be lower than the total of pages to print."
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_NONE, message)
dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
dialog.run()
#iscMessageBox55Open
dialog.destroy()
#iscMessageBox55Closed
def iscMessageBox56():
message = "You should print at least 1 page per side."
dialog = gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_NONE, message)
dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
dialog.run()
#iscMessageBox56Open
dialog.destroy()
#iscMessageBox56Closed
def iscMessageBox57():
message
# Copyright 2019 The Matrix.org Foundation CIC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import os
import urllib.parse
import concurrent.futures
from json import JSONDecodeError
from typing import Any, Dict
from uuid import uuid4
import aiohttp
import attr
import keyring
from aiohttp import ClientSession, web
from aiohttp.client_exceptions import ClientConnectionError, ContentTypeError
from jsonschema import ValidationError
from multidict import CIMultiDict
from nio import (
Api,
EncryptionError,
LoginResponse,
OlmTrustError,
SendRetryError,
DownloadResponse,
)
from nio.crypto import decrypt_attachment
from pantalaimon.client import (
SEARCH_TERMS_SCHEMA,
InvalidLimit,
InvalidOrderByError,
PanClient,
UnknownRoomError,
validate_json,
)
from pantalaimon.index import INDEXING_ENABLED, InvalidQueryError
from pantalaimon.log import logger
from pantalaimon.store import ClientInfo, PanStore
from pantalaimon.thread_messages import (
AcceptSasMessage,
CancelSasMessage,
CancelSendingMessage,
ConfirmSasMessage,
DaemonResponse,
DeviceBlacklistMessage,
DeviceUnblacklistMessage,
DeviceUnverifyMessage,
DeviceVerifyMessage,
ExportKeysMessage,
ImportKeysMessage,
SasMessage,
SendAnywaysMessage,
StartSasMessage,
UnverifiedDevicesSignal,
UnverifiedResponse,
UpdateUsersMessage,
ContinueKeyShare,
CancelKeyShare,
)
CORS_HEADERS = {
"Access-Control-Allow-Headers": (
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
),
"Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
"Access-Control-Allow-Origin": "*",
}
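# Usage sketch (illustrative): these headers are meant to be attached to the
# proxy's aiohttp responses so browser-based clients pass CORS checks, e.g.
#   return web.Response(status=200, headers=CORS_HEADERS)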
@attr.s
class ProxyDaemon:
name = attr.ib()
homeserver = attr.ib()
conf = attr.ib()
data_dir = attr.ib()
send_queue = attr.ib()
recv_queue = attr.ib()
proxy = attr.ib(default=None)
ssl = attr.ib(default=None)
client_store_class = attr.ib(default=None)
decryption_timeout = 10
unverified_send_timeout = 60
store = attr.ib(type=PanStore, init=False)
homeserver_url = attr.ib(init=False, default=attr.Factory(dict))
hostname = attr.ib(init=False, default=attr.Factory(dict))
pan_clients = attr.ib(init=False, default=attr.Factory(dict))
client_info = attr.ib(init=False, default=attr.Factory(dict), type=dict)
default_session = attr.ib(init=False, default=None)
media_info = attr.ib(init=False, default=None)
database_name = "pan.db"
def __attrs_post_init__(self):
loop = asyncio.get_event_loop()
self.homeserver_url = self.homeserver.geturl()
self.hostname = self.homeserver.hostname
self.store = PanStore(self.data_dir)
accounts = self.store.load_users(self.name)
self.media_info = self.store.load_media(self.name)
for user_id, device_id in accounts:
if self.conf.keyring:
try:
token = keyring.get_password(
"pantalaimon", f"{user_id}-{device_id}-token"
)
except RuntimeError as e:
logger.error(e)
else:
token = self.store.load_access_token(user_id, device_id)
if not token:
logger.warn(
f"Not restoring client for {user_id} {device_id}, "
f"missing access token."
)
continue
logger.info(f"Restoring client for {user_id} {device_id}")
pan_client = PanClient(
self.name,
self.store,
self.conf,
self.homeserver_url,
self.send_queue,
user_id,
device_id,
store_path=self.data_dir,
ssl=self.ssl,
proxy=self.proxy,
store_class=self.client_store_class,
media_info=self.media_info,
)
pan_client.user_id = user_id
pan_client.access_token = token
pan_client.load_store()
self.pan_clients[user_id] = pan_client
loop.create_task(
self.send_ui_message(
UpdateUsersMessage(self.name, user_id, pan_client.device_id)
)
)
loop.create_task(pan_client.send_update_devices(pan_client.device_store))
pan_client.start_loop()
async def _find_client(self, access_token):
client_info = self.client_info.get(access_token, None)
if not client_info:
async with aiohttp.ClientSession() as session:
try:
method, path = Api.whoami(access_token)
resp = await session.request(
method,
self.homeserver_url + path,
proxy=self.proxy,
ssl=self.ssl,
)
except ClientConnectionError:
return None
if resp.status != 200:
return None
try:
body = await resp.json()
except (JSONDecodeError, ContentTypeError):
return None
try:
user_id = body["user_id"]
except KeyError:
return None
if user_id not in self.pan_clients:
logger.warn(
f"User {user_id} doesn't have a matching pan " f"client."
)
return None
logger.info(
f"Homeserver confirmed valid access token "
f"for user {user_id}, caching info."
)
client_info = ClientInfo(user_id, access_token)
self.client_info[access_token] = client_info
client = self.pan_clients.get(client_info.user_id, None)
return client
async def _verify_device(self, message_id, client, device):
ret = client.verify_device(device)
if ret:
msg = (
f"Device {device.id} of user " f"{device.user_id} succesfully verified."
)
await client.send_update_device(device)
else:
msg = f"Device {device.id} of user " f"{device.user_id} already verified."
logger.info(msg)
await self.send_response(message_id, client.user_id, "m.ok", msg)
async def _unverify_device(self, message_id, client, device):
ret = client.unverify_device(device)
if ret:
msg = (
f"Device {device.id} of user "
f"{device.user_id} succesfully unverified."
)
await client.send_update_device(device)
else:
msg = f"Device {device.id} of user " f"{device.user_id} already unverified."
logger.info(msg)
await self.send_response(message_id, client.user_id, "m.ok", msg)
async def _blacklist_device(self, message_id, client, device):
ret = client.blacklist_device(device)
if ret:
msg = (
f"Device {device.id} of user "
f"{device.user_id} succesfully blacklisted."
)
await client.send_update_device(device)
else:
msg = (
f"Device {device.id} of user " f"{device.user_id} already blacklisted."
)
logger.info(msg)
await self.send_response(message_id, client.user_id, "m.ok", msg)
async def _unblacklist_device(self, message_id, client, device):
ret = client.unblacklist_device(device)
if ret:
msg = (
f"Device {device.id} of user "
f"{device.user_id} succesfully unblacklisted."
)
await client.send_update_device(device)
else:
msg = (
f"Device {device.id} of user "
f"{device.user_id} already unblacklisted."
)
logger.info(msg)
await self.send_response(message_id, client.user_id, "m.ok", msg)
async def send_response(self, message_id, pan_user, code, message):
"""Send a thread response message to the UI thread."""
message = DaemonResponse(message_id, pan_user, code, message)
await self.send_ui_message(message)
async def send_ui_message(self, message):
"""Send a thread message to the UI thread."""
if self.send_queue:
await self.send_queue.put(message)
async def receive_message(self, message):
client = self.pan_clients.get(message.pan_user)
if isinstance(
message,
(
DeviceVerifyMessage,
DeviceUnverifyMessage,
StartSasMessage,
DeviceBlacklistMessage,
DeviceUnblacklistMessage,
),
):
device = client.device_store[message.user_id].get(message.device_id, None)
if not device:
msg = (
f"No device found for {message.user_id} and " f"{message.device_id}"
)
await self.send_response(
message.message_id, message.pan_user, "m.unknown_device", msg
)
logger.info(msg)
return
if isinstance(message, DeviceVerifyMessage):
await self._verify_device(message.message_id, client, device)
elif isinstance(message, DeviceUnverifyMessage):
await self._unverify_device(message.message_id, client, device)
elif isinstance(message, DeviceBlacklistMessage):
await self._blacklist_device(message.message_id, client, device)
elif isinstance(message, DeviceUnblacklistMessage):
await self._unblacklist_device(message.message_id, client, device)
elif isinstance(message, StartSasMessage):
await client.start_sas(message, device)
elif isinstance(message, SasMessage):
if isinstance(message, AcceptSasMessage):
await client.accept_sas(message)
elif isinstance(message, ConfirmSasMessage):
await client.confirm_sas(message)
elif isinstance(message, CancelSasMessage):
await client.cancel_sas(message)
elif isinstance(message, ExportKeysMessage):
path = os.path.abspath(os.path.expanduser(message.file_path))
logger.info(f"Exporting keys to {path}")
try:
await client.export_keys(path, message.passphrase)
except OSError as e:
info_msg = (
f"Error exporting keys for {client.user_id} to" f" {path} {e}"
)
logger.info(info_msg)
await self.send_response(
message.message_id, client.user_id, "m.os_error", str(e)
)
else:
info_msg = (
f"Succesfully exported keys for {client.user_id} " f"to {path}"
)
logger.info(info_msg)
await self.send_response(
message.message_id, client.user_id, "m.ok", info_msg
)
elif isinstance(message, ImportKeysMessage):
path = os.path.abspath(os.path.expanduser(message.file_path))
logger.info(f"Importing keys from {path}")
try:
await client.import_keys(path, message.passphrase)
except (OSError, EncryptionError) as e:
info_msg = (
f"Error importing keys for {client.user_id} " f"from {path} {e}"
)
logger.info(info_msg)
await self.send_response(
message.message_id, client.user_id, "m.os_error", str(e)
)
else:
info_msg = (
f"Succesfully imported keys for {client.user_id} " f"from {path}"
)
logger.info(info_msg)
await self.send_response(
message.message_id, client.user_id, "m.ok", info_msg
)
elif isinstance(message, UnverifiedResponse):
client = self.pan_clients[message.pan_user]
if message.room_id not in client.send_decision_queues:
msg = (
f"No send request found for user {message.pan_user} "
f"and room {message.room_id}."
)
await self.send_response(
message.message_id, message.pan_user, "m.unknown_request", msg
)
return
queue = client.send_decision_queues[message.room_id]
await queue.put(message)
elif isinstance(message, (ContinueKeyShare, CancelKeyShare)):
client = self.pan_clients[message.pan_user]
await client.handle_key_request_message(message)
def get_access_token(self, request):
# type: (aiohttp.web.BaseRequest) -> str
"""Extract the access token from the request.
This method extracts the access token either from the query string or
from the Authorization header of the request.
Returns the access token if it was found.
"""
access_token = request.query.get("access_token", "")
if not access_token:
access_token = request.headers.get("Authorization", "").replace("Bearer ", "", 1)  # remove the "Bearer " prefix; str.strip() would also eat token characters
return access_token
def sanitize_filter(self, sync_filter):
# type: (Dict[Any, Any]) -> Dict[Any, Any]
"""Make sure that a filter isn't filtering encrypted messages."""
sync_filter = dict(sync_filter)
room_filter = sync_filter.get("room", None)
if room_filter:
timeline_filter = room_filter.get("timeline", None)
if timeline_filter:
types_filter = timeline_filter.get("types", None)
if types_filter:
if "m.room.encrypted" not in types_filter:
types_filter.append("m.room.encrypted")
not_types_filter = timeline_filter.get("not_types", None)
if not_types_filter:
try:
not_types_filter.remove("m.room.encrypted")
except ValueError:
pass
return sync_filter
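# Example (illustrative): a client filter such as
#   {"room": {"timeline": {"types": ["m.room.message"]}}}
# is returned as
#   {"room": {"timeline": {"types": ["m.room.message", "m.room.encrypted"]}}}
# so encrypted events still reach the proxy and can be decrypted before
# being handed back to the client.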
async def forward_request(
self,
request, # type: aiohttp.web.BaseRequest
params=None, # type: CIMultiDict
data=None, # type: bytes
session=None, # type: aiohttp.ClientSession
token=None, # type: str
):
# type: (...) -> aiohttp.ClientResponse
"""Forward the given request to our configured homeserver.
Args:
request (aiohttp.BaseRequest): The request that should be
forwarded.
params (CIMultiDict, optional): The query parameters for the
request.
data (Dict, optional): Data for the request.
session (aiohttp.ClientSession, optional): The client session that
should be used to forward the request.
token (str, optional): The access token that should be used for the
request.
"""
if not session:
if not self.default_session:
self.default_session = ClientSession()
session = self.default_session
assert session
path = urllib.parse.quote(
request.path
) # re-encode path stuff like room aliases
method = request.method
headers = CIMultiDict(request.headers)
headers.pop("Host", None)
params = params or CIMultiDict(request.query)
if token:
if "Authorization" in headers:
headers["Authorization"] = f"Bearer {token}"
if "access_token" in params:
params["access_token"] = token
if data:
data = data
headers.pop("Content-Length", None)
else:
data = await request.read()
return await session.request(
method,
self.homeserver_url + path,
data=data,
params=params,
headers=headers,
proxy=self.proxy,
ssl=self.ssl,
)
async def forward_to_web(
self, request, params=None, data=None, session=None, token=None
):
"""Forward the given request and convert the response to a Response.
If there is an exception raised by the client session, this method
returns a Response with a 500 status code and the text set to the error
message of the exception.
Args:
request (aiohttp.BaseRequest): The request that should be forwarded.
import numpy as np
class Real():
def __init__(self, value: float = 0):
self.value = np.array([value], dtype=float)
def __add__(self, rhs):
out = Real()
if isinstance(rhs, Real):
out.value = self.value + rhs.value
else:
out.value = self.value + rhs
return out
def __radd__(self, lhs):
out = Real()
if isinstance(lhs, Real):
out.value = lhs.value + self.value
else:
out.value = lhs + self.value
return out
def __sub__(self, rhs):
out = Real()
if isinstance(rhs, Real):
out.value = self.value - rhs.value
else:
out.value = self.value - rhs
return out
def __rsub__(self, lhs):
out = Real()
if isinstance(lhs, Real):
out.value = lhs.value - self.value
else:
out.value = lhs - self.value
return out
def __mul__(self, rhs):
out = Real()
if isinstance(rhs, (Real, Complex, RealMatrix, ComplexMatrix)):
out.value = self.value*rhs.value
elif isinstance(rhs, (float, int, complex)):
out.value = self.value*rhs
return out
def __rmul__(self, lhs):
out = Real()
if isinstance(lhs, (Real, Complex, RealMatrix, ComplexMatrix)):
out.value = lhs.value*self.value
elif isinstance(lhs, (float, int, complex)):
out.value = lhs*self.value
return out
def __pow__(self, n):
out = Real()
if isinstance(n, (float, int)):
out.value = self.value**n
else:
out.value = self.value**n.value
return out
class Complex(Real):
def __init__(self, value: complex = 1j):
super().__init__()
self.value = np.array([value], dtype=complex)
def re(self):
out = Real()
out.value = np.real(self.value)
return out
def im(self):
out = Real()
out.value = np.imag(self.value)
return out
def conj(self):
out = Complex()
out.value = np.conj(self.value)
return out
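# Usage sketch (illustrative): the scalar wrappers behave like ordinary numbers
# while storing their payload in a one-element numpy array.
#   a = Real(2.0)
#   b = Real(3.5)
#   c = a + b          # Real holding 5.5
#   z = Complex(1 + 2j)
#   zc = z.conj()      # Complex holding 1 - 2j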
class RealMatrix():
def __init__(self, N: int = None, value: np.ndarray = None):
if N != None:
self.N = N
self.value = np.zeros((N, N), dtype=float)
else:
self.N = len(value)
self.value = value
def transpose(self):
out = RealMatrix(self.N)
out.value = np.transpose(self.value)
return out
def trace(self):
tr = np.trace(self.value)
return Real(tr)
def det(self):
d = np.linalg.det(self.value)
return Real(d)
def inv(self):
out = RealMatrix(self.N)
out.value = np.linalg.inv(self.value)
return out
def __add__(self, rhs):
if isinstance(rhs, RealMatrix):
out = RealMatrix(self.N)
if isinstance(rhs, ComplexMatrix):
# ComplexMatrix subclasses RealMatrix, so re-check to keep the complex type
out = ComplexMatrix(self.N)
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
return out
def __radd__(self, lhs):
if isinstance(lhs, RealMatrix):
out = RealMatrix(self.N)
if isinstance(lhs, ComplexMatrix):
out = ComplexMatrix(self.N)
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
if isinstance(rhs, RealMatrix):
out = RealMatrix(self.N)
if isinstance(rhs, ComplexMatrix):
out = ComplexMatrix(self.N)
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
if isinstance(lhs, RealMatrix):
out = RealMatrix(self.N)
if isinstance(lhs, ComplexMatrix):
out = ComplexMatrix(self.N)
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
if isinstance(rhs, RealMatrix):
out = RealMatrix(self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = np.dot(self.value, rhs.value)
elif isinstance(rhs, Real):
out = RealMatrix(self.N)
out.value = self.value*rhs.value
elif isinstance(rhs, Complex):
out = ComplexMatrix(self.N)
out.value = self.value*rhs.value
elif isinstance(rhs, VectorComplex):
out = VectorComplex(Nd=self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = np.dot(self.value, rhs.value)
elif isinstance(rhs, VectorReal):
out = VectorReal(Nd=self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = np.dot(self.value, rhs.value)
return out
class Identity(RealMatrix):
def __init__(self, N: int):
super().__init__(N)
self.value = np.diag([1]*self.N)
class ComplexMatrix(RealMatrix):
def __init__(self, N: int = None, value: np.ndarray = None):
if N != None:
self.N = N
self.value = np.zeros((N, N), dtype=complex)
else:
self.N = len(value)
self.value = value
def transpose(self):
out = ComplexMatrix(self.N)
out.value = np.transpose(self.value)
return out
def conj(self):
out = ComplexMatrix(self.N)
out.value = np.conj(self.value)
return out
def adj(self):
tmp = ComplexMatrix(self.N)
tmp = self.conj()
return tmp.transpose()
def re(self):
out = RealMatrix(self.N)
out.value = np.real(self.value)
return out
def im(self):
out = RealMatrix(self.N)
out.value = np.imag(self.value)
return out
def trace(self):
tr = np.trace(self.value)
return Complex(tr)
def det(self):
d = np.linalg.det(self.value)
return Complex(d)
def inv(self):
out = ComplexMatrix(self.N)
out.value = np.linalg.inv(self.value)
return out
def __add__(self, rhs):
out = ComplexMatrix(self.N)
if isinstance(rhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
return out
def __radd__(self, lhs):
out = ComplexMatrix(self.N)
if isinstance(lhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
out = ComplexMatrix(self.N)
if isinstance(rhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
out = ComplexMatrix(self.N)
if isinstance(lhs, (RealMatrix, ComplexMatrix)):
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
if isinstance(rhs, RealMatrix):
out = ComplexMatrix(self.N)  # product of a complex and a real matrix stays complex
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = np.dot(self.value, rhs.value)
elif isinstance(rhs, (Complex, Real)):
out = ComplexMatrix(self.N)  # scaling a complex matrix keeps it complex
out.value = self.value*rhs.value
elif isinstance(rhs, VectorComplex):
out = VectorComplex(Nd=self.N)
assert(self.value.shape[1] == rhs.value.shape[0])
out.value = np.dot(self.value, rhs.value)
return out
class VectorReal():
def __init__(self, Nd: int = None, value: np.ndarray = None):
if Nd != None:
self.Nd = Nd
self.value = np.array([0.]*self.Nd, dtype=float)
else:
self.Nd = len(value)
self.value = value
def __getitem__(self, mu: int):
return Real(self.value[mu])
def poke_component(self, mu: int, m):
if isinstance(m, Real):
self.value[mu] = m.value
elif isinstance(m, (int, float)):
self.value[mu] = m
def __add__(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
elif isinstance(rhs, Real):
out.value = self.value + rhs.value
return out
def __radd__(self, lhs):
out = VectorReal(Nd=self.Nd)
if isinstance(lhs, VectorReal):
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
elif isinstance(lhs, Real):
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
elif isinstance(rhs, Real):
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
out = VectorReal(Nd=self.Nd)
if isinstance(lhs, VectorReal):
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
elif isinstance(lhs, Real):
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = self.value * rhs.value
elif isinstance(rhs, Real):
out.value = self.value * rhs.value
return out
def dot(self, rhs):
out = VectorReal(Nd=self.Nd)
if isinstance(rhs, VectorReal):
assert(self.value.shape == rhs.value.shape)
out.value = np.dot(self.value, rhs.value)
elif isinstance(rhs, Real):
out.value = self.value*rhs.value
return out
def transpose(self):
out = VectorReal(Nd=self.Nd)
out.value = self.value[:]
return out
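# Hedged usage sketch for VectorReal (Real is the scalar wrapper assumed from
# earlier in this module):
#   v = VectorReal(Nd=3)
#   v.poke_component(0, 1.0); v.poke_component(1, 2.0); v.poke_component(2, 3.0)
#   v[0]               # Real(1.0) via __getitem__
#   w = v * Real(2.0)  # component-wise scaling -> value [2.0, 4.0, 6.0]
#   s = v.dot(v)       # s.value holds the scalar np.dot result, 14.0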
class VectorComplex():
def __init__(self, Nd: int = None, value: np.ndarray = None):
if Nd != None:
self.Nd = Nd
self.value = np.array([1j]*self.Nd, dtype=complex)
else:
self.Nd = len(value)
self.value = value
def __getitem__(self, mu: int):
return Complex(self.value[mu])
def poke_component(self, mu: int, m):
if isinstance(m, Complex):
self.value[mu] = m.value
elif isinstance(m, (int, float)):
self.value[mu] = m
def __add__(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = self.value + rhs.value
elif isinstance(rhs, (Real, Complex)):
out.value = self.value + rhs.value
return out
def __radd__(self, lhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(lhs, VectorComplex):
assert(self.value.shape == lhs.value.shape)
out.value = self.value + lhs.value
elif isinstance(lhs, (Real, Complex)):
out.value = self.value + lhs.value
return out
def __sub__(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = self.value - rhs.value
elif isinstance(rhs, (Real, Complex)):
out.value = self.value - rhs.value
return out
def __rsub__(self, lhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(lhs, VectorComplex):
assert(self.value.shape == lhs.value.shape)
out.value = lhs.value - self.value
elif isinstance(lhs, (Real, Complex)):
out.value = lhs.value - self.value
return out
def __mul__(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = self.value * rhs.value
elif isinstance(rhs, (Real, Complex)):
out.value = self.value * rhs.value
return out
def dot(self, rhs):
out = VectorComplex(Nd=self.Nd)
if isinstance(rhs, VectorComplex):
assert(self.value.shape == rhs.value.shape)
out.value = np.dot(self.value, rhs.value)
elif isinstance(rhs, (Real, Complex)):
out.value = self.value*rhs.value
return out
def transpose(self):
out = VectorComplex(Nd=self.Nd)
out.value = self.value[:]
return out
class VectorRealMatrix():
def __init__(self, Nd: int = None, N: int = None, value: np.ndarray = None):
self.Nd = Nd
self.N = N
if N != None and Nd != None:
self.value = np.zeros(shape=(Nd, N, N), dtype=float)
else:
self.value = value
self.Nd = value.shape[0]
self.N = value.shape[1]
def __getitem__(self, mu: int):
out = RealMatrix(N=self.N)
out.value = self.value[mu]
return out
def poke_component(self, mu: int, m):
if isinstance(m, RealMatrix):
self.value[mu] = m.value
elif isinstance(m, np.ndarray):
self.value[mu] = m
def __add__(self, rhs):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
if isinstance(rhs, VectorRealMatrix):
assert(self.value.shape == rhs.value.shape)
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + rhs.value[mu]
elif isinstance(rhs, RealMatrix):
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + rhs.value
elif isinstance(rhs, Real):
out.value = self.value + rhs.value
elif isinstance(rhs, (int, float)):
out.value = self.value + rhs
return out
def __radd__(self, lhs):
out = VectorRealMatrix(Nd=self.Nd, N=self.N)
if isinstance(lhs, VectorRealMatrix):
assert(self.value.shape == lhs.value.shape)
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + lhs.value[mu]
elif isinstance(lhs, RealMatrix):
for mu in range(self.Nd):
out.value[mu] = self.value[mu] + lhs.value
S 39702528 cmand
29140 1 0% 0% 0% S 4927488 rotee
29439 1 0% 0% 0% S 4927488 rotee
29452 27708 0% 0% 0% S 4407296 pman.sh
29495 28464 0% 0% 0% S 27250688 emd
29699 27708 0% 0% 0% S 4407296 pman.sh
29704 1 0% 0% 0% S 4927488 rotee
29787 28831 0% 0% 0% S 4294967295 fman_rp
29949 1 0% 0% 0% S 4927488 rotee
30526 29699 0% 0% 0% S 52994048 imand
30643 27708 0% 0% 0% S 4407296 pman.sh
30914 29452 0% 0% 0% R 43081728 hman
30953 1 0% 0% 0% S 4927488 rotee
31695 27708 0% 0% 0% S 4407296 pman.sh
31700 30643 1% 5% 32% S 4294967295 linux_iosd-imag
32105 27708 0% 0% 0% S 4431872 pman.sh
32309 1 0% 0% 0% S 4927488 rotee
32339 27708 0% 0% 0% S 4407296 pman.sh
32609 1 0% 0% 0% S 4927488 rotee
32706 1 0% 0% 0% S 4927488 rotee
'''
}
def test_golden(self):
self.device = Mock(**self.golden_output)
cpu_platform_obj = ShowProcessesCpuPlatform(device=self.device)
parsed_output = cpu_platform_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_empty(self):
self.device1 = Mock(**self.empty_output)
cpu_platform_obj = ShowProcessesCpuPlatform(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = cpu_platform_obj.parse()
class TestShowEnv(unittest.TestCase):
dev = Device(name='c3850')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {'critical_larams': 0,
'major_alarms': 0,
'minor_alarms': 0,
'slot': {'0': {'sensor': {'Temp: Asic1': {'reading': '50 Celsius',
'state': 'Normal'},
'Temp: Center': {'reading': '37 Celsius',
'state': 'Normal'},
'Temp: Left': {'reading': '30 Celsius',
'state': 'Normal'},
'Temp: Right': {'reading': '35 Celsius',
'state': 'Normal'},
'V1: 12v': {'reading': '11894 mV',
'state': 'Normal'},
'V1: GP1': {'reading': '749 mV',
'state': 'Normal'},
'V1: GP2': {'reading': '898 mV',
'state': 'Normal'},
'V1: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V1: VMA': {'reading': '1098 mV',
'state': 'Normal'},
'V1: VMB': {'reading': '1196 mV',
'state': 'Normal'},
'V1: VMC': {'reading': '1494 mV',
'state': 'Normal'},
'V1: VMD': {'reading': '1796 mV',
'state': 'Normal'},
'V1: VME': {'reading': '2490 mV',
'state': 'Normal'},
'V1: VMF': {'reading': '3286 mV',
'state': 'Normal'},
'V2: 12v': {'reading': '11865 mV',
'state': 'Normal'},
'V2: GP2': {'reading': '747 mV',
'state': 'Normal'},
'V2: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V2: VMB': {'reading': '996 mV',
'state': 'Normal'},
'V2: VME': {'reading': '747 mV',
'state': 'Normal'},
'V2: VMF': {'reading': '747 mV',
'state': 'Normal'}}},
'1': {'sensor': {'Temp: Asic1': {'reading': '38 Celsius',
'state': 'Normal'},
'Temp: Center': {'reading': '29 Celsius',
'state': 'Normal'},
'Temp: Left': {'reading': '26 Celsius',
'state': 'Normal'},
'Temp: Right': {'reading': '29 Celsius',
'state': 'Normal'},
'V1: 12v': {'reading': '11879 mV',
'state': 'Normal'},
'V1: GP1': {'reading': '747 mV',
'state': 'Normal'},
'V1: GP2': {'reading': '891 mV',
'state': 'Normal'},
'V1: VDD': {'reading': '3291 mV',
'state': 'Normal'},
'V1: VMA': {'reading': '1098 mV',
'state': 'Normal'},
'V1: VMB': {'reading': '1196 mV',
'state': 'Normal'},
'V1: VMC': {'reading': '1494 mV',
'state': 'Normal'},
'V1: VMD': {'reading': '1791 mV',
'state': 'Normal'},
'V1: VME': {'reading': '2490 mV',
'state': 'Normal'},
'V1: VMF': {'reading': '3286 mV',
'state': 'Normal'},
'V2: 12v': {'reading': '11865 mV',
'state': 'Normal'},
'V2: GP2': {'reading': '749 mV',
'state': 'Normal'},
'V2: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V2: VMB': {'reading': '996 mV',
'state': 'Normal'},
'V2: VME': {'reading': '747 mV',
'state': 'Normal'},
'V2: VMF': {'reading': '747 mV',
'state': 'Normal'}}},
'F0': {'sensor': {'Temp: CPP Rear': {'reading': '40 Celsius',
'state': 'Normal'},
'Temp: HKP Die': {'reading': '47 Celsius',
'state': 'Normal'},
'Temp: Inlet': {'reading': '30 Celsius',
'state': 'Normal'},
'Temp: Left Ext': {'reading': '42 Celsius',
'state': 'Normal'},
'Temp: MCH Die': {'reading': '53 Celsius',
'state': 'Normal'},
'Temp: Olv Die': {'reading': '38 Celsius',
'state': 'Normal'},
'Temp: Pop Die': {'reading': '43 Celsius',
'state': 'Normal'},
'Temp: Rght Ext': {'reading': '37 Celsius',
'state': 'Normal'},
'V1: 12v': {'reading': '11821 mV',
'state': 'Normal'},
'V1: GP1': {'reading': '908 mV',
'state': 'Normal'},
'V1: GP2': {'reading': '771 mV',
'state': 'Normal'},
'V1: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V1: VMA': {'reading': '1796 mV',
'state': 'Normal'},
'V1: VMB': {'reading': '1196 mV',
'state': 'Normal'},
'V1: VMC': {'reading': '996 mV',
'state': 'Normal'},
'V1: VMD': {'reading': '1044 mV',
'state': 'Normal'},
'V1: VME': {'reading': '1020 mV',
'state': 'Normal'},
'V1: VMF': {'reading': '1098 mV',
'state': 'Normal'},
'V2: 12v': {'reading': '11748 mV',
'state': 'Normal'},
'V2: GP1': {'reading': '771 mV',
'state': 'Normal'},
'V2: GP2': {'reading': '1096 mV',
'state': 'Normal'},
'V2: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V2: VMA': {'reading': '3291 mV',
'state': 'Normal'},
'V2: VMB': {'reading': '2495 mV',
'state': 'Normal'},
'V2: VMC': {'reading': '1499 mV',
'state': 'Normal'},
'V2: VMD': {'reading': '1196 mV',
'state': 'Normal'},
'V2: VME': {'reading': '1103 mV',
'state': 'Normal'},
'V2: VMF': {'reading': '1000 mV',
'state': 'Normal'},
'V3: 12v': {'reading': '11850 mV',
'state': 'Normal'},
'V3: VDD': {'reading': '3300 mV',
'state': 'Normal'},
'V3: VMA': {'reading': '3291 mV',
'state': 'Normal'},
'V3: VMB': {'reading': '2495 mV',
'state': 'Normal'},
'V3: VMC': {'reading': '1499 mV',
'state': 'Normal'},
'V3: VMD': {'reading': '1000 mV',
'state': 'Normal'}}},
'F1': {'sensor': {'Temp: CPP Rear': {'reading': '46 Celsius',
'state': 'Normal'},
'Temp: HKP Die': {'reading': '52 Celsius',
'state': 'Normal'},
'Temp: Inlet': {'reading': '31 Celsius',
'state': 'Normal'},
'Temp: Left Ext': {'reading': '43 Celsius',
'state': 'Normal'},
'Temp: MCH Die': {'reading': '54 Celsius',
'state': 'Normal'},
'Temp: Olv Die': {'reading': '41 Celsius',
'state': 'Normal'},
'Temp: Pop Die': {'reading': '48 Celsius',
'state': 'Normal'},
'Temp: Rght Ext': {'reading': '37 Celsius',
'state': 'Normal'},
'V1: 12v': {'reading': '11821 mV',
'state': 'Normal'},
'V1: GP1': {'reading': '903 mV',
'state': 'Normal'},
'V1: GP2': {'reading': '769 mV',
'state': 'Normal'},
'V1: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V1: VMA': {'reading': '1796 mV',
'state': 'Normal'},
'V1: VMB': {'reading': '1196 mV',
'state': 'Normal'},
'V1: VMC': {'reading': '996 mV',
'state': 'Normal'},
'V1: VMD': {'reading': '1049 mV',
'state': 'Normal'},
'V1: VME': {'reading': '1035 mV',
'state': 'Normal'},
'V1: VMF': {'reading': '1098 mV',
'state': 'Normal'},
'V2: 12v': {'reading': '11762 mV',
'state': 'Normal'},
'V2: GP1': {'reading': '771 mV',
'state': 'Normal'},
'V2: GP2': {'reading': '1088 mV',
'state': 'Normal'},
'V2: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V2: VMA': {'reading': '3291 mV',
'state': 'Normal'},
'V2: VMB': {'reading': '2495 mV',
'state': 'Normal'},
'V2: VMC': {'reading': '1499 mV',
'state': 'Normal'},
'V2: VMD': {'reading': '1196 mV',
'state': 'Normal'},
'V2: VME': {'reading': '1098 mV',
'state': 'Normal'},
'V2: VMF': {'reading': '996 mV',
'state': 'Normal'},
'V3: 12v': {'reading': '11806 mV',
'state': 'Normal'},
'V3: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V3: VMA': {'reading': '3286 mV',
'state': 'Normal'},
'V3: VMB': {'reading': '2495 mV',
'state': 'Normal'},
'V3: VMC': {'reading': '1494 mV',
'state': 'Normal'},
'V3: VMD': {'reading': '996 mV',
'state': 'Normal'}}},
'P0': {'sensor': {'Iin': {'reading': '1 A', 'state': 'Normal'},
'Iout': {'reading': '15 A', 'state': 'Normal'},
'Temp1': {'reading': '26 Celsius',
'state': 'Normal'},
'Temp2': {'reading': '31 Celsius',
'state': 'Normal'},
'Temp3': {'reading': '26 Celsius',
'state': 'Normal'},
'Vin': {'reading': '101 V AC',
'state': 'Normal'},
'Vout': {'reading': '12 V AC',
'state': 'Normal'}}},
'P1': {'sensor': {'Iin': {'reading': '2 A', 'state': 'Normal'},
'Iout': {'reading': '16 A', 'state': 'Normal'},
'Temp1': {'reading': '26 Celsius',
'state': 'Normal'},
'Temp2': {'reading': '33 Celsius',
'state': 'Normal'},
'Temp3': {'reading': '26 Celsius',
'state': 'Normal'},
'Vin': {'reading': '101 V AC',
'state': 'Normal'},
'Vout': {'reading': '12 V AC',
'state': 'Normal'}}},
'P2': {'sensor': {'Iin': {'reading': '1 A', 'state': 'Normal'},
'Iout': {'reading': '13 A', 'state': 'Normal'},
'Temp1': {'reading': '26 Celsius',
'state': 'Normal'},
'Temp2': {'reading': '31 Celsius',
'state': 'Normal'},
'Temp3': {'reading': '26 Celsius',
'state': 'Normal'},
'Vin': {'reading': '101 V AC',
'state': 'Normal'},
'Vout': {'reading': '12 V AC',
'state': 'Normal'}}},
'P3': {'sensor': {'Iin': {'reading': '1 A', 'state': 'Normal'},
'Iout': {'reading': '13 A', 'state': 'Normal'},
'Temp1': {'reading': '26 Celsius',
'state': 'Normal'},
'Temp2': {'reading': '31 Celsius',
'state': 'Normal'},
'Temp3': {'reading': '26 Celsius',
'state': 'Normal'},
'Vin': {'reading': '100 V AC',
'state': 'Normal'},
'Vout': {'reading': '12 V AC',
'state': 'Normal'}}},
'P6': {'sensor': {'Temp1': {'reading': '38 Celsius',
'state': 'Normal'},
'Temp: FC PWM1': {'reading': '26 Celsius',
'state': 'Fan Speed 60%'}}},
'P7': {'sensor': {'Temp1': {'reading': '37 Celsius',
'state': 'Normal'},
'Temp: FC PWM1': {'reading': '26 Celsius',
'state': 'Fan Speed 60%'}}},
'R0': {'sensor': {'Temp: C2D C0': {'reading': '35 Celsius',
'state': 'Normal'},
'Temp: C2D C1': {'reading': '37 Celsius',
'state': 'Normal'},
'Temp: CPU AIR': {'reading': '32 Celsius',
'state': 'Normal'},
'Temp: Inlet': {'reading': '26 Celsius',
'state': 'Normal'},
'Temp: MCH AIR': {'reading': '40 Celsius',
'state': 'Normal'},
'Temp: MCH DIE': {'reading': '54 Celsius',
'state': 'Normal'},
'Temp: Outlet': {'reading': '30 Celsius',
'state': 'Normal'},
'Temp: SCBY AIR': {'reading':
logger.debug(f"target book: {book}")
if len(book) > 0:
notable_work = book
item_kind = "book"
logger.debug(f"notable_work: {notable_work}")
# TODO : oh my god
if notable_work:
# body = "So... "
body = ""
prompt = random.choice(this_gossip.REACTION_TO_CREATIVE_WORK[bot_emotion_towards_current_person])
prompt = prompt.replace("target_creative_work", item_kind)
prompt = prompt.replace("target_work_name", notable_work)
prompt = replace_gender(prompt, gender)
prompt = replace_occupation(prompt, bot_judgement, current_person_occupation)
# if user is creative but has no known works we skip `em`
# if:
# # body = "So... "
# body = ""
# prompt = random.choice(
# this_gossip.GENERIC_REACTION_TO_CREATIVE_WORK[bot_emotion_towards_current_person]
# )
# prompt = prompt.replace("target_creative_work", item_kind)
# ASPECT #4: SPORTSPEOPLE
if not prompt:
if "Sports" in current_cobot_topic:
item_kind = "team"
sports_kind = "sports"
# TODO : oh my god
team_name = "[[]]"
sport, teams = utils.get_teams_for_sportsperson(current_person, fake_utterance)
sports_kind = sport[0][1]
logger.debug(f"teams: {teams}")
if len(teams) > 0:
random_team = random.choice(teams)
logger.debug(f"target team: {random_team}")
if len(random_team) > 0:
team_name = random_team[1]
logger.debug(f"team name: {team_name}")
# TODO : oh my god
if "[[]]" not in str(team_name):
# body = "So... "
body = ""
prompt = random.choice(this_gossip.REACTION_TO_SPORT[bot_emotion_towards_current_person])
prompt = prompt.replace("target_sport_name", sports_kind)
prompt = prompt.replace("target_sport_team", team_name)
prompt = replace_gender(prompt, gender)
prompt = replace_occupation(prompt, bot_judgement, current_person_occupation)
# TODO : oh my god
if "[[]]" in str(team_name):
# body = "So... "
body = ""
prompt = random.choice(this_gossip.GENERIC_REACTION_TO_SPORT[bot_emotion_towards_current_person])
prompt = prompt.replace("target_sport_name", sports_kind)
prompt = replace_gender(prompt, gender)
prompt = replace_occupation(prompt, bot_judgement, current_person_occupation)
# ASPECT 5. CELEBRITY
if not prompt:
body = ""
prompt = get_celebrity_prompt(vars, current_person)
logger.info(f"usr_agrees_abt_person_response: CELEBRITY, suggested prompt: {prompt}")
if not prompt:
prompt = ""
else:
im_gender = utils.get_human_readable_gender_statement_current_im(gender)
# eir_gender = utils.get_human_readable_gender_statement_current_eir(gender)
prompt = prompt.replace("this person", im_gender)
prompt = prompt.replace("target_gender_im", im_gender)
elif prompt: # put memory to zero
# Not talking about celebrity - saving
logger.debug("Not in celebrity branch")
state_utils.save_to_shared_memory(vars, celebrity_prompt=False)
if prompt:
state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
state_utils.set_can_continue(vars, common_constants.CAN_CONTINUE_PROMPT)
else:
state_utils.set_confidence(vars, CANNOT_CONTINUE_CONFIDENCE)
state_utils.set_can_continue(vars, common_constants.CAN_NOT_CONTINUE)
return " ".join([body, prompt])
except Exception as exc:
logger.exception(exc)
sentry_sdk.capture_exception(exc)
return error_response(vars)
# # STEP 2
# ################################################################################
# def sys_person_agree_request(ngrams, vars):
# flag = False
# raise NotImplementedError() # YOUR CODE HERE
# info.info(f"weekend_request={flag}")
# return flag
# def usr_person_agree_response(vars):
# logger.debug("exec usr_person_agree_response")
# try:
# state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
# state_utils.set_can_continue(vars)
# response_text = "" # YOUR CODE HERE
# raise NotImplementedError() # YOUR CODE HERE
# return response_text
# except Exception as exc:
# logger.exception(exc)
# sentry_sdk.capture_exception(exc)
# return error_response(vars)
# # STEP 3
# ################################################################################
# def sys_says_something_after_agree_request(ngrams, vars):
# flag = False
# raise NotImplementedError() # YOUR CODE HERE
# info.info(f"weekend_request={flag}")
# return flag
# def usr_says_something_after_agree_response(vars):
# logger.debug("exec usr_says_something_after_agree_response")
# try:
# state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
# state_utils.set_can_continue(vars)
# response_text = "" # YOUR CODE HERE
# raise NotImplementedError() # YOUR CODE HERE
# return response_text
# except Exception as exc:
# logger.exception(exc)
# sentry_sdk.capture_exception(exc)
# return error_response(vars)
# endregion
# region LOOP #3: DISAGREES_ABT_PERSON
# STEP 1
################################################################################
def sys_disagrees_abt_person_request(ngrams, vars):
flag = False
human_utterance = state_utils.get_last_human_utterance(vars)
sf_type, sf_confidence = utils.get_speech_function_for_human_utterance(human_utterance)
logger.debug(f"sys_disagrees_abt_person: Speech Function: {sf_type}")
# using speech function classifier for disagree (no)
# (with the aid of MIDAS & Intents for now)
flag = utils.is_speech_function_disagree(vars)
logger.info(f"sys_disagrees_abt_person={flag}")
return flag
def usr_disagrees_abt_person_response(vars):
logger.debug("exec usr_disagrees_abt_person_response")
try:
state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
state_utils.set_can_continue(vars, common_constants.CAN_NOT_CONTINUE)
# Wait but why... But a little bit smarter this time around
# response_text = utils.get_not_used_and_save_wait_but_why_question(vars)
response_text = "OK. I got it."
return response_text
except Exception as exc:
logger.exception(exc)
sentry_sdk.capture_exception(exc)
return error_response(vars)
# # STEP 2
# ################################################################################
# def sys_person_disagree_request(ngrams, vars):
# flag = False
# raise NotImplementedError() # YOUR CODE HERE
# info.info(f"weekend_request={flag}")
# return flag
# def usr_person_disagree_response(vars):
# logger.debug("exec usr_person_disagree_response")
# try:
# state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
# state_utils.set_can_continue(vars)
# response_text = "" # YOUR CODE HERE
# raise NotImplementedError() # YOUR CODE HERE
# return response_text
# except Exception as exc:
# logger.exception(exc)
# sentry_sdk.capture_exception(exc)
# return error_response(vars)
# # STEP 3
# ################################################################################
# def sys_says_something_after_disagree_request(ngrams, vars):
# flag = False
# raise NotImplementedError() # YOUR CODE HERE
# info.info(f"weekend_request={flag}")
# return flag
# def usr_says_something_after_disagree_response(vars):
# logger.debug("exec usr_says_something_after_disagree_response")
# try:
# state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
# state_utils.set_can_continue(vars)
# response_text = "" # YOUR CODE HERE
# raise NotImplementedError() # YOUR CODE HERE
# return response_text
# except Exception as exc:
# logger.exception(exc)
# sentry_sdk.capture_exception(exc)
# return error_response(vars)
# endregion
# region LOOP #4: SAYS_OPINION_ABT_PERSON
# STEP 1
################################################################################
def sys_says_opinion_abt_person_request(ngrams, vars):
flag = False
human_utterance = state_utils.get_last_human_utterance(vars)
sf_type, sf_confidence = utils.get_speech_function_for_human_utterance(human_utterance)
logger.debug(f"sys_says_opinion_abt_person_request: Speech Function: {sf_type}")
# using speech function classifier for express_opinion
# (with the aid of MIDAS & Intents for now)
flag = utils.is_speech_function_express_opinion(vars)
logger.info(f"sys_says_opinion_abt_person_request={flag}")
return flag
def usr_says_opinion_abt_person_response(vars):
logger.debug("exec usr_says_opinion_abt_person_response")
try:
shared_memory = state_utils.get_shared_memory(vars)
# while we understand this is an opinion we don't know what it actually is
# so we use sentiment analysis as a shortcut
sentiment = state_utils.get_human_sentiment(vars, negative_threshold=0.75)
current_person = shared_memory.get("current_person", "")
# generating sentiment-based response
judgement = "Other"
if "negative" in sentiment:
judgement = "Disliked"
elif "positive" in sentiment:
judgement = "Liked"
elif "neutral" in sentiment:
judgement = "Other"
save_mentioned_person(vars, current_person, judgement, "people_mentioned_by_user")
prompt = random.choice(this_gossip.REACTION_TO_USER_OPINION_ABOUT_PERSON[judgement])
state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
state_utils.set_can_continue(vars, common_constants.CAN_CONTINUE_SCENARIO)
return prompt
except Exception as exc:
logger.exception(exc)
sentry_sdk.capture_exception(exc)
return error_response(vars)
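# Hedged illustration of the sentiment -> judgement shortcut used above, pulled out
# as a standalone helper (not part of the skill's runtime path):
#   def sentiment_to_judgement(sentiment: str) -> str:
#       if "negative" in sentiment:
#           return "Disliked"
#       if "positive" in sentiment:
#           return "Liked"
#       return "Other"
#   sentiment_to_judgement("positive")  # -> "Liked", used to pick a reaction template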
# # STEP 2
# ################################################################################
# def sys_person_opinion_request(ngrams, vars):
# flag = False
# raise NotImplementedError() # YOUR CODE HERE
# info.info(f"weekend_request={flag}")
# return flag
# def usr_person_opinion_response(vars):
# logger.debug("exec usr_person_opinion_response")
# try:
# state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
# state_utils.set_can_continue(vars)
# response_text = "" # YOUR CODE HERE
# raise NotImplementedError() # YOUR CODE HERE
# return response_text
# except Exception as exc:
# logger.exception(exc)
# sentry_sdk.capture_exception(exc)
# return error_response(vars)
# # STEP 3
# ################################################################################
# def sys_says_something_after_opinion_request(ngrams, vars):
# flag = False
# raise NotImplementedError() # YOUR CODE HERE
# info.info(f"weekend_request={flag}")
# return flag
# def usr_says_something_after_opinion_response(vars):
# logger.debug("exec usr_says_something_after_opinion_response")
# try:
# state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
# state_utils.set_can_continue(vars)
# response_text = "" # YOUR CODE HERE
# raise NotImplementedError() # YOUR CODE HERE
# return response_text
# except Exception as exc:
# logger.exception(exc)
# sentry_sdk.capture_exception(exc)
# return error_response(vars)
# endregion
# # region SYS_CHANGE_TO_PERSON
# ################################################################################
# def sys_change_to_person_request(ngrams, vars):
# flag = True
# # raise NotImplementedError() # YOUR CODE HERE
# info.info(f"sys_change_to_person_request={flag}")
# return flag
def usr_change_to_person_response(vars):
logger.debug("exec usr_not_interested_in_person_response")
try:
shared_memory = state_utils.get_shared_memory(vars)
# obtaining current context
current_cobot_topic = shared_memory.get("current_cobot_topic", "")
# getting human-readable version
human_topic = this_news.COBOT_TO_HUMAN_READABLE_TOPICS.get(current_cobot_topic)
# obtaining new random person + news for current cobot_topic
person = get_fresh_person_for_topic(vars, current_cobot_topic)
# Positive
target_emotion_type = "Liked"
target_judgement = get_random_judgement_for_emotion(target_emotion_type)
# saving current bot's emotion towards the currently discussed person
state_utils.save_to_shared_memory(vars, bot_emotion_towards_current_person=target_emotion_type)
# saving person to the list of people mentioned by bot, for now with "Other" judgement
save_mentioned_person(vars, person, "Liked", "people_mentioned_by_bot")
# setting current context (only person, we didn't change topic)
state_utils.save_to_shared_memory(vars, current_person=person)
# generating response
ack = condition_utils.get_not_used_and_save_sentiment_acknowledgement(vars)
prompt = random.choice(this_gossip.CHANGE_TO_OTHER_PERSON_QUESTIONS)
prompt = prompt.replace("target_person", person) if person else prompt
prompt = prompt.replace("target_topic", human_topic) if human_topic else prompt
prompt = prompt.replace("target_judgement", target_judgement)
# occupation
occupation = utils.get_occupation_for_person(person, current_cobot_topic, prompt)
prompt = prompt.replace("target_occupation", occupation)
state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
state_utils.set_can_continue(vars, common_constants.CAN_CONTINUE_PROMPT)
return " ".join([ack, prompt])
except Exception as exc:
logger.exception(exc)
sentry_sdk.capture_exception(exc)
return error_response(vars)
# STEP 2 MENTIONS_ANOTHER_PERSON
################################################################################
def sys_mentions_another_person_request(ngrams, vars):
flag = False
use_only_last_utt = True
mentioned_by_user_people = []
found_celebrity, _ = get_celebrity_from_uttr(vars, use_only_last_utt=use_only_last_utt)
if found_celebrity:
mentioned_by_user_people.append(found_celebrity)
logger.info(f"sys_mentions_another_person_request: {mentioned_by_user_people}")
shared_memory = state_utils.get_shared_memory(vars)
current_person = shared_memory.get("current_person", "")
current_person = str(current_person)
logger.debug(f"mentioned_people: {mentioned_by_user_people}")
other_mentioned_people = [
people for people in mentioned_by_user_people if str(people).lower() != current_person.lower()
]
# checking if user mentioned at least one person
if len(other_mentioned_people) > 0:
flag = True
logger.info(f"sys_mentions_another_person_request={flag}")
return flag
def usr_mentions_another_person_response(vars):
logger.debug("exec usr_mentions_another_person_response")
try:
shared_memory = state_utils.get_shared_memory(vars)
# sf_type, sf_confidence = utils.get_speech_function_for_human_utterance(human_utterance)
# logger.debug(f"usr_mentions_another_person_response: Speech Function: {sf_type}")
# using speech function classifier for express_opinion
# (with the aid of MIDAS & Intents for now)
use_only_last_utt = True
found_celebrity, occupation = get_celebrity_from_uttr(vars, use_only_last_utt=use_only_last_utt)
# obtaining occupation (person/generic/wiki-based)
# occupation = utils.get_occupation_for_person(current_person, current_cobot_topic, body)
logger.info(f"found occupation: {occupation}")
# saving it
state_utils.save_to_shared_memory(vars, current_person_occupation=occupation)
if found_celebrity:
logger.info(f"user just mentioned these people: {found_celebrity}")
# obtaining previously mentioned people
# user_mentioned_people = get_mentioned_people(vars, share_memory_key="people_mentioned_by_user")
# checking current person
current_person = shared_memory.get("current_person", "")
body = random.choice(this_gossip.CONFUSED_WHY_USER_MENTIONED_PEOPLE)
state_utils.set_confidence(vars, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
state_utils.set_can_continue(vars, common_constants.CAN_CONTINUE_PROMPT)
# checking if user mentioned at least one person
if found_celebrity:
state_utils.set_confidence(vars, MUST_CONTINUE_CONFIDENCE)
state_utils.set_can_continue(vars, common_constants.CAN_CONTINUE_PROMPT)
user_mentioned_person = found_celebrity
logger.info("# of mentioned people: 1")
# path #1: mentioned person is the current one (w/o coref)
if str(user_mentioned_person).lower() in str(current_person).lower():
logger.info(f"just mentioned person {user_mentioned_person} is the current_one: {current_person}")
if utils.is_speech_function_demand_opinion(vars):
if current_person in get_mentioned_people(vars, "people_mentioned_by_bot", ["Liked"]):
body = random.choice(this_gossip.SIMPLE_OPINION_ABOUT_LIKED_PERSON_PREVIOUSLY_MENTIONED_BY_BOT)
fake_utterance = f"I like to learn more about {user_mentioned_person}"
gender, age = utils.get_gender_age_person(user_mentioned_person, fake_utterance)
gender_is = utils.get_human_readable_gender_statement_current_is(gender)
body = body.replace("target_gender_is", gender_is)
elif current_person in get_mentioned_people(vars, "people_mentioned_by_bot", ["Disliked"]):
body = random.choice(
this_gossip.SIMPLE_OPINION_ABOUT_DISLIKED_PERSON_PREVIOUSLY_MENTIONED_BY_BOT
)
fake_utterance = f"I like to learn more about {user_mentioned_person}"
gender, age = utils.get_gender_age_person(user_mentioned_person, fake_utterance)
gender_is = utils.get_human_readable_gender_statement_current_is(gender)
body = body.replace("target_gender_is", gender_is)
if utils.is_speech_function_express_opinion(vars):
body = random.choice(this_gossip.SIMPLE_REACTION_TO_PERSON_PREVIOUSLY_MENTIONED_BY_BOT)
# path #2: mentioned person is the one mentioned by bot
#!/usr/bin/python3
import clang.cindex
from clang.cindex import CursorKind
from clang.cindex import TypeKind
from clang.cindex import TranslationUnit
import sys
from dataclasses import dataclass, field
import subprocess
import logging
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
@dataclass
class TypeDefinition:
TYPE_UNKNOWN = 0
TYPE_STRUCT = 1
TYPE_UNION = 2
TYPE_FIELD = 3
TYPE_VARDECL = 4
name: str
type: int
def __init__(self, Name, Type):
self.name = Name
self.type = Type
@property
def Name(self):
return self.name
@property
def Type(self):
return self.type
@dataclass
class AliasType:
ALIAS_X86_32 = 0
ALIAS_X86_64 = 1
ALIAS_AARCH64 = 2
ALIAS_WIN32 = 3
ALIAS_WIN64 = 4
Name: str
AliasType: int
def __init__(self, Name, Type):
self.Name = Name
self.AliasType = Type
@dataclass
class StructDefinition(TypeDefinition):
Size: int
Aliases: list
Members: list
ExpectFEXMatch: bool
def __init__(self, Name, Size):
super(StructDefinition, self).__init__(Name, TypeDefinition.TYPE_STRUCT)
self.Size = Size
self.Aliases = []
self.Members = []
self.ExpectFEXMatch = False
@dataclass
class UnionDefinition(TypeDefinition):
Size: int
Aliases: list
Members: list
ExpectFEXMatch: bool
def __init__(self, Name, Size):
super(UnionDefinition, self).__init__(Name, TypeDefinition.TYPE_UNION)
self.Size = Size
self.Aliases = []
self.Members = []
self.ExpectFEXMatch = False
@dataclass
class FieldDefinition(TypeDefinition):
Size: int
OffsetOf: int
Alignment: int
def __init__(self, Name, Size, OffsetOf, Alignment):
super(FieldDefinition, self).__init__(Name, TypeDefinition.TYPE_FIELD)
self.Size = Size
self.OffsetOf = OffsetOf
self.Alignment = Alignment
@dataclass
class VarDeclDefinition(TypeDefinition):
Size: int
Aliases: list
ExpectFEXMatch: bool
Value: str
def __init__(self, Name, Size):
super(VarDeclDefinition, self).__init__(Name, TypeDefinition.TYPE_VARDECL)
self.Size = Size
self.Aliases = []
self.ExpectFEXMatch = False
@dataclass
class ArchDB:
Parsed: bool
ArchName: str
NamespaceScope: list
CurrentNamespace: str
TU: TranslationUnit
Structs: dict
Unions: dict
VarDecls: dict
FieldDecls: list
def __init__(self, ArchName):
self.Parsed = True
self.ArchName = ArchName
self.NamespaceScope = []
self.CurrentNamespace = ""
self.TU = None
self.Structs = {}
self.Unions = {}
self.VarDecls = {}
self.FieldDecls = []
@dataclass
class FunctionDecl:
Name: str
Ret: str
Params: list
def __init__(self, Name, Ret):
self.Name = Name
self.Ret = Ret
self.Params = []
FunctionDecls = []
def HandleFunctionDeclCursor(Arch, Cursor):
if (Cursor.is_definition()):
return Arch
#logging.critical ("Unhandled FunctionDeclCursor {0}-{1}-{2}-{3}".format(Cursor.kind, Cursor.type.spelling, Cursor.spelling,
# Cursor.result_type.spelling))
Function = FunctionDecl(Cursor.spelling, Cursor.result_type.spelling)
for Child in Cursor.get_children():
if (Child.kind == CursorKind.TYPE_REF):
# This will give us the return type
# We skip this since we get it at the start instead
pass
elif (Child.kind == CursorKind.PARM_DECL):
# This gives us a parameter type
Function.Params.append(Child.type.spelling)
elif (Child.kind == CursorKind.UNEXPOSED_ATTR):
# Whatever you are we don't care about you
return Arch
elif (Child.kind == CursorKind.ASM_LABEL_ATTR):
# Whatever you are we don't care about you
return Arch
elif (Child.kind == CursorKind.VISIBILITY_ATTR):
pass
else:
logging.critical ("\tUnhandled FunctionDeclCursor {0}-{1}-{2}".format(Child.kind, Child.type.spelling, Child.spelling))
sys.exit(-1)
FunctionDecls.append(Function)
return Arch
def PrintFunctionDecls():
for Decl in FunctionDecls:
print("fn(\"{0} {1}({2})\")".format(Decl.Ret, Decl.Name, ", ".join(Decl.Params)))
def FindClangArguments(OriginalArguments):
AddedArguments = ["clang"]
AddedArguments.extend(OriginalArguments)
AddedArguments.extend(["-v", "-x", "c++", "-S", "-"])
Proc = subprocess.Popen(AddedArguments, stderr = subprocess.PIPE, stdin = subprocess.DEVNULL)
NewIncludes = []
BeginSearch = False
while True:
Line = Proc.stderr.readline().strip()
if not Line:
Proc.terminate()
break
if (Line == b"End of search list."):
BeginSearch = False
Proc.terminate()
break
if (BeginSearch == True):
NewIncludes.append("-I" + Line.decode('ascii'))
if (Line == b"#include <...> search starts here:"):
BeginSearch = True
# Add back original arguments
NewIncludes.extend(OriginalArguments)
return NewIncludes
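# Hedged usage sketch: probe the host clang for its default include search paths and
# prepend them (as -I flags) to the original compile arguments before parsing with libclang.
#   args = FindClangArguments(["-std=c++17", "-DSOME_MACRO=1"])
#   # args now begins with "-I<system include dir>" entries followed by the original flags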
def SetNamespace(Arch):
Arch.CurrentNamespace = ""
for Namespace in Arch.NamespaceScope:
Arch.CurrentNamespace = Arch.CurrentNamespace + Namespace + "::"
def HandleStructDeclCursor(Arch, Cursor, NameOverride = ""):
# Append namespace
CursorName = ""
StructType = Cursor.type
if (len(StructType.spelling) == 0):
CursorName = NameOverride
else:
CursorName = StructType.spelling
if (len(CursorName) != 0):
Arch.NamespaceScope.append(CursorName)
SetNamespace(Arch)
Struct = StructDefinition(
Name = CursorName,
Size = StructType.get_size())
# Handle children
Arch.Structs[Struct.Name] = HandleStructElements(Arch, Struct, Cursor)
# Pop namespace off
if (len(CursorName) != 0):
Arch.NamespaceScope.pop()
SetNamespace(Arch)
return Arch
def HandleUnionDeclCursor(Arch, Cursor, NameOverride = ""):
# Append namespace
CursorName = ""
if (len(Cursor.spelling) == 0):
CursorName = NameOverride
else:
CursorName = Cursor.spelling
if (len(CursorName) != 0):
Arch.NamespaceScope.append(CursorName)
SetNamespace(Arch)
UnionType = Cursor.type
Union = UnionDefinition(
Name = CursorName,
Size = UnionType.get_size())
Arch.Unions[Union.Name] = Union
# Handle children
Arch.Unions[Union.Name] = HandleStructElements(Arch, Union, Cursor)
# Pop namespace off
if (len(CursorName) != 0):
Arch.NamespaceScope.pop()
SetNamespace(Arch)
return Arch
def HandleVarDeclCursor(Arch, Cursor):
CursorName = Cursor.spelling
DeclType = Cursor.type
Def = Cursor.get_definition()
VarDecl = VarDeclDefinition(
Name = CursorName,
Size = DeclType.get_size())
Arch.VarDecls[VarDecl.Name] = HandleVarDeclElements(Arch, VarDecl, Cursor)
return Arch
def HandleVarDeclElements(Arch, VarDecl, Cursor):
for Child in Cursor.get_children():
if (Child.kind == CursorKind.ANNOTATE_ATTR):
if (Child.spelling.startswith("ioctl-alias-")):
Sections = Child.spelling.split("-")
if (Sections[2] == "x86_32"):
VarDecl.Aliases.append(AliasType(Sections[3], AliasType.ALIAS_X86_32))
elif (Sections[2] == "x86_64"):
VarDecl.Aliases.append(AliasType(Sections[3], AliasType.ALIAS_X86_64))
elif (Sections[2] == "aarch64"):
VarDecl.Aliases.append(AliasType(Sections[3], AliasType.ALIAS_AARCH64))
elif (Sections[2] == "win32"):
VarDecl.Aliases.append(AliasType(Sections[3], AliasType.ALIAS_WIN32))
elif (Sections[2] == "win64"):
VarDecl.Aliases.append(AliasType(Sections[3], AliasType.ALIAS_WIN64))
else:
logging.critical ("Can't handle alias type '{0}'".format(Child.spelling))
Arch.Parsed = False
elif (Child.spelling == "fex-match"):
VarDecl.ExpectFEXMatch = True
else:
# Unknown annotation
pass
elif (Child.kind == CursorKind.TYPE_REF or
Child.kind == CursorKind.UNEXPOSED_EXPR or
Child.kind == CursorKind.PAREN_EXPR or
Child.kind == CursorKind.BINARY_OPERATOR
):
pass
return VarDecl
def HandleTypeDefDeclCursor(Arch, Cursor):
TypeDefType = Cursor.underlying_typedef_type
CanonicalType = TypeDefType.get_canonical()
TypeDefName = Cursor.type.get_typedef_name()
if (TypeDefType.kind == TypeKind.ELABORATED and CanonicalType.kind == TypeKind.RECORD):
if (len(TypeDefName) != 0):
HandleTypeDefDecl(Arch, Cursor, TypeDefName)
# Append namespace
Arch.NamespaceScope.append(TypeDefName)
SetNamespace(Arch)
Arch = HandleCursor(Arch, Cursor)
#StructType = Cursor.type
#Struct = StructDefinition(
# Name = TypeDefName,
# Size = CanonicalType.get_size())
#Arch.Structs[TypeDefName] = Struct
## Handle children
#Arch.Structs[TypeDefName] = HandleStructElements(Arch, Struct, Cursor)
# Pop namespace off
Arch.NamespaceScope.pop()
SetNamespace(Arch)
else:
if (len(TypeDefName) != 0):
Def = Cursor.get_definition()
VarDecl = VarDeclDefinition(
Name = TypeDefName,
Size = CanonicalType.get_size())
Arch.VarDecls[VarDecl.Name] = HandleVarDeclElements(Arch, VarDecl, Cursor)
return Arch
def HandleStructElements(Arch, Struct, Cursor):
for Child in Cursor.get_children():
# logging.info ("\t\tStruct/Union Children: Cursor \"{0}{1}\" of kind {2}".format(Arch.CurrentNamespace, Child.spelling, Child.kind))
if (Child.kind == CursorKind.ANNOTATE_ATTR):
if (Child.spelling.startswith("alias-")):
Sections = Child.spelling.split("-")
if (Sections[1] == "x86_32"):
Struct.Aliases.append(AliasType(Sections[2], AliasType.ALIAS_X86_32))
elif (Sections[1] == "x86_64"):
Struct.Aliases.append(AliasType(Sections[2], AliasType.ALIAS_X86_64))
elif (Sections[1] == "aarch64"):
Struct.Aliases.append(AliasType(Sections[2], AliasType.ALIAS_AARCH64))
elif (Sections[1] == "win32"):
Struct.Aliases.append(AliasType(Sections[2], AliasType.ALIAS_WIN32))
elif (Sections[1] == "win64"):
Struct.Aliases.append(AliasType(Sections[2], AliasType.ALIAS_WIN64))
else:
logging.critical ("Can't handle alias type '{0}'".format(Child.spelling))
Arch.Parsed = False
elif (Child.spelling == "fex-match"):
Struct.ExpectFEXMatch = True
else:
# Unknown annotation
pass
elif (Child.kind == CursorKind.FIELD_DECL):
ParentType = Cursor.type
FieldType = Child.type
Field = FieldDefinition(
Name = Child.spelling,
Size = FieldType.get_size(),
OffsetOf = ParentType.get_offset(Child.spelling),
Alignment = FieldType.get_align())
#logging.info ("\t{0}".format(Child.spelling))
#logging.info ("\t\tSize of type: {0}".format(FieldType.get_size()));
#logging.info ("\t\tAlignment of type: {0}".format(FieldType.get_align()));
#logging.info ("\t\tOffsetof of type: {0}".format(ParentType.get_offset(Child.spelling)));
Struct.Members.append(Field)
Arch.FieldDecls.append(Field)
elif (Child.kind == CursorKind.STRUCT_DECL):
ParentType = Cursor.type
FieldType = Child.type
Field = FieldDefinition(
Name = Child.spelling,
Size = FieldType.get_size(),
OffsetOf = ParentType.get_offset(Child.spelling),
Alignment = FieldType.get_align())
#logging.info ("\t{0}".format(Child.spelling))
#logging.info ("\t\tSize of type: {0}".format(FieldType.get_size()));
#logging.info ("\t\tAlignment of type: {0}".format(FieldType.get_align()));
#logging.info ("\t\tOffsetof of type: {0}".format(ParentType.get_offset(Child.spelling)));
Struct.Members.append(Field)
Arch.FieldDecls.append(Field)
Arch = HandleStructDeclCursor(Arch, Child)
elif (Child.kind == CursorKind.UNION_DECL):
Struct = HandleStructElements(Arch, Struct, Child)
#ParentType = Cursor.type
#FieldType = Child.type
#Field = FieldDefinition(
# Name = Child.spelling,
# Size = FieldType.get_size(),
# OffsetOf = ParentType.get_offset(Child.spelling),
# Alignment = FieldType.get_align())
#logging.info ("\t{0}".format(Child.spelling))
#logging.info ("\t\tSize of type: {0}".format(FieldType.get_size()));
#logging.info ("\t\tAlignment of type: {0}".format(FieldType.get_align()));
#logging.info ("\t\tOffsetof of type: {0}".format(ParentType.get_offset(Child.spelling)));
#Struct.Members.append(Field)
#Arch.FieldDecls.append(Field)
#Arch = HandleUnionDeclCursor(Arch, Child)
elif (Child.kind == CursorKind.TYPEDEF_DECL):
Arch = HandleTypeDefDeclCursor(Arch, Child)
else:
Arch = HandleCursor(Arch, Child)
return Struct
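# Hedged illustration of the C++ source annotations this walker expects. An
# __attribute__((annotate("..."))) on a struct appears as an ANNOTATE_ATTR child whose
# spelling is the annotation string, which the startswith("alias-") / "fex-match" checks
# above then parse, e.g. (illustrative type names):
#   struct __attribute__((annotate("fex-match"))) guest_stat { ... };
#   struct __attribute__((annotate("alias-x86_64-stat"))) guest_stat64 { ... };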
def HandleTypeDefDecl(Arch, Cursor, Name):
for Child in Cursor.get_children():
if (Child.kind == CursorKind.UNION_DECL):
pass
elif (Child.kind == CursorKind.STRUCT_DECL):
Arch = HandleStructDeclCursor(Arch, Child, Name)
elif (Child.kind == CursorKind.UNION_DECL):
Arch = HandleUnionDeclCursor(Arch, Child, Name)
elif (Child.kind == CursorKind.TYPEDEF_DECL):
Arch = HandleTypeDefDeclCursor(Arch, Child)
elif (Child.kind == CursorKind.TYPE_REF or
Child.kind == CursorKind.NAMESPACE_REF or
Child.kind == CursorKind.TEMPLATE_REF or
Child.kind == CursorKind.ALIGNED_ATTR):
# Safe to pass on
pass
else:
logging.critical ("Unhandled TypedefDecl {0}-{1}-{2}".format(Child.kind, Child.type.spelling, Child.spelling))
def HandleCursor(Arch, Cursor):
if (Cursor.kind.is_invalid()):
Diags = Arch.TU.diagnostics
for Diag in Diags:
logging.warning (Diag.format())
Arch.Parsed = False
return
for Child in Cursor.get_children():
if (Child.kind == CursorKind.TRANSLATION_UNIT):
Arch = HandleCursor(Arch, Child)
elif (Child.kind == CursorKind.FIELD_DECL):
pass
elif (Child.kind == CursorKind.UNION_DECL):
Arch = HandleUnionDeclCursor(Arch, Child)
elif (Child.kind == CursorKind.STRUCT_DECL):
Arch = HandleStructDeclCursor(Arch, Child)
elif (Child.kind == CursorKind.TYPEDEF_DECL):
Arch = HandleTypeDefDeclCursor(Arch, Child)
elif (Child.kind == CursorKind.VAR_DECL):
Arch = HandleVarDeclCursor(Arch, Child)
elif (Child.kind == CursorKind.NAMESPACE):
# Append namespace
Arch.NamespaceScope.append(Child.spelling)
SetNamespace(Arch)
# Handle children
Arch = HandleCursor(Arch, Child)
# Pop namespace off
Arch.NamespaceScope.pop()
SetNamespace(Arch)
elif (Child.kind == CursorKind.TYPE_REF):
# Safe to pass on
pass
elif (Child.kind == CursorKind.FUNCTION_DECL):
# For function printing
Arch = HandleFunctionDeclCursor(Arch, Child)
else:
Arch = HandleCursor(Arch, Child)
return Arch
def GetDB(Arch, filename, args):
Index = clang.cindex.Index.create()
try:
TU = Index.parse(filename, args=args, options=TranslationUnit.PARSE_INCOMPLETE)
except clang.cindex.TranslationUnitLoadError as ex:
# Parsing failed outright, so there is no TranslationUnit to query for diagnostics
Arch.Parsed = False
logging.warning (str(ex))
return
Arch.TU = TU
FunctionDecls.clear()
HandleCursor(Arch, TU.cursor)
# Get diagnostics
Diags = TU.diagnostics
if (len(Diags) != 0):
logging.warning ("Diagnostics from Arch: {0}".format(Arch.ArchName))
for Diag in Diags:
logging.warning (Diag.format())
return
return end - start, end, start
def get_cbm_vbm(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
Expects a DOS object and finds the cbm and vbm.
Args:
tol: tolerance in occupations for determining the gap
abs_tol: If True, tol is treated as an absolute tolerance; if False, it is relative to the total DOS.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
(cbm, vbm): floats in eV corresponding to the conduction band minimum and the valence band maximum
"""
# determine tolerance
tdos = self.get_densities(spin)
if not abs_tol:
tol = tol * tdos.sum() / tdos.shape[0]
# find index of fermi energy
i_fermi = 0
while self.energies[i_fermi] <= self.efermi:
i_fermi += 1
# work backwards until tolerance is reached
i_gap_start = i_fermi
while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol:
i_gap_start -= 1
# work forwards until tolerance is reached
i_gap_end = i_gap_start
while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol:
i_gap_end += 1
i_gap_end -= 1
return self.energies[i_gap_end], self.energies[i_gap_start]
def get_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None):
"""
Expects a DOS object and finds the gap.
Args:
tol: tolerance in occupations for determining the gap
abs_tol: If True, tol is treated as an absolute tolerance; if False, it is relative to the total DOS.
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
gap in eV
"""
(cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin)
return max(cbm - vbm, 0.0)
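# Hedged usage sketch for the gap helpers; the positional Dos(efermi, energies, densities)
# constructor is assumed from from_dict below, and the toy DOS is purely illustrative:
#   energies = np.linspace(-5, 5, 101)
#   dens = np.where(np.abs(energies) > 1.0, 1.0, 0.0)  # states only outside |E| < 1 eV
#   dos = Dos(0.0, energies, {Spin.up: dens})
#   dos.get_cbm_vbm()  # (1.0, -1.0) on this grid
#   dos.get_gap()      # 2.0 eV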
def __str__(self):
"""
Returns a string which can be easily plotted (using gnuplot).
"""
if Spin.down in self.densities:
stringarray = ["#{:30s} {:30s} {:30s}".format("Energy", "DensityUp", "DensityDown")]
for i, energy in enumerate(self.energies):
stringarray.append(f"{energy:.5f} {self.densities[Spin.up][i]:.5f} {self.densities[Spin.down][i]:.5f}")
else:
stringarray = ["#{:30s} {:30s}".format("Energy", "DensityUp")]
for i, energy in enumerate(self.energies):
stringarray.append(f"{energy:.5f} {self.densities[Spin.up][i]:.5f}")
return "\n".join(stringarray)
@classmethod
def from_dict(cls, d) -> "Dos":
"""
Returns Dos object from dict representation of Dos.
"""
return Dos(
d["efermi"],
d["energies"],
{Spin(int(k)): v for k, v in d["densities"].items()},
)
def as_dict(self) -> dict:
"""
Json-serializable dict representation of Dos.
"""
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"efermi": self.efermi,
"energies": self.energies.tolist(),
"densities": {str(spin): dens.tolist() for spin, dens in self.densities.items()},
}
class FermiDos(Dos, MSONable):
"""
This wrapper class helps relate the density of states, doping levels
(i.e. carrier concentrations) and corresponding fermi levels. A negative
doping concentration indicates the majority carriers are electrons
(n-type doping); a positive doping concentration indicates holes are the
majority carriers (p-type doping).
"""
def __init__(
self,
dos: Dos,
structure: Structure = None,
nelecs: float = None,
bandgap: float = None,
):
"""
Args:
dos: Pymatgen Dos object.
structure: A structure. If not provided, the structure
of the dos object will be used. If the dos does not have an
associated structure object, an error will be thrown.
nelecs: The number of electrons included in the energy range of
dos. It is used for normalizing the densities. Default is the total
number of electrons in the structure.
bandgap: If set, the energy values are scissored so that the electronic
band gap matches this value.
"""
super().__init__(
dos.efermi,
energies=dos.energies,
densities={k: np.array(d) for k, d in dos.densities.items()},
)
if structure is None:
if hasattr(dos, "structure"):
structure = dos.structure
else:
raise ValueError("Structure object is not provided and not present in dos")
self.structure = structure
self.nelecs = nelecs or self.structure.composition.total_electrons
self.volume = self.structure.volume
self.energies = np.array(dos.energies)
self.de = np.hstack((self.energies[1:], self.energies[-1])) - self.energies
# normalize total density of states based on integral at 0K
tdos = np.array(self.get_densities())
self.tdos = tdos * self.nelecs / (tdos * self.de)[self.energies <= self.efermi].sum()
ecbm, evbm = self.get_cbm_vbm()
self.idx_vbm = int(np.argmin(abs(self.energies - evbm)))
self.idx_cbm = int(np.argmin(abs(self.energies - ecbm)))
self.A_to_cm = 1e-8
if bandgap:
if evbm < self.efermi < ecbm:
eref = self.efermi
else:
eref = (evbm + ecbm) / 2.0
idx_fermi = int(np.argmin(abs(self.energies - eref)))
if idx_fermi == self.idx_vbm:
# Fermi level and vbm should be different indices
idx_fermi += 1
self.energies[:idx_fermi] -= (bandgap - (ecbm - evbm)) / 2.0
self.energies[idx_fermi:] += (bandgap - (ecbm - evbm)) / 2.0
def get_doping(self, fermi_level: float, temperature: float) -> float:
"""
Calculate the doping (majority carrier concentration) at a given
fermi level and temperature. A simple Left Riemann sum is used for
integrating the density of states over energy & equilibrium Fermi-Dirac
distribution.
Args:
fermi_level: The fermi_level level in eV.
temperature: The temperature in Kelvin.
Returns:
The doping concentration in units of 1/cm^3. Negative values
indicate that the majority carriers are electrons (n-type doping)
whereas positive values indicate the majority carriers are holes
(p-type doping).
"""
cb_integral = np.sum(
self.tdos[self.idx_cbm :]
* f0(self.energies[self.idx_cbm :], fermi_level, temperature)
* self.de[self.idx_cbm :],
axis=0,
)
vb_integral = np.sum(
self.tdos[: self.idx_vbm + 1]
* f0(-self.energies[: self.idx_vbm + 1], -fermi_level, temperature)
* self.de[: self.idx_vbm + 1],
axis=0,
)
return (vb_integral - cb_integral) / (self.volume * self.A_to_cm ** 3)
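# Note: f0 is imported elsewhere in pymatgen and is assumed here to be the Fermi-Dirac
# occupation f0(E, fermi, T) = 1 / (1 + exp((E - fermi) / (k_B * T))) with k_B in eV/K.
# The valence-band term uses f0(-E, -fermi, T) = 1 - f0(E, fermi, T), i.e. the hole
# occupation, so conduction-band electrons and valence-band holes enter with opposite sign.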
def get_fermi_interextrapolated(
self, concentration: float, temperature: float, warn: bool = True, c_ref: float = 1e10, **kwargs
) -> float:
"""
Similar to get_fermi except that when get_fermi fails to converge,
an interpolated or extrapolated fermi is returned with the assumption
that the fermi level changes linearly with log(abs(concentration)).
Args:
concentration: The doping concentration in 1/cm^3. Negative values
represent n-type doping and positive values represent p-type
doping.
temperature: The temperature in Kelvin.
warn: Whether to give a warning the first time the fermi cannot be
found.
c_ref: A doping concentration where get_fermi returns a
value without error for both c_ref and -c_ref.
**kwargs: Keyword arguments passed to the get_fermi function.
Returns:
The Fermi level. Note, the value is possibly interpolated or
extrapolated and must be used with caution.
"""
try:
return self.get_fermi(concentration, temperature, **kwargs)
except ValueError as e:
if warn:
warnings.warn(str(e))
if abs(concentration) < c_ref:
if abs(concentration) < 1e-10:
concentration = 1e-10
# max(10, ) is to avoid log(0<x<1) and log(1+x) both of which
# are slow
f2 = self.get_fermi_interextrapolated(
max(10, abs(concentration) * 10.0), temperature, warn=False, **kwargs
)
f1 = self.get_fermi_interextrapolated(
-max(10, abs(concentration) * 10.0), temperature, warn=False, **kwargs
)
c2 = np.log(abs(1 + self.get_doping(f2, temperature)))
c1 = -np.log(abs(1 + self.get_doping(f1, temperature)))
slope = (f2 - f1) / (c2 - c1)
return f2 + slope * (np.sign(concentration) * np.log(abs(1 + concentration)) - c2)
f_ref = self.get_fermi_interextrapolated(np.sign(concentration) * c_ref, temperature, warn=False, **kwargs)
f_new = self.get_fermi_interextrapolated(concentration / 10.0, temperature, warn=False, **kwargs)
clog = np.sign(concentration) * np.log(abs(concentration))
c_newlog = np.sign(concentration) * np.log(abs(self.get_doping(f_new, temperature)))
slope = (f_new - f_ref) / (c_newlog - np.sign(concentration) * 10.0)
return f_new + slope * (clog - c_newlog)
def get_fermi(
self,
concentration: float,
temperature: float,
rtol: float = 0.01,
nstep: int = 50,
step: float = 0.1,
precision: int = 8,
):
"""
Finds the fermi level at which the doping concentration at the given
temperature (T) is equal to concentration. A greedy algorithm is used
where the relative error is minimized by calculating the doping at a
grid which continually becomes finer.
Args:
concentration: The doping concentration in 1/cm^3. Negative values
represent n-type doping and positive values represent p-type
doping.
temperature: The temperature in Kelvin.
rtol: The maximum acceptable relative error.
nstep: The number of steps checked around a given fermi level.
step: Initial step in energy when searching for the Fermi level.
precision: Essentially the decimal places of calculated Fermi level.
Returns:
The fermi level in eV. Note that this is different from the default
dos.efermi.
"""
fermi = self.efermi # initialize target fermi
relative_error = [float("inf")]
for _ in range(precision):
frange = np.arange(-nstep, nstep + 1) * step + fermi
calc_doping = np.array([self.get_doping(f, temperature) for f in frange])
relative_error = np.abs(calc_doping / concentration - 1.0) # type: ignore
fermi = frange[np.argmin(relative_error)]
step /= 10.0
if min(relative_error) > rtol:
raise ValueError(f"Could not find fermi within {rtol * 100}% of concentration={concentration}")
return fermi
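# Hedged usage sketch (object names are illustrative):
#   fdos = FermiDos(dos, structure=structure)
#   ef_n = fdos.get_fermi(concentration=-1e18, temperature=300)  # n-type, 1e18 electrons/cm^3
#   ef_p = fdos.get_fermi(concentration=1e18, temperature=300)   # p-type, 1e18 holes/cm^3
# A ValueError is raised when no level reproduces the requested concentration within rtol.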
@classmethod
def from_dict(cls, d):
<filename>corems/molecular_id/calc/math_distance.py
import numpy as np
import scipy.stats
'''exploratory module based on Yuanyue Li code at
TODO add GitHub and Paper here'''
def entropy_distance(v, y):
merged = v + y
entropy_increase = 2 * scipy.stats.entropy(merged) - scipy.stats.entropy(v) - scipy.stats.entropy(y)
return entropy_increase
def _weight_intensity_for_entropy(x):
if sum(x) > 0:
WEIGHT_START = 0.25
WEIGHT_SLOPE = 0.5
entropy_x = scipy.stats.entropy(x)
weight = WEIGHT_START + WEIGHT_SLOPE * entropy_x
x = np.power(x, weight)
x = x / sum(x)
return x
def weighted_entropy_distance(v, y):
v = _weight_intensity_for_entropy(v)
y = _weight_intensity_for_entropy(y)
merged = v + y
entropy_increase = 2 * scipy.stats.entropy(merged) - scipy.stats.entropy(v) - scipy.stats.entropy(y)
return entropy_increase
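# Hedged usage sketch: both entropy distances expect aligned, non-negative intensity
# vectors and return 0 for identical spectra.
#   v = np.array([0.6, 0.3, 0.1])
#   y = np.array([0.1, 0.3, 0.6])
#   entropy_distance(v, y)           # > 0 for differing spectra, 0.0 when v == y
#   weighted_entropy_distance(v, y)  # same idea, with the intensity re-weighting above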
def chebyshev_distance(v, y):
r"""
Chebyshev distance:
.. math::
\underset{i}{\max}{(|v_{i}\ -\ y_{i}|)}
"""
return np.max(np.abs(v - y))
def squared_euclidean_distance(v, y):
r"""
Squared Euclidean distance:
.. math::
\sum(v_{i}-y_{i})^2
"""
return np.sum(np.power(v - y, 2))
def fidelity_similarity(v, y):
r"""
Fidelity similarity:
.. math::
\sum\sqrt{v_{i}y_{i}}
"""
return np.sum(np.sqrt(v * y))
def matusita_distance(v, y):
r"""
Matusita distance:
.. math::
\sqrt{\sum(\sqrt{v_{i}}-\sqrt{y_{i}})^2}
"""
return np.sqrt(np.sum(np.power(np.sqrt(v) - np.sqrt(y), 2)))
def squared_chord_distance(v, y):
r"""
Squared-chord distance:
.. math::
\sum(\sqrt{v_{i}}-\sqrt{y_{i}})^2
"""
return np.sum(np.power(np.sqrt(v) - np.sqrt(y), 2))
def bhattacharya_1_distance(v, y):
r"""
Bhattacharya 1 distance:
.. math::
(\arccos{(\sum\sqrt{v_{i}y_{i}})})^2
"""
s = np.sum(np.sqrt(v * y))
# TODO:Fix this!
if s > 1:
if s > 1 + 1e-6:
print("Error in calculating Bhattacharya 1 distance, got arccos {}".format(s))
s = 1
return np.power(np.arccos(s), 2)
def bhattacharya_2_distance(v, y):
r"""
Bhattacharya 2 distance:
.. math::
-\ln{(\sum\sqrt{v_{i}y_{i}})}
"""
s = np.sum(np.sqrt(v * y))
if s == 0:
return np.inf
else:
return -np.log(s)
def harmonic_mean_similarity(v, y):
r"""
Harmonic mean similarity:
.. math::
#1-2\sum(\frac{v_{i}y_{i}}{v_{i}+y_{i}})
2\sum(\frac{v_{i}y_{i}}{v_{i}+y_{i}})
"""
#return 1 - 2 * np.sum(v * y / (v + y))
return 2 * np.sum(v * y / (v + y))
#def pearson_chi_squared_distance(v, y):
# r"""
# Pearson χ2 distance:
#
# .. math::
#
# \sum\frac{(v_{i}-y_{i})^2}{y_{i}}
# """
# return np.sum(np.power(v - y, 2) / y)
#def neyman_chi_squared_distance(v, y):
# r"""
# Neyman χ2 distance:
#
# .. math::
#
# \sum\frac{(v_{i}-y_{i})^2}{v_{i}}
# """
# return np.sum(np.power(v - y, 2) / v)
#def probabilistic_symmetric_chi_squared_distance(v, y):
# r"""
# Probabilistic symmetric χ2 distance:
#
# .. math::
#
# \frac{1}{2} \times \sum\frac{(v_{i}-y_{i}\ )^2}{v_{i}+y_{i}\ }
# """
# return 1 / 2 * np.sum(np.power(v - y, 2) / (v + y))
#def topsoe_distance(v, y):
# r"""
# Topsøe distance:
#
# .. math::
#
# \sum{(v_{i}ln\frac{v_{i}}{Z_i}+y_{i}ln\frac{y_{i}}{Z_i}),\ \ \ Z_i=\frac{1}{2}(v_{i}+y_{i})}
# """
# z = 1 / 2 * (v + y)
# z[z == 0] = 1
# vz = v / z
# yz = y / z
# vz[v == 0] = 1
# yz[y == 0] = 1
# return np.sum(v * np.log(vz) + y * np.log(yz))
def chernoff_distance(v, y):
r"""
Chernoff distance:
.. math::
\max{(-ln\sum(v_{i}^ty_{i}^{1-t})^{1-t})},\ t=0.1,\ 0\le\ t<1
"""
t = 0.1
return np.max(-np.log(
np.sum(np.power(np.power(v, t) * np.power(y, 1 - t), 1 - t))))
def ruzicka_distance(v, y):
r"""
Ruzicka distance:
.. math::
\frac{\sum{|v_{i}-y_{i}|}}{\sum{\max(v_{i},y_{i})}}
"""
dist = np.sum(np.abs(v - y)) / np.sum(np.maximum(v, y))
return dist
def roberts_distance(v, y):
r"""
Roberts distance:
.. math::
1-\sum\frac{(v_{i}+y_{i})\frac{\min{(v_{i},y_{i})}}{\max{(v_{i},y_{i})}}}{\sum(v_{i}+y_{i})}
"""
return 1 - np.sum((v + y) * np.minimum(v, y) / np.maximum(v, y) / np.sum(v + y))
def intersection_distance(v, y):
r"""
Intersection distance:
.. math::
        1-\frac{\sum\min{(v_{i},y_{i})}}{\min(\sum{v_{i}},\sum{y_{i}})}
"""
return 1 - np.sum(np.minimum(v, y)) / min(np.sum(v), np.sum(y))
def motyka_distance(v, y):
r"""
Motyka distance:
.. math::
-\frac{\sum\min{(y_{i},v_{i})}}{\sum(y_{i}+v_{i})}
"""
dist = np.sum(np.minimum(v, y)) / np.sum(v + y)
return dist
def canberra_distance(v, y):
r"""
Canberra distance:
.. math::
#\sum\frac{|v_{i}-y_{i}|}{|v_{i}|+|y_{i}|}
\sum_{i}\frac{|y_{i} - v_{i}|}{y_{i} + v_{i}}
"""
#return np.sum(np.abs(v - y) / (np.abs(v) + np.abs(y)))
return np.sum(np.abs(y - v)/(y + v))
def canberra_metric(v, y):
r"""
Canberra Metric
.. math::
\frac{1}{\sum_{i}I(v_{i}\neq 0)}\sum_{i}\frac{|y_{i}-v_{i}|}{(y_{i}+v_{i})}
"""
return (1 / np.sum(v > 0)) * np.sum(np.abs(y - v)/(y + v))
def kulczynski_1_distance(v, y):
r"""
Kulczynski 1 distance:
.. math::
        \frac{\sum{|v_i-y_i|}}{\sum{\min{(v_i,y_i)}}}
"""
return np.sum(np.abs(y - v)) / np.sum(np.minimum(y, v))
def baroni_urbani_buser_distance(v, y):
r"""
Baroni-Urbani-Buser distance:
.. math::
1-\frac{\sum\min{(v_i,y_i)}+\sqrt{\sum\min{(v_i,y_i)}\sum(\max{(v)}-\max{(v_i,y_i)})}}{\sum{\max{(v_i,y_i)}+\sqrt{\sum{\min{(v_i,y_i)}\sum(\max{(v)}-\max{(v_i,y_i)})}}}}
"""
if np.max(v) < np.max(y):
v, y = y, v
d1 = np.sqrt(np.sum(np.minimum(v, y) * np.sum(max(v) - np.maximum(v, y))))
return 1 - (np.sum(np.minimum(v, y)) + d1) / (np.sum(np.maximum(v, y)) + d1)
def penrose_size_distance(v, y):
r"""
Penrose size distance:
.. math::
\sqrt N\sum{|y_i-v_i|}
"""
n = np.sum(v > 0)
return np.sqrt(n) * np.sum(np.abs(y - v))
def mean_character_distance(v, y):
r"""
Mean character distance:
.. math::
\frac{1}{N}\sum{|y_i-v_i|}
"""
n = np.sum(v > 0)
return 1 / n * np.sum(np.abs(y - v))
def lorentzian_distance(v, y):
r"""
Lorentzian distance:
.. math::
\sum{\ln(1+|v_i-y_i|)}
"""
return np.sum(np.log(1 + np.abs(y - v)))
def penrose_shape_distance(v, y):
r"""
Penrose shape distance:
.. math::
\sqrt{\sum((v_i-\bar{v})-(y_i-\bar{y}))^2}
"""
v_avg = np.mean(v)
y_avg = np.mean(y)
return np.sqrt(np.sum(np.power((y - y_avg) - (v - v_avg), 2)))
def clark_distance(v, y):
r"""
Clark distance:
.. math::
#(\frac{1}{N}\sum(\frac{v_i-y_i}{|v_i|+|y_i|})^2)^\frac{1}{2}
\sqrt{\sum(\frac{|v_i-y_i|}{v_i+y_i})^2}
"""
#n = np.sum(v > 0)
#return np.sqrt(1 / n * np.sum(np.power((v - y) / (np.abs(v) + np.abs(y)), 2)))
return np.sqrt(np.sum(np.power(np.abs(y - v) / (y + v), 2)))
def hellinger_distance(v, y):
r"""
Hellinger distance:
.. math::
#\sqrt{2\sum(\sqrt{\frac{v_i}{\bar{v}}}-\sqrt{\frac{y_i}{\bar{y}}})^2}
\sqrt{2\sum(\sqrt{v_i}-\sqrt{y_i})^2}
"""
#v_avg = np.mean(v)
#y_avg = np.mean(y)
#return np.sqrt(2 * np.sum(np.power(np.sqrt(v / v_avg) - np.sqrt(y / y_avg), 2)))
return np.sqrt(2 * np.sum(np.power(np.sqrt(y) - np.sqrt(v), 2)))
def whittaker_index_of_association_distance(v, y):
r"""
Whittaker index of association distance:
.. math::
\frac{1}{2}\sum|\frac{v_i}{\bar{v}}-\frac{y_i}{\bar{y}}|
"""
v_avg = np.mean(v)
y_avg = np.mean(y)
return 1 / 2 * np.sum(np.abs(v / v_avg - y / y_avg))
#def symmetric_chi_squared_distance(v, y):
# r"""
# Symmetric χ2 distance:
#
# .. math::
#
# \sqrt{\sum{\frac{\bar{v}+\bar{y}}{N(\bar{v}+\bar{y})^2}\frac{(v_i\bar{y}-y_i\bar{v})^2}{v_i+y_i}\ }}
# """
# v_avg = np.mean(v)
# y_avg = np.mean(y)
# n = np.sum(v > 0)
#
# d1 = (v_avg + y_avg) / (n * np.power(v_avg + y_avg, 2))
# return np.sqrt(d1 * np.sum(np.power(v * y_avg - y * v_avg, 2) / (v + y)))
def similarity_index_distance(v, y):
r"""
Similarity Index Distance:
.. math::
\sqrt{\frac{\sum\{\frac{v_i-y_i}{y_i}\}^2}{N}}
"""
n = np.sum(v > 0)
return np.sqrt(1 / n * np.sum(np.power((v - y) / y, 2)))
def improved_similarity_distance(v, y):
r"""
Improved Similarity Index:
.. math::
\sqrt{\frac{1}{N}\sum\{\frac{y_i-v_i}{y_i+v_i}\}^2}
"""
n = np.sum(v > 0)
return np.sqrt(1 / n * np.sum(np.power((y - v) / (y + v), 2)))
def absolute_value_distance(v, y):
r"""
Absolute Value Distance:
.. math::
\frac { \sum(|y_i-v_i|)}{\sum v_i}
"""
dist = np.sum(np.abs(y - v)) / np.sum(v)
return dist
def spectral_contrast_angle_distance(v, y):
r"""
Spectral Contrast Angle:
.. math::
1 - \frac{\sum{y_iv_i}}{\sqrt{\sum y_i^2\sum v_i^2}}
\arccos(\frac{\sum_{P}y_{p}^* v_{p}^*}{\sqrt{\sum_{P}y_{p}^{*2} \sum_{P}v_{p}^{*2}}})
"""
#return 1 - np.sum(y * v) / \
# np.sqrt(np.sum(np.power(y, 2)) * np.sum(np.power(v, 2)))
return np.arccos(np.sum(y * v) / (np.sqrt(np.sum(np.power(y, 2)) * np.sum(np.power(v, 2)))))
def wave_hedges_distance(v, y):
r"""
Wave Hedges distance:
.. math::
\sum\frac{|v_i-y_i|}{\max{(v_i,y_i)}}
"""
return np.sum(np.abs(v - y) / np.maximum(v, y))
def dice_similarity(v, y):
r"""
Dice similarity:
.. math::
\frac{\sum(v_i-y_i)^2}{\sum v_i^2+\sum y_i^2}
\frac{2 * \sum_{i}v_{i}y_{i}}{\sum_{i}y_{i}^2 + \sum_{i}v_{i}^2}
"""
return 2 * np.sum(v * y) / (np.sum(np.power(v, 2)) + np.sum(np.power(y, 2)))
def inner_product_distance(v, y):
r"""
Inner Product distance:
.. math::
1-\sum{v_iy_i}
"""
return 1 - np.sum(v * y)
def divergence_distance(v, y):
r"""
Divergence distance:
.. math::
2\sum\frac{(v_i-y_i)^2}{(v_i+y_i)^2}
"""
return 2 * np.sum((np.power(v - y, 2)) / np.power(v + y, 2))
def _chi_squared_distance(v, y):
r"""
Additive symmetric χ2 distance:
.. math::
\sum\frac{(v_i-y_i)^2(v_i+y_i)}{v_iy_i}
"""
dist = np.sum(np.power(v - y, 2) * (v + y) / (v * y))
return dist
def jensen_difference_distance(v, y):
r"""
Jensen difference:
.. math::
\sum[\frac{1}{2}(v_i\ln{v_i}+y_i\ln{y_i})-(\frac{v_i+y_i}{2})\ln{(\frac{v_i+y_i}{2})}]
"""
y_v_avg = (y + v) / 2
return np.sum(
1 / 2 * (y * np.log(y) + v * np.log(v)) -
y_v_avg * np.log(y_v_avg)
)
def kumar_johnson_distance(v, y):
r"""
Kumar-Johnson distance:
.. math::
\sum\frac{(v_i^2-y_i^2)^2}{2(v_iy_i)^\frac{3}{2}}
"""
return np.sum(
np.power(np.power(v, 2) - np.power(y, 2), 2) / \
(2 * np.power(v * y, 3 / 2))
)
def avg_l_distance(v, y):
r"""
Avg (L1, L∞) distance:
.. math::
\frac{1}{2}(\sum|v_i-y_i|+\underset{i}{\max}{|v_i-y_i|})
"""
return 1 / 2 * (np.sum(np.abs(v - y)) + max(np.abs(v - y)))
def vicis_wave_hadges_distance(v, y):
r"""
    Vicis-Wave Hedges distance:
.. math::
\sum\frac{|v_i-y_i|}{\min{(v_i,\ y_i)}}
"""
return np.sum(np.abs(v - y) / np.minimum(v, y))
def vicis_symmetric_chi_squared_1_distance(v, y):
r"""
Vicis-Symmetric χ2 1 distance:
.. math::
\sum\frac{(v_i-y_i)^2}{\min{(v_i,y_i)^2}}
"""
return np.sum(np.power(v - y, 2) / np.power(np.minimum(v, y), 2))
def vicis_symmetric_chi_squared_2_distance(v, y):
r"""
Vicis-Symmetric χ2 2 distance:
.. math::
\sum\frac{(v_i-y_i)^2}{\min{(v_i,y_i)}}
"""
return np.sum(np.power(v - y, 2) / np.minimum(v, y))
def vicis_symmetric_chi_squared_3_distance(v, y):
r"""
Vicis-Symmetric χ2 3 distance:
.. math::
\sum\frac{(v_i-y_i)^2}{\max{(v_i,y_i)}}
"""
return np.sum(np.power(v - y, 2) / np.maximum(v, y))
def max_symmetric_chi_squared_distance(v, y):
r"""
Max-Symmetric χ2 distance:
.. math::
\max{(\sum\frac{(v_i-y_i)^2}{v_i},\sum\frac{(v_i-y_i)^2}{y_i})}
"""
return max(np.sum(np.power(v - y, 2) / v), np.sum(np.power(v - y, 2) / y))
def min_symmetric_chi_squared_distance(v, y):
r"""
Min-Symmetric χ2 distance:
.. math::
\min{(\sum\frac{(v_i-y_i)^2}{v_i},\sum\frac{(v_i-y_i)^2}{y_i})}
"""
return min(np.sum(np.power(v - y, 2) / v), np.sum(np.power(v - y, 2) / y))
"""added by Allison"""
def additive_sym_chi_sq(v, y):
r"""
Additive Symmetric χ2 distance:
.. math::
\sum_{i}\frac{(y_{i} - v_{i})^2(y_{i}+v_{i})}{y_{i}v_{i}}
"""
    return np.sum(np.power(y - v, 2) * (y + v) / (y * v))
<reponame>pooyamb/aiotg
import os
import re
import logging
import asyncio
from urllib.parse import splitpasswd, splituser, urlparse
import aiohttp
from aiohttp import web
from aiosocksy import Socks4Auth, Socks5Auth, connector as socks_connector
import json
try:
import certifi
import ssl
except ImportError:
certifi = None
from .chat import Chat, Sender
from .reloader import run_with_reloader
__author__ = "<NAME>"
__copyright__ = "Copyright 2015-2017 <NAME>"
__license__ = "MIT"
API_URL = "https://api.telegram.org"
API_TIMEOUT = 60
RETRY_TIMEOUT = 30
RETRY_CODES = [429, 500, 502, 503, 504]
CHATBASE_URL = "https://chatbase.com/api/message"
# Message types to be handled by bot.handle(...)
MESSAGE_TYPES = [
"location",
"photo",
"document",
"audio",
"voice",
"sticker",
"contact",
"venue",
"video",
"game",
"delete_chat_photo",
"new_chat_photo",
"delete_chat_photo",
"new_chat_member",
"left_chat_member",
"new_chat_title",
"group_chat_created",
"successful_payment",
]
# Update types that are dispatched as messages
MESSAGE_UPDATES = [
"message",
"edited_message",
"channel_post",
"edited_channel_post",
"successful_payment",
]
AIOHTTP_23 = aiohttp.__version__ > "2.3"
logger = logging.getLogger("aiotg")
class Bot:
"""Telegram bot framework designed for asyncio
:param str api_token: Telegram bot token, ask @BotFather for this
:param int api_timeout: Timeout for long polling
:param str chatbase_token: Token for http://chatbase.com
:param str name: Bot name
:param callable json_serialize: JSON serializer function. (json.dumps by default)
:param callable json_deserialize: JSON deserializer function. (json.loads by default)
:param bool default_in_groups: Enables default callback in groups
:param str proxy: Proxy URL to use for HTTP requests
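    A minimal usage sketch (the token value is a placeholder, not a real token):
    :Example:
    >>> bot = Bot(api_token="123456:ABCDEF")
    >>> @bot.command(r"/echo (.+)")
    >>> def echo(chat, match):
    >>>     return chat.reply(match.group(1))
    >>> bot.run()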
"""
_running = False
_offset = 0
def __init__(
self,
api_token,
api_timeout=API_TIMEOUT,
chatbase_token=None,
name=None,
json_serialize=json.dumps,
json_deserialize=json.loads,
default_in_groups=False,
proxy=None,
):
self.api_token = api_token
self.api_timeout = api_timeout
self.chatbase_token = chatbase_token
self.name = name
self.json_serialize = json_serialize
self.json_deserialize = json_deserialize
self.default_in_groups = default_in_groups
self.webhook_url = None
self._session = None
self.proxy = proxy
self._proxy_is_socks = self.proxy and self.proxy.startswith("socks")
if self._proxy_is_socks and "@" in self.proxy:
proxy_scheme, proxy_loc = self.proxy.split("://", 1)
proxy_auth, proxy_loc = splituser(proxy_loc)
proxy_user, proxy_pass = splitpasswd(proxy_auth)
if proxy_scheme == "socks5":
proxy_auth_factory = Socks5Auth
elif proxy_scheme == "socks4":
proxy_auth_factory = Socks4Auth
else:
raise ValueError("Unknown SOCKS-proxy scheme: {}".format(proxy_scheme))
self.proxy_auth = proxy_auth_factory(proxy_user, password=proxy_pass)
self.proxy = "{}://{}".format(proxy_scheme, proxy_loc)
else:
self.proxy_auth = None
def no_handle(mt):
return lambda chat, msg: logger.debug("no handle for %s", mt)
# Init default handlers and callbacks
self._handlers = {mt: no_handle(mt) for mt in MESSAGE_TYPES}
self._commands = []
self._callbacks = []
self._inlines = []
self._checkouts = []
self._default = lambda chat, message: None
self._default_callback = lambda chat, cq: None
self._default_inline = lambda iq: None
async def loop(self):
"""
Return bot's main loop as coroutine. Use with asyncio.
:Example:
>>> loop = asyncio.get_event_loop()
>>> loop.run_until_complete(bot.loop())
or
>>> loop = asyncio.get_event_loop()
>>> loop.create_task(bot.loop())
"""
self._running = True
while self._running:
updates = await self.api_call(
"getUpdates", offset=self._offset + 1, timeout=self.api_timeout
)
self._process_updates(updates)
def run(self, debug=False, reload=None):
"""
Convenience method for running bots in getUpdates mode
:param bool debug: Enable debug logging and automatic reloading
:param bool reload: Automatically reload bot on code change
:Example:
>>> if __name__ == '__main__':
>>> bot.run()
"""
loop = asyncio.get_event_loop()
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
if reload is None:
reload = debug
bot_loop = asyncio.ensure_future(self.loop())
try:
if reload:
loop.run_until_complete(run_with_reloader(loop, bot_loop, self.stop))
else:
loop.run_until_complete(bot_loop)
# User cancels
except KeyboardInterrupt:
logger.debug("User cancelled")
bot_loop.cancel()
self.stop()
# Stop loop
finally:
if AIOHTTP_23:
loop.run_until_complete(self.session.close())
logger.debug("Closing loop")
loop.stop()
loop.close()
def run_webhook(self, webhook_url, **options):
"""
Convenience method for running bots in webhook mode
:Example:
>>> if __name__ == '__main__':
>>> bot.run_webhook(webhook_url="https://yourserver.com/webhooktoken")
Additional documentation on https://core.telegram.org/bots/api#setwebhook
"""
loop = asyncio.get_event_loop()
loop.run_until_complete(self.set_webhook(webhook_url, **options))
if webhook_url:
url = urlparse(webhook_url)
app = self.create_webhook_app(url.path, loop)
host = os.environ.get("HOST", "0.0.0.0")
port = int(os.environ.get("PORT", 0)) or url.port
if AIOHTTP_23:
app.on_cleanup.append(lambda _: self.session.close())
web.run_app(app, host=host, port=port)
else:
loop.run_until_complete(self.session.close())
def stop_webhook(self):
"""
Use to switch from Webhook to getUpdates mode
"""
self.run_webhook(webhook_url="")
def add_command(self, regexp, fn):
"""
Manually register regexp based command
"""
self._commands.append((regexp, fn))
def command(self, regexp):
"""
Register a new command
:param str regexp: Regular expression matching the command to register
:Example:
>>> @bot.command(r"/echo (.+)")
>>> def echo(chat, match):
>>> return chat.reply(match.group(1))
"""
def decorator(fn):
self.add_command(regexp, fn)
return fn
return decorator
def default(self, callback):
"""
Set callback for default command that is called on unrecognized
commands for 1-to-1 chats
If default_in_groups option is True, callback is called in groups too
:Example:
>>> @bot.default
>>> def echo(chat, message):
>>> return chat.reply(message["text"])
"""
self._default = callback
return callback
def add_inline(self, regexp, fn):
"""
Manually register regexp based callback
"""
self._inlines.append((regexp, fn))
def inline(self, callback):
"""
Set callback for inline queries
:Example:
>>> @bot.inline
>>> def echo(iq):
>>> return iq.answer([
>>> {"type": "text", "title": "test", "id": "0"}
>>> ])
>>> @bot.inline(r"myinline-(.+)")
>>> def echo(chat, iq, match):
>>> return iq.answer([
>>> {"type": "text", "title": "test", "id": "0"}
>>> ])
"""
if callable(callback):
self._default_inline = callback
return callback
elif isinstance(callback, str):
def decorator(fn):
self.add_inline(callback, fn)
return fn
return decorator
else:
raise TypeError("str expected {} given".format(type(callback)))
def add_callback(self, regexp, fn):
"""
Manually register regexp based callback
"""
self._callbacks.append((regexp, fn))
def callback(self, callback):
"""
Set callback for callback queries
:Example:
>>> @bot.callback
>>> def echo(chat, cq):
>>> return cq.answer()
>>> @bot.callback(r"buttonclick-(.+)")
>>> def echo(chat, cq, match):
>>> return chat.reply(match.group(1))
"""
if callable(callback):
self._default_callback = callback
return callback
elif isinstance(callback, str):
def decorator(fn):
self.add_callback(callback, fn)
return fn
return decorator
else:
raise TypeError("str expected {} given".format(type(callback)))
def add_checkout(self, regexp, fn):
"""
Manually register regexp based checkout handler
"""
self._checkouts.append((regexp, fn))
def checkout(self, callback):
if callable(callback):
            self._default_checkout = callback
            return callback
elif isinstance(callback, str):
def decorator(fn):
self.add_checkout(callback, fn)
return fn
return decorator
else:
raise TypeError("str expected {} given".format(type(callback)))
def handle(self, msg_type):
"""
Set handler for specific message type
:Example:
>>> @bot.handle("audio")
>>> def handle(chat, audio):
>>> pass
"""
def wrap(callback):
self._handlers[msg_type] = callback
return callback
return wrap
def channel(self, channel_name):
"""
Construct a Chat object used to post to channel
:param str channel_name: Channel name
"""
return Chat(self, channel_name, "channel")
def private(self, user_id):
"""
Construct a Chat object used to post direct messages
:param str user_id: User id
"""
return Chat(self, user_id, "private")
def group(self, group_id):
"""
Construct a Chat object used to post group messages
:param str group_id: Group chat id
"""
return Chat(self, group_id, "group")
def api_call(self, method, **params):
"""
Call Telegram API.
See https://core.telegram.org/bots/api for reference.
:param str method: Telegram API method
:param params: Arguments for the method call
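        A sketch of a direct call (chat id and text are illustrative):
        :Example:
        >>> await bot.api_call("sendMessage", chat_id=123456, text="Hello!")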
"""
coro = self._api_call(method, **params)
# Explicitly ensure that API call is executed
return asyncio.ensure_future(coro)
async def _api_call(self, method, **params):
url = "{0}/bot{1}/{2}".format(API_URL, self.api_token, method)
logger.debug("api_call %s, %s", method, params)
response = await self.session.post(
url, data=params, proxy=self.proxy, proxy_auth=self.proxy_auth
)
if response.status == 200:
return await response.json(loads=self.json_deserialize)
elif response.status in RETRY_CODES:
logger.info(
"Server returned %d, retrying in %d sec.",
response.status,
RETRY_TIMEOUT,
)
await response.release()
await asyncio.sleep(RETRY_TIMEOUT)
return await self.api_call(method, **params)
else:
if response.headers["content-type"] == "application/json":
json_resp = await response.json(loads=self.json_deserialize)
err_msg = json_resp["description"]
else:
err_msg = await response.read()
logger.error(err_msg)
raise BotApiError(err_msg, response=response)
async def get_me(self):
"""
Returns basic information about the bot
(see https://core.telegram.org/bots/api#getme)
"""
json_result = await self.api_call("getMe")
return json_result["result"]
async def leave_chat(self, chat_id):
"""
Use this method for your bot to leave a group, supergroup or channel.
Returns True on success.
:param int chat_id: Unique identifier for the target chat \
or username of the target supergroup or channel \
(in the format @channelusername)
"""
json_result = await self.api_call("leaveChat", chat_id=chat_id)
return json_result["result"]
def send_message(self, chat_id, text, **options):
"""
Send a text message to chat
:param int chat_id: ID of the chat to send the message to
:param str text: Text to send
:param options: Additional sendMessage options
(see https://core.telegram.org/bots/api#sendmessage)
"""
return self.api_call("sendMessage", chat_id=chat_id, text=text, **options)
def edit_message_text(self, chat_id, message_id, text, **options):
"""
Edit a text message in a chat
:param int chat_id: ID of the chat the message to edit is in
:param int message_id: ID of the message to edit
:param str text: Text to edit the message to
:param options: Additional API options
"""
return self.api_call(
"editMessageText",
chat_id=chat_id,
message_id=message_id,
text=text,
**options
)
def edit_message_reply_markup(self, chat_id, message_id, reply_markup, **options):
"""
Edit a reply markup of message in a chat
:param int chat_id: ID of the chat the message to edit is in
:param int message_id: ID of the message to edit
:param str reply_markup: New inline keyboard markup for the message
:param options: Additional API options
"""
return self.api_call(
"editMessageReplyMarkup",
chat_id=chat_id,
message_id=message_id,
reply_markup=reply_markup,
**options
)
async def get_file(self, file_id):
"""
        Get basic information about a file and prepare it for downloading
<reponame>ratschlab/spladder
import pysam
import re
import numpy as np
import scipy.sparse
import copy
import time
import h5py
import uuid
if __package__ is None:
__package__ = 'modules'
from .utils import *
from .init import *
def get_reads(fname, chr_name, start, stop, strand=None, filter=None, mapped=True, spliced=True, var_aware=None, collapse=False, primary_only=False, no_mm=False, mm_tag='NM', cram_ref=None):
if not re.search(r'\.[bB][aA][mM]$', fname) is None:
infile = pysam.AlignmentFile(fname, 'rb')
elif not re.search(r'\.[cC][rR][aA][mM]$', fname) is None:
infile = pysam.AlignmentFile(fname, 'rc', reference_filename=cram_ref, ignore_truncation=True)
else:
sys.stderr.write('Error: Unknown input alignment format for: %s\n' % fname)
### vectors to build sparse matrix
i = []
j = []
read_cnt = 0
introns_p = dict()
introns_m = dict()
if collapse:
read_matrix = np.zeros((1, stop - start), dtype='int')
else:
read_matrix = scipy.sparse.coo_matrix((np.ones(0), ([], [])), shape = (0, stop - start), dtype='bool')
length = stop - start
#print >> sys.stderr, 'querying %s:%i-%i' % (chr_name, start, stop)
### TODO THIS IS A HACK
if chr_name == 'MT':
return (read_matrix, np.zeros(shape=(0, 3), dtype='uint32'), np.zeros(shape=(0, 3), dtype='uint32'))
if infile.gettid(chr_name) > -1:
### pysam query is zero based in position (results are as well), all intervals are pythonic half open
for read in infile.fetch(chr_name, start, stop, until_eof=True):
### check if we skip this read
if filter_read(read, filter, spliced, mapped, strand, primary_only, var_aware, no_mm, mm_tag=mm_tag):
continue
tags = dict(read.tags)
curr_read_stranded = ('XS' in tags)
is_minus = False
if curr_read_stranded:
is_minus = (tags['XS'] == '-')
            ### get introns and coverage
p = read.pos
for o in read.cigar:
if o[0] == 3:
if is_minus:
try:
introns_m[(p, p + o[1])] += 1
except KeyError:
introns_m[(p, p + o[1])] = 1
else:
try:
introns_p[(p, p + o[1])] += 1
except KeyError:
introns_p[(p, p + o[1])] = 1
if o[0] in [0, 2]:
_start = int(max(p-start, 0))
_stop = int(min(p + o[1] - start, stop - start))
if _stop < 0 or _start > length:
if o[0] in [0, 2, 3]:
p += o[1]
continue
if collapse:
read_matrix[0, _start:_stop] += 1
else:
r = np.arange(_start, _stop)
i.extend([read_cnt] * len(r))
j.extend(r)
#for pp in range(p, p + o[1]):
# if pp - start >= 0 and pp < stop:
# i.append(read_cnt)
# j.append(pp - start)
if o[0] in [0, 2, 3]:
p += o[1]
                ### the following is new behavior and is going to come in the next version --> deletions are not counted towards coverage
#### get coverage
#for p in read.positions:
# if p - start >= 0:
# if p >= stop:
# break
# else:
# i.append(read_cnt)
# j.append(p - start)
read_cnt += 1
### construct sparse matrix
if not collapse:
try:
i = np.array(i, dtype='int')
j = np.array(j, dtype='int')
read_matrix = scipy.sparse.coo_matrix((np.ones(i.shape[0]), (i, j)), shape = (read_cnt, stop - start), dtype='bool')
except ValueError:
step = 1000000
_k = step
assert len(i) > _k
read_matrix = scipy.sparse.coo_matrix((np.ones(_k), (i[:_k], j[:_k])), shape = (read_cnt, stop - start), dtype='bool')
while _k < len(i):
_l = min(len(i), _k + step)
read_matrix += scipy.sparse.coo_matrix((np.ones(_l - _k), (i[_k:_l], j[_k:_l])), shape = (read_cnt, stop - start), dtype='bool')
_k = _l
### convert introns into scipy array
if len(introns_p) >= 1:
introns_p = np.array([[k[0], k[1], v] for k, v in introns_p.items()], dtype='uint32')
introns_p = sort_rows(introns_p)
else:
introns_p = np.zeros(shape=(0, 3), dtype='uint32')
if len(introns_m) >= 1:
introns_m = np.array([[k[0], k[1], v] for k, v in introns_m.items()], dtype='uint32')
introns_m = sort_rows(introns_m)
else:
introns_m = np.zeros(shape=(0, 3), dtype='uint32')
return (read_matrix, introns_p, introns_m)
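# Usage sketch (hypothetical file and contig names): collapse per-base coverage into a
# single row and collect spliced introns for a 10 kb window.
#
#     cov, introns_p, introns_m = get_reads('sample.bam', 'chr1', 100000, 110000, collapse=True)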
def add_reads_from_bam(blocks, filenames, types, filter=None, var_aware=False, primary_only=False, no_mm=False, unstranded=True, mm_tag='NM', cram_ref=None):
# blocks coordinates are assumed to be in closed intervals
#if filter is None:
# filter = dict()
# filter['intron'] = 20000
# filter['exon_len'] = 8
# filter['mismatch']= 1
if not types:
print('add_reads_from_bam: nothing to do')
return
verbose = False
pair = False
pair = ('pair_coverage' in types)
clipped = False
if type(blocks).__module__ != 'numpy':
blocks = np.array([blocks])
for b in range(blocks.shape[0]):
introns_p = None
introns_m = None
if verbose and b % 10 == 0:
print('\radd_exon_track_from_bam: %i(%i)' % (b, blocks.shape[0]))
block_len = int(blocks[b].stop - blocks[b].start)
## get data from bam
if 'exon_track' in types:
(introns_p, introns_m, coverage) = get_all_data(blocks[b], filenames, filter=filter, var_aware=var_aware, primary_only=primary_only, no_mm=no_mm, mm_tag=mm_tag, cram_ref=cram_ref)
if 'mapped_exon_track' in types:
(introns_p, introns_m, mapped_coverage) = get_all_data(blocks[b], filenames, spliced=False, filter=filter, var_aware=var_aware, primary_only=primary_only, no_mm=no_mm, mm_tag=mm_tag, cram_ref=cram_ref)
if 'spliced_exon_track' in types:
(introns_p, introns_m, spliced_coverage) = get_all_data(blocks[b], filenames, mapped=False, filter=filter, var_aware=var_aware, primary_only=primary_only, no_mm=no_mm, mm_tag=mm_tag, cram_ref=cram_ref)
if 'polya_signal_track' in types:
(introns_p, introns_m, polya_signals) = get_all_data_uncollapsed(blocks[b], filenames, filter=filter, clipped=True, var_aware=var_aware, primary_only=primary_only, no_mm=no_mm, mm_tag=mm_tag, cram_ref=cram_ref)
if 'end_signal_track' in types:
(introns_p, introns_m, read_end_signals) = get_all_data_uncollapsed(blocks[b], filenames, filter=filter, var_aware=var_aware, primary_only=primary_only, no_mm=no_mm, mm_tag=mm_tag, cram_ref=cram_ref)
if 'intron_list' in types or 'intron_track' in types:
if introns_p is None:
(introns_p, introns_m, spliced_coverage) = get_all_data(blocks[b], filenames, mapped=False, filter=filter, var_aware=var_aware, primary_only=primary_only, no_mm=no_mm, mm_tag=mm_tag, cram_ref=cram_ref)
if not introns_p is None:
introns_p = sort_rows(introns_p)
if not introns_m is None:
introns_m = sort_rows(introns_m)
# add requested data to block
tracks = np.zeros((0, block_len))
intron_list = []
for ttype in types:
## add exon track to block
##############################################################################
if ttype == 'exon_track':
tracks = np.r_[tracks, coverage]
## add mapped exon track to block
##############################################################################
elif ttype == 'mapped_exon_track':
tracks = np.r_[tracks, mapped_coverage]
## add spliced exon track to block
##############################################################################
elif ttype == 'spliced_exon_track':
tracks = np.r_[tracks, spliced_coverage]
## add intron coverage track to block
##############################################################################
elif ttype == 'intron_track':
intron_coverage = np.zeros((1, block_len))
if introns_p.shape[0] > 0:
for k in range(introns_p.shape[0]):
from_pos = max(0, introns_p[k, 0])
to_pos = min(block_len, introns_p[k, 1])
intron_coverage[from_pos:to_pos] += introns_p[k, 2]
if introns_m.shape[0] > 0:
for k in range(introns_m.shape[0]):
from_pos = max(0, introns_m[k, 0])
to_pos = min(block_len, introns_m[k, 1])
intron_coverage[from_pos:to_pos] += introns_m[k, 2]
tracks = np.r_[tracks, intron_coverage]
## compute intron list
##############################################################################
elif ttype == 'intron_list':
if introns_p.shape[0] > 0 or introns_m.shape[0] > 0:
### filter introns for location relative to block
### this is legacy behavior for matlab versions!
### TODO - Think about keeping this? Make it a parameter?
k_idx = np.where((introns_p[:, 0] > blocks[0].start) & (introns_p[:, 1] < blocks[0].stop))[0]
introns_p = introns_p[k_idx, :]
k_idx = np.where((introns_m[:, 0] > blocks[0].start) & (introns_m[:, 1] < blocks[0].stop))[0]
introns_m = introns_m[k_idx, :]
if unstranded:
introns = sort_rows(np.r_[introns_p, introns_m])
else:
if blocks[0].strand == '-':
introns = introns_m
else:
introns = introns_p
if filter is not None and 'mincount' in filter:
take_idx = np.where(introns[:, 2] >= filter['mincount'])[0]
if take_idx.shape[0] > 0:
intron_list.append(introns[take_idx, :])
else:
intron_list.append(np.zeros((0, 3), dtype='uint32'))
else:
intron_list.append(introns)
else:
intron_list.append(np.zeros((0, 3), dtype='uint32'))
## add polya signal track
##############################################################################
elif ttype == 'polya_signal_track':
### get only end positions of reads
                shp = polya_signals.shape
end_idx = shp[0] - 1 - polya_signals[:, ::-1].argmax(axis = 1)
polya_signals = scipy.sparse.coo_matrix((np.ones((shp[1],)), (np.arange(shp[1]), end_idx)), shape = shp)
tracks = np.r_[tracks, polya_signals.sum(axis = 0)]
## add end signal track
##############################################################################
elif ttype == 'end_signal_track':
### get only end positions of reads
                shp = read_end_signals.shape
                end_idx = shp[0] - 1 - read_end_signals[:, ::-1].argmax(axis = 1)
end_signals = scipy.sparse.coo_matrix((np.ones((shp[1],)), (np.arange(shp[1]), end_idx)), shape = shp)
tracks = np.r_[tracks, end_signals.sum(axis = 0)]
else:
print('ERROR: unknown type of data requested: %s' % ttype, file=sys.stderr)
if len(types) == 1 and types[0] == 'intron_list':
return intron_list
elif 'intron_list' in types:
return (tracks, intron_list)
else:
return tracks
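# Usage sketch (hypothetical inputs): request a coverage track and the intron list for a
# set of blocks; with both types requested, the function returns a (tracks, introns) pair.
#
#     tracks, introns = add_reads_from_bam(blocks, ['sample.bam'], ['exon_track', 'intron_list'])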
def add_reads_from_sparse_bam(gg, fname, contig, conf, types=None, filter=None, cache=None, unstranded=False):
if cache is None or len(cache) == 0:
### load counts from summary file
if fname.endswith('hdf5'):
IN = h5py.File(fname, 'r')
else:
if not filter is None:
IN = h5py.File(re.sub(r'\.[bB][aA][mM]|\.[cC][rR][aA][mM]$', '', fname) + '.conf_%i.' % conf + 'filt.' + 'hdf5', 'r')
else:
IN = h5py.File(re.sub(r'\.[bB][aA][mM]|\.[cC][rR][aA][mM]$', '', fname) + '.hdf5', 'r')
### re-build sparse matrix
cache['reads'] = scipy.sparse.coo_matrix((IN[contig + '_reads_dat'][:], (IN[contig + '_reads_row'][:], IN[contig + '_reads_col'][:])), shape=IN[contig + '_reads_shp'][:], dtype='uint32').tocsc()
cache['introns_m'] = IN[contig + '_introns_m'][:]
cache['introns_p'] = IN[contig + '_introns_p'][:]
IN.close()
ret = []
if 'exon_track' in types:
if cache['reads'].shape[0] == 0:
tracks = np.zeros((1, gg.stop - gg.start), dtype='int')
elif not unstranded and cache['reads'].shape[0] > 1:
tracks = cache['reads'][[0, 1 + int(gg.strand == '-')], gg.start:gg.stop].todense()
else:
tracks = cache['reads'][:, gg.start:gg.stop].todense()
ret.append(tracks)
if 'intron_list' in types:
if unstranded:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SyntheticsGlobalVariableArgs', 'SyntheticsGlobalVariable']
@pulumi.input_type
class SyntheticsGlobalVariableArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
parse_test_id: Optional[pulumi.Input[str]] = None,
parse_test_options: Optional[pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs']] = None,
restricted_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
secure: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a SyntheticsGlobalVariable resource.
:param pulumi.Input[str] name: Synthetics global variable name.
:param pulumi.Input[str] value: The value of the global variable.
:param pulumi.Input[str] description: Description of the global variable.
:param pulumi.Input[str] parse_test_id: Id of the Synthetics test to use for a variable from test.
        :param pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs'] parse_test_options: ID of the Synthetics test to use as a source of the global variable value.
:param pulumi.Input[Sequence[pulumi.Input[str]]] restricted_roles: A list of role identifiers to associate with the Synthetics global variable.
:param pulumi.Input[bool] secure: If set to true, the value of the global variable is hidden. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tags to associate with your synthetics global variable.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
if description is not None:
pulumi.set(__self__, "description", description)
if parse_test_id is not None:
pulumi.set(__self__, "parse_test_id", parse_test_id)
if parse_test_options is not None:
pulumi.set(__self__, "parse_test_options", parse_test_options)
if restricted_roles is not None:
pulumi.set(__self__, "restricted_roles", restricted_roles)
if secure is not None:
pulumi.set(__self__, "secure", secure)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Synthetics global variable name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the global variable.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the global variable.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="parseTestId")
def parse_test_id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the Synthetics test to use for a variable from test.
"""
return pulumi.get(self, "parse_test_id")
@parse_test_id.setter
def parse_test_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parse_test_id", value)
@property
@pulumi.getter(name="parseTestOptions")
def parse_test_options(self) -> Optional[pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs']]:
"""
        ID of the Synthetics test to use as a source of the global variable value.
"""
return pulumi.get(self, "parse_test_options")
@parse_test_options.setter
def parse_test_options(self, value: Optional[pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs']]):
pulumi.set(self, "parse_test_options", value)
@property
@pulumi.getter(name="restrictedRoles")
def restricted_roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of role identifiers to associate with the Synthetics global variable.
"""
return pulumi.get(self, "restricted_roles")
@restricted_roles.setter
def restricted_roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "restricted_roles", value)
@property
@pulumi.getter
def secure(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, the value of the global variable is hidden. Defaults to `false`.
"""
return pulumi.get(self, "secure")
@secure.setter
def secure(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "secure", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of tags to associate with your synthetics global variable.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _SyntheticsGlobalVariableState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parse_test_id: Optional[pulumi.Input[str]] = None,
parse_test_options: Optional[pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs']] = None,
restricted_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
secure: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SyntheticsGlobalVariable resources.
:param pulumi.Input[str] description: Description of the global variable.
:param pulumi.Input[str] name: Synthetics global variable name.
:param pulumi.Input[str] parse_test_id: Id of the Synthetics test to use for a variable from test.
        :param pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs'] parse_test_options: ID of the Synthetics test to use as a source of the global variable value.
:param pulumi.Input[Sequence[pulumi.Input[str]]] restricted_roles: A list of role identifiers to associate with the Synthetics global variable.
:param pulumi.Input[bool] secure: If set to true, the value of the global variable is hidden. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tags to associate with your synthetics global variable.
:param pulumi.Input[str] value: The value of the global variable.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if parse_test_id is not None:
pulumi.set(__self__, "parse_test_id", parse_test_id)
if parse_test_options is not None:
pulumi.set(__self__, "parse_test_options", parse_test_options)
if restricted_roles is not None:
pulumi.set(__self__, "restricted_roles", restricted_roles)
if secure is not None:
pulumi.set(__self__, "secure", secure)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the global variable.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Synthetics global variable name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="parseTestId")
def parse_test_id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the Synthetics test to use for a variable from test.
"""
return pulumi.get(self, "parse_test_id")
@parse_test_id.setter
def parse_test_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "parse_test_id", value)
@property
@pulumi.getter(name="parseTestOptions")
def parse_test_options(self) -> Optional[pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs']]:
"""
        ID of the Synthetics test to use as a source of the global variable value.
"""
return pulumi.get(self, "parse_test_options")
@parse_test_options.setter
def parse_test_options(self, value: Optional[pulumi.Input['SyntheticsGlobalVariableParseTestOptionsArgs']]):
pulumi.set(self, "parse_test_options", value)
@property
@pulumi.getter(name="restrictedRoles")
def restricted_roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of role identifiers to associate with the Synthetics global variable.
"""
return pulumi.get(self, "restricted_roles")
@restricted_roles.setter
def restricted_roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "restricted_roles", value)
@property
@pulumi.getter
def secure(self) -> Optional[pulumi.Input[bool]]:
"""
If set to true, the value of the global variable is hidden. Defaults to `false`.
"""
return pulumi.get(self, "secure")
@secure.setter
def secure(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "secure", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of tags to associate with your synthetics global variable.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the global variable.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
class SyntheticsGlobalVariable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parse_test_id: Optional[pulumi.Input[str]] = None,
parse_test_options: Optional[pulumi.Input[pulumi.InputType['SyntheticsGlobalVariableParseTestOptionsArgs']]] = None,
restricted_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
secure: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Datadog synthetics global variable resource. This can be used to create and manage Datadog synthetics global variables.
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
test_variable = datadog.SyntheticsGlobalVariable("testVariable",
description="Description of the variable",
name="EXAMPLE_VARIABLE",
tags=[
"foo:bar",
"env:test",
],
value="variable-value")
```
## Import
# Synthetics global variables can be imported using their string ID, e.g.
```sh
$ pulumi import datadog:index/syntheticsGlobalVariable:SyntheticsGlobalVariable fizz abcde123-fghi-456-jkl-mnopqrstuv
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the global variable.
:param pulumi.Input[str] name: Synthetics global variable name.
:param pulumi.Input[str] parse_test_id: Id of the Synthetics test to use for a variable from test.
        :param pulumi.Input[pulumi.InputType['SyntheticsGlobalVariableParseTestOptionsArgs']] parse_test_options: ID of the Synthetics test to use as a source of the global variable value.
:param pulumi.Input[Sequence[pulumi.Input[str]]] restricted_roles: A list of role identifiers to associate with the Synthetics global variable.
:param pulumi.Input[bool] secure: If set to true, the value of the global variable is hidden. Defaults to `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of tags to associate with your synthetics global variable.
:param pulumi.Input[str] value: The value of the global variable.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SyntheticsGlobalVariableArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Datadog synthetics global variable resource. This can be used to create and manage Datadog synthetics global variables.
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
test_variable = datadog.SyntheticsGlobalVariable("testVariable",
description="Description of the variable",
name="EXAMPLE_VARIABLE",
tags=[
"foo:bar",
"env:test",
],
value="variable-value")
```
## Import
# Synthetics global variables can be imported using their string ID, e.g.
```sh
$ pulumi import datadog:index/syntheticsGlobalVariable:SyntheticsGlobalVariable fizz abcde123-fghi-456-jkl-mnopqrstuv
```
:param str resource_name: The name of the resource.
:param SyntheticsGlobalVariableArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SyntheticsGlobalVariableArgs, pulumi.ResourceOptions, *args, **kwargs)
val:
:rtype: PreviewError
"""
return cls('path', val)
def is_path(self):
"""
Check if the union tag is ``path``.
:rtype: bool
"""
return self._tag == 'path'
def is_in_progress(self):
"""
Check if the union tag is ``in_progress``.
:rtype: bool
"""
return self._tag == 'in_progress'
def is_unsupported_extension(self):
"""
Check if the union tag is ``unsupported_extension``.
:rtype: bool
"""
return self._tag == 'unsupported_extension'
def is_unsupported_content(self):
"""
Check if the union tag is ``unsupported_content``.
:rtype: bool
"""
return self._tag == 'unsupported_content'
def get_path(self):
"""
An error occurs when downloading metadata for the file.
Only call this if :meth:`is_path` is true.
:rtype: LookupError
"""
if not self.is_path():
raise AttributeError("tag 'path' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(PreviewError, self)._process_custom_annotations(annotation_type, field_path, processor)
PreviewError_validator = bv.Union(PreviewError)
class PreviewResult(bb.Struct):
"""
:ivar files.PreviewResult.file_metadata: Metadata corresponding to the file
received as an argument. Will be populated if the endpoint is called
with a path (ReadPath).
:ivar files.PreviewResult.link_metadata: Minimal metadata corresponding to
the file received as an argument. Will be populated if the endpoint is
called using a shared link (SharedLinkFileInfo).
"""
__slots__ = [
'_file_metadata_value',
'_link_metadata_value',
]
_has_required_fields = False
def __init__(self,
file_metadata=None,
link_metadata=None):
self._file_metadata_value = bb.NOT_SET
self._link_metadata_value = bb.NOT_SET
if file_metadata is not None:
self.file_metadata = file_metadata
if link_metadata is not None:
self.link_metadata = link_metadata
# Instance attribute type: FileMetadata (validator is set below)
file_metadata = bb.Attribute("file_metadata", nullable=True, user_defined=True)
# Instance attribute type: MinimalFileLinkMetadata (validator is set below)
link_metadata = bb.Attribute("link_metadata", nullable=True, user_defined=True)
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(PreviewResult, self)._process_custom_annotations(annotation_type, field_path, processor)
PreviewResult_validator = bv.Struct(PreviewResult)
class RelocationPath(bb.Struct):
"""
:ivar files.RelocationPath.from_path: Path in the user's Dropbox to be
copied or moved.
:ivar files.RelocationPath.to_path: Path in the user's Dropbox that is the
destination.
"""
__slots__ = [
'_from_path_value',
'_to_path_value',
]
_has_required_fields = True
def __init__(self,
from_path=None,
to_path=None):
self._from_path_value = bb.NOT_SET
self._to_path_value = bb.NOT_SET
if from_path is not None:
self.from_path = from_path
if to_path is not None:
self.to_path = to_path
# Instance attribute type: str (validator is set below)
from_path = bb.Attribute("from_path")
# Instance attribute type: str (validator is set below)
to_path = bb.Attribute("to_path")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationPath, self)._process_custom_annotations(annotation_type, field_path, processor)
RelocationPath_validator = bv.Struct(RelocationPath)
class RelocationArg(RelocationPath):
"""
:ivar files.RelocationArg.allow_shared_folder: This flag has no effect.
:ivar files.RelocationArg.autorename: If there's a conflict, have the
Dropbox server try to autorename the file to avoid the conflict.
:ivar files.RelocationArg.allow_ownership_transfer: Allow moves by owner
even if it would result in an ownership transfer for the content being
moved. This does not apply to copies.
"""
__slots__ = [
'_allow_shared_folder_value',
'_autorename_value',
'_allow_ownership_transfer_value',
]
_has_required_fields = True
def __init__(self,
from_path=None,
to_path=None,
allow_shared_folder=None,
autorename=None,
allow_ownership_transfer=None):
super(RelocationArg, self).__init__(from_path,
to_path)
self._allow_shared_folder_value = bb.NOT_SET
self._autorename_value = bb.NOT_SET
self._allow_ownership_transfer_value = bb.NOT_SET
if allow_shared_folder is not None:
self.allow_shared_folder = allow_shared_folder
if autorename is not None:
self.autorename = autorename
if allow_ownership_transfer is not None:
self.allow_ownership_transfer = allow_ownership_transfer
# Instance attribute type: bool (validator is set below)
allow_shared_folder = bb.Attribute("allow_shared_folder")
# Instance attribute type: bool (validator is set below)
autorename = bb.Attribute("autorename")
# Instance attribute type: bool (validator is set below)
allow_ownership_transfer = bb.Attribute("allow_ownership_transfer")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationArg, self)._process_custom_annotations(annotation_type, field_path, processor)
RelocationArg_validator = bv.Struct(RelocationArg)
class RelocationBatchArg(RelocationBatchArgBase):
"""
:ivar files.RelocationBatchArg.allow_shared_folder: This flag has no effect.
:ivar files.RelocationBatchArg.allow_ownership_transfer: Allow moves by
owner even if it would result in an ownership transfer for the content
being moved. This does not apply to copies.
"""
__slots__ = [
'_allow_shared_folder_value',
'_allow_ownership_transfer_value',
]
_has_required_fields = True
def __init__(self,
entries=None,
autorename=None,
allow_shared_folder=None,
allow_ownership_transfer=None):
super(RelocationBatchArg, self).__init__(entries,
autorename)
self._allow_shared_folder_value = bb.NOT_SET
self._allow_ownership_transfer_value = bb.NOT_SET
if allow_shared_folder is not None:
self.allow_shared_folder = allow_shared_folder
if allow_ownership_transfer is not None:
self.allow_ownership_transfer = allow_ownership_transfer
# Instance attribute type: bool (validator is set below)
allow_shared_folder = bb.Attribute("allow_shared_folder")
# Instance attribute type: bool (validator is set below)
allow_ownership_transfer = bb.Attribute("allow_ownership_transfer")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(RelocationBatchArg, self)._process_custom_annotations(annotation_type, field_path, processor)
RelocationBatchArg_validator = bv.Struct(RelocationBatchArg)
class RelocationError(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar files.RelocationError.cant_copy_shared_folder: Shared folders can't be
copied.
:ivar files.RelocationError.cant_nest_shared_folder: Your move operation
would result in nested shared folders. This is not allowed.
:ivar files.RelocationError.cant_move_folder_into_itself: You cannot move a
folder into itself.
:ivar files.RelocationError.too_many_files: The operation would involve more
than 10,000 files and folders.
:ivar files.RelocationError.duplicated_or_nested_paths: There are
duplicated/nested paths among ``RelocationArg.from_path`` and
``RelocationArg.to_path``.
:ivar files.RelocationError.cant_transfer_ownership: Your move operation
would result in an ownership transfer. You may reissue the request with
the field ``RelocationArg.allow_ownership_transfer`` to true.
:ivar files.RelocationError.insufficient_quota: The current user does not
have enough space to move or copy the files.
:ivar files.RelocationError.internal_error: Something went wrong with the
job on Dropbox's end. You'll need to verify that the action you were
taking succeeded, and if not, try again. This should happen very rarely.
:ivar files.RelocationError.cant_move_shared_folder: Can't move the shared
folder to the given destination.
:ivar MoveIntoVaultError RelocationError.cant_move_into_vault: Some content
cannot be moved into Vault under certain circumstances, see detailed
error.
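    A usage sketch (``err`` stands for a RelocationError returned by a copy or move call):
        if err.is_from_lookup():
            lookup_err = err.get_from_lookup()
        elif err.is_cant_transfer_ownership():
            # retry with RelocationArg.allow_ownership_transfer set to True
            pass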
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
cant_copy_shared_folder = None
# Attribute is overwritten below the class definition
cant_nest_shared_folder = None
# Attribute is overwritten below the class definition
cant_move_folder_into_itself = None
# Attribute is overwritten below the class definition
too_many_files = None
# Attribute is overwritten below the class definition
duplicated_or_nested_paths = None
# Attribute is overwritten below the class definition
cant_transfer_ownership = None
# Attribute is overwritten below the class definition
insufficient_quota = None
# Attribute is overwritten below the class definition
internal_error = None
# Attribute is overwritten below the class definition
cant_move_shared_folder = None
# Attribute is overwritten below the class definition
other = None
@classmethod
def from_lookup(cls, val):
"""
Create an instance of this class set to the ``from_lookup`` tag with
value ``val``.
:param LookupError val:
:rtype: RelocationError
"""
return cls('from_lookup', val)
@classmethod
def from_write(cls, val):
"""
Create an instance of this class set to the ``from_write`` tag with
value ``val``.
:param WriteError val:
:rtype: RelocationError
"""
return cls('from_write', val)
@classmethod
def to(cls, val):
"""
Create an instance of this class set to the ``to`` tag with value
``val``.
:param WriteError val:
:rtype: RelocationError
"""
return cls('to', val)
@classmethod
def cant_move_into_vault(cls, val):
"""
Create an instance of this class set to the ``cant_move_into_vault`` tag
with value ``val``.
:param MoveIntoVaultError val:
:rtype: RelocationError
"""
return cls('cant_move_into_vault', val)
def is_from_lookup(self):
"""
Check if the union tag is ``from_lookup``.
:rtype: bool
"""
return self._tag == 'from_lookup'
def is_from_write(self):
"""
Check if the union tag is ``from_write``.
:rtype: bool
"""
return self._tag == 'from_write'
def is_to(self):
"""
Check if the union tag is ``to``.
:rtype: bool
"""
return self._tag == 'to'
def is_cant_copy_shared_folder(self):
"""
Check if the union tag is ``cant_copy_shared_folder``.
:rtype: bool
"""
return self._tag == 'cant_copy_shared_folder'
def is_cant_nest_shared_folder(self):
"""
Check if the union tag is ``cant_nest_shared_folder``.
:rtype: bool
"""
return self._tag == 'cant_nest_shared_folder'
def is_cant_move_folder_into_itself(self):
"""
Check if the union tag is ``cant_move_folder_into_itself``.
:rtype: bool
"""
return self._tag == 'cant_move_folder_into_itself'
def is_too_many_files(self):
"""
Check if the union tag is ``too_many_files``.
:rtype: bool
"""
return self._tag == 'too_many_files'
def is_duplicated_or_nested_paths(self):
"""
Check if the union tag is ``duplicated_or_nested_paths``.
:rtype: bool
"""
return self._tag == 'duplicated_or_nested_paths'
def is_cant_transfer_ownership(self):
"""
Check if the union tag is ``cant_transfer_ownership``.
:rtype: bool
"""
return self._tag == 'cant_transfer_ownership'
def is_insufficient_quota(self):
"""
Check if the union tag is ``insufficient_quota``.
:rtype: bool
"""
return self._tag == 'insufficient_quota'
def is_internal_error(self):
"""
Check if the union tag is ``internal_error``.
:rtype: bool
"""
return self._tag == 'internal_error'
def is_cant_move_shared_folder(self):
"""
Check if the union tag is ``cant_move_shared_folder``.
:rtype: bool
"""
return self._tag == 'cant_move_shared_folder'
def is_cant_move_into_vault(self):
"""
Check if the union tag is ``cant_move_into_vault``.
:rtype: bool
"""
return self._tag == 'cant_move_into_vault'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def get_from_lookup(self):
"""
Only call this if :meth:`is_from_lookup` is true.
:rtype: LookupError
"""
if not self.is_from_lookup():
raise AttributeError("tag 'from_lookup' not set")
return self._value
def get_from_write(self):
"""
        Only call this if :meth:`is_from_write` is true.
from abaqusConstants import *
class GraphicsOptions:
"""The GraphicsOptions object stores settings that control how objects are rendered in all
viewports. GraphicsOptions objects are accessed in one of two ways:
- The default graphics options. These settings are used as defaults when you start a
    session and by the Defaults button on the Graphics Options dialog box.
- The current graphics options.
The GraphicsOptions object has no constructor; Abaqus creates both the
*defaultGraphicsOptions* and the *graphicsOptions* members when a session is started.
When you start a session, Abaqus detects the graphics hardware installed on your system
and uses the setValues method in the environment file (abaqus_v6.env) to modify the
members of the GraphicsOptions object. If your graphics hardware is not supported by
Abaqus/CAE, or if you wish to override the default graphics options, you can modify
settings in the environment file. For more information, see Tuning graphics cards.
Attributes
----------
backgroundOverride: Boolean
A Boolean specifying whether a viewport background style of GRADIENT can be overridden
when displaying certain objects, such as sketches or XY plots. When overridden, the
background will be the top color of the gradient background.
backfaceCulling: Boolean
A Boolean specifying whether facets that are determined to be facing away from the
viewer will be drawn. The default value is ON. backfaceCulling provides a performance
enhancement when displaying solid elements where the front side of the element occludes
the back side of the element. Set **backfaceCulling=OFF** if it appears that you are
seeing the back side of an element and the front side is missing. You should also set
**backfaceCulling=OFF** if you believe the display is not complete.
graphicsDriver: SymbolicConstant
A SymbolicConstant specifying the graphics driver to use. Abaqus/CAE currently uses
OpenGL exclusively so the only possible value is OPEN_GL. OPEN_GL takes advantage of
graphics adapter hardware acceleration.
doubleBuffering: Boolean
A Boolean specifying whether double buffering is used. The default value is ON. Double
buffering controls where Abaqus/CAE draws its graphics. When **doubleBuffering=OFF**,
everything is drawn directly to the screen and on many systems you can see the progress
of the drawing operations. Most users find this distracting, especially in dynamic
situations such as view manipulation or animation of results. When **doubleBuffering=ON**,
the drawing occurs in a separate graphics buffer that is displayed when all the drawing
operations are complete. This results in a much smoother display during view changes or
animation. It is recommended that you set double buffering to ON.
displayLists: Boolean
A Boolean specifying whether a display list will be used to accelerate graphics
performance. The default value is ON. When **displayLists=ON**, drawing operations are
recorded in a list that can be quickly replayed. This results in faster drawing on most
systems but requires extra memory to record the drawing operations. In the Visualization
module, display lists are only used during view manipulations and then their use is
subject to the setting of **viewManipDisplayListThreshold**.
dragMode: SymbolicConstant
A SymbolicConstant specifying which rendering is used during dynamic rotations of the
view. Possible values are: FAST, specifying a rendering mode where the image is rendered
in wireframe; AS_IS, specifying a rendering mode where the image is rendered as is. The
default value is AS_IS. When set to **dragMode=FAST**, a wireframe outline is drawn during
view changes by rotation, pan, or zoom. When **dragMode=AS_IS**, everything displayed in
the window will be drawn during view changes; however, the display may lag behind the
mouse movement when the model is complex especially if you are using an older or slower
system. For newer systems with graphics hardware acceleration the AS_IS setting can be
accommodated without significant loss of performance.
antiAlias: Boolean
A Boolean specifying whether lines will be smoothed to reduce the jagged effect of
rasterization. The default value is ON.
autoFitAfterRotate: Boolean
A Boolean specifying whether the model is automatically resized to fit the viewport
after each view rotation. The default value is OFF.
polygonOffsetConstant: float
A Float specifying the offset added when drawing the faces of a polygon. The
**polygonOffsetConstant** argument affects the behavior of only the OpenGL driver.
Possible values are 0.0 ≤ **polygonOffsetConstant** ≤ 100.0. The default value is
platform dependent and is typically between 0.0 and 2.0.
polygonOffsetSlope: float
A Float specifying the factor that multiplies the slope of each line before the line is
added to the vertexes of a polygon face. The **polygonOffsetSlope** argument affects the
behavior of only the OpenGL driver. Possible values are 0.0 ≤ **polygonOffsetSlope** ≤
100.0. The default value is platform dependent and is typically between 0.0 and 2.0.
printPolygonOffsetConstant: float
A Float specifying the offset added when drawing the faces of a polygon.
**printPolygonOffsetConstant** is similar to **polygonOffsetConstant**; however,
**printPolygonOffsetConstant** is used when printing and **polygonOffsetConstant** is used
for display. Some systems, especially Windows, use different OpenGL drivers for printing
and display, and you may have to use different offset values for each driver.
printPolygonOffsetSlope: float
A Float specifying the factor that multiplies the slope of each line before the line is
added to the vertexes of a polygon face. **printPolygonOffsetSlope** is similar to
**polygonOffsetSlope**; however, **printPolygonOffsetSlope** is used when printing and
**polygonOffsetSlope** is used for display. Some systems, especially Windows, use
different OpenGL drivers for printing and display, and you may have to use different
offset values for each driver.
vertexArrays: Boolean
A Boolean specifying how the three-dimensional vertices of the model are processed. When
**vertexArrays=OFF**, each vertex of the model is processed separately. When
**vertexArrays=ON**, the vertices are processed in large blocks resulting in faster
display. Not all graphics adapters support this capability correctly. An indicator that
the graphics adapter is not processing three-dimensional vertices correctly is the
absence of graphics during rubber banding operations. For example, when dynamically
dragging the radius of a circle in the Sketcher, the circle should be visible. The
default value is ON.
vertexArraysInDisplayLists: Boolean
A Boolean specifying whether the **vertexArrays** setting should temporarily be set to OFF
when building a display list. The default value is ON. Some graphics adapters do not
properly support using vertex arrays inside a display list. Setting
**vertexArraysInDisplayLists** to OFF has a smaller impact on graphics performance than
setting **vertexArrays** or **displayLists** to OFF.
viewManipDisplayListThreshold: int
An Int specifying how large a display list may be created in order to accelerate view
manipulation operations. Increasing this value when viewing large models will increase
the delay before a view manipulation operation begins in order to obtain improved
graphics performance during the view manipulation. If set high with a large model, the
delay can be many seconds. In excessive cases, graphics memory can be exceeded and the
result may be an empty display list (no visible model) for the view manipulation. This
setting is treated as 0 if **displayLists=OFF**. Possible values are 0 ≤
**viewManipDisplayListThreshold** ≤ 20000. The default value is 40.
directRendering: Boolean
A Boolean specifying how Abaqus renders X11 graphics operations. When
**directRendering=OFF**, the graphics are rendered through the X Server. When
**directRendering=ON**, the graphics operations are sent directly to the graphics adapter
producing faster displays. For maximum performance, the initial value is ON. This
argument is used only when you first start Abaqus/CAE; you cannot configure
**directRendering** during a session.
hardwareAcceleration: Boolean
A Boolean specifying whether a hardware accelerated OpenGL graphics driver will be used
on Windows platforms. The default value is ON. When **hardwareAcceleration=OFF**, the
graphics driver uses a software implementation of OpenGL that is included with the
operating system. This results in slower drawing on most systems; however, you may have
to use the software implementation of OpenGL if the hardware graphics driver is
incompatible with Abaqus/CAE. **hardwareAcceleration** is used only when you first start
Abaqus/CAE on a Windows platform; you cannot configure **hardwareAcceleration** during a
session.
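Examples
--------
A minimal sketch of how the members above are typically adjusted, assuming a running
Abaqus/CAE session; the particular option values chosen here are only illustrative::
session.defaultGraphicsOptions.setValues(dragMode=FAST)
session.graphicsOptions.setValues(antiAlias=ON, displayLists=OFF)
The same ``setValues`` call can be placed in ``abaqus_v6.env`` to make the change apply to
every session, as described above.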
#!/usr/bin/env python
"""
============
Image Target
============
Create continuum and spectral line images for the ALMA targets.
NOTE: Run with `execfile` in CASA to use this script.
"""
from __future__ import (print_function, division)
import os
import glob
import shutil
import datetime
from collections import namedtuple
import numpy as np
#import findContinuum # probably only on NAASC machines
Spw = namedtuple('Spw', 'name, spw_id, restfreq, velo_width, nchan, ot_name')
SPWS = { spw.name : spw for spw in [
Spw('cs', 1, '97980.953MHz', '48.0km/s', 6, '#BB_3#SW-01'),
Spw('so', 1, '99299.870MHz', '48.0km/s', 6, '#BB_3#SW-01'),
Spw('hc3n', 2, '100076.391MHz', '48.0km/s', 6, '#BB_4#SW-01'),
Spw('hcop', 3, '89188.526MHz', '0.106km/s', 200, '#BB_1#SW-01'),
Spw('hcn', 4, '88631.847MHz', '0.106km/s', 300, '#BB_1#SW-02'),
Spw('nhhd', 5, '85926.258MHz', '0.107km/s', 150, '#BB_2#SW-01'),
Spw('htcop', 6, '86754.288MHz', '0.106km/s', 100, '#BB_2#SW-02'),
]}
# NOTE: continuum TDM's: '#BB_3#SW-01' and '#BB_4#SW-01'
VELOS = {
# name vlsr [km/s]
'G2432': 31.07,
'G2984': 18.42,
'G3302': 66.37,
'G3604': 51.51,
'G4029': 81.51,
}
LINE_CHANS_FMT = {
'G2432': '{0}:16~23:101~107,{1}:23~30',
'G2984': '{0}:16~23:101~107,{1}:23~30',
'G3302': '{0}:16~23:101~107,{1}:23~30',
'G3604': '{0}:16~23:101~107,{1}:23~30',
'G4029': '{0}:16~23:101~107,{1}:23~30',
}
class ImagingConfig(object):
line_chans_fmt = LINE_CHANS_FMT
path_base_fmt = '{0}/calibrated_{0}'
imsize = [500, 500]
cell = '0.23arcsec'
gridder = 'standard'
deconvolver = 'hogbom'
scales = None
robust = 1.0
refant = 'DA49'
n_sb = 2
n_spw = 6
cont_spw_id = 0
spw_id_start = 0
def __init__(self, name):
assert name in VELOS
self.name = name
self.path_base = self.path_base_fmt.format(name)
self.vis = self.path_base + '.ms'
self.viscs = self.vis + '.contsub'
self.vislf = self.path_base + '_linefree.ms'
self.vlsr = VELOS[name]
@property
def line_chans(self):
line_chans_fmt = self.line_chans_fmt[self.name]
spw_ids = np.arange(self.spw_id_start, self.n_spw)
line_chans = ','.join([
# "splat" the array into the positional args of the format string
line_chans_fmt.format(*(self.n_spw * ii + spw_ids))
for ii in range(self.n_sb)
])
return line_chans
@property
def chan_widths(self):
width = self.n_sb * [
8, # 0, continuum 98 GHz
8, # 1, continuum 100 GHz
]
# ^ NOTE that these are the number of native channels wide the new
# channels will be. Ex., the 128 continuum channels will be
# averaged into 16 channels that are 8 native channels wide.
return width
def start_vlsr(self, spw):
half_width = float(spw.velo_width.strip('km/s')) * spw.nchan / 2
start_velo = VELOS[self.name] - half_width
return '{0:.2f}km/s'.format(start_velo)
def get_one_spw(self, spw, contsub=False):
vis = self.viscs if contsub else self.vis
return get_spw_from_name(vis, spw)
def get_cont_spw(self, linefree=False):
vis = self.vislf if linefree else self.vis
return get_spw_from_name(vis, spw=None)
class ImagingConfigAca(ImagingConfig):
line_chans_fmt = LINE_CHANS_FMT
path_base_fmt = '{0}/calibrated_{0}_7m'
imsize = [144, 144]
cell = '1.3arcsec'
refant = 'CM03'
n_sb = 9
cont_spw_id = 4
spw_id_start = 0
@property
def chan_widths(self):
width = super(ImagingConfigAca, self).chan_widths
return width[2:] + width[:2]
class ImagingConfigJoint(ImagingConfig):
line_chans_fmt = LINE_CHANS_FMT
path_base_fmt = '{0}/calibrated_{0}_joint'
imsize = [250, 250]
cell = '0.46arcsec'
gridder = 'mosaic'
deconvolver = 'multiscale'
scales = [0, 7, 14, 28]
smallscalebias = 0.6
robust = 1.0
refant = ['DA49', 'CM03']
n_sb = 11
cont_spw_id = None
spw_id_start = None
def get_joint_pair(name):
return [ImagingConfig(name), ImagingConfigAca(name)]
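# A minimal driver sketch, not a prescribed workflow: it strings together the configuration
# classes above with functions defined further down in this script (split_linefree,
# clean_continuum), and assumes it is run inside CASA via execfile() so the CASA tasks exist.
def _example_continuum_workflow(field='G2432'):
    """Illustrative only; `field` must be one of the keys in VELOS."""
    imc_12m, imc_7m = get_joint_pair(field)
    # Flag the line channels, split out a channel-averaged line-free MS, then image it.
    split_linefree(imc_12m)
    clean_continuum(imc_12m, linefree=True, export=True)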
def get_spw_from_name(vis, spw=None):
"""
Use the `msmd` tool to identify the individual SPW ID numbers for a common
baseband and spectral window pair, which should always be consistent between
the 12m & 7m MSs, as set up by the OT.
Parameters
----------
vis : str
spw : namedtuple (from `SPWS`), default None
An SPW object, or `None` for the continuum
Returns
-------
spws : str
Comma-separated list of the ID numbers for each SPW
"""
msmd.open(vis)
labels = msmd.namesforspws()
if spw is None:
ot_name = '#BB_4#SW-01'
elif type(spw) is str:
ot_name = spw
else:
ot_name = spw.ot_name
ot_name = ot_name + '#FULL_RES'
spws = ','.join([
str(i) for i,s in enumerate(labels) if ot_name in s
])
msmd.close()
return spws
###############################################################################
# General utility functions
###############################################################################
def check_delete_image_files(imagename, preserve_mask=False):
"""
Check for and remove (if they exist) files created by clean such as '.flux',
'.image', etc.
Parameters
----------
imagename : str
The relative path name to the files to delete.
preserve_mask : bool, default False
Whether to preserve the `.mask` file extension
"""
print(':: Check for and remove existing files')
exts = [
'.flux', '.pb', '.image', '.weight', '.model', '.pbcor', '.psf',
'.sumwt', '.residual', '.flux.pbcoverage'
]
if not preserve_mask:
exts += ['.mask']
for ext in exts:
if os.path.exists(imagename+ext):
filen = imagename + ext
print('-- Removing {0}'.format(filen))
rmtables(filen)
if os.path.exists(imagename+'.residual'):
print('-- Hard remove .residual')
shutil.rmtree(imagename+'.residual')
def export_fits(imagename):
print(':: Exporting fits')
exportfits(imagename, imagename+'.fits', velocity=True, overwrite=True)
def concat_arrays(imcl, linefree=False, contsub=False, overwrite=True):
print(':: Check for and remove existing files')
if linefree:
concatvis = imcl[0].path_base+'_joint_linefree.ms'
elif contsub:
concatvis = imcl[0].path_base+'_joint.ms.contsub'
else:
concatvis = imcl[0].path_base+'_joint.ms'
if overwrite and os.path.exists(concatvis):
print('-- Removing {0}'.format(concatvis))
rmtables(concatvis)
print(':: Concatenating ms files')
if linefree:
concat(vis=[imcl[0].vislf, imcl[1].vislf], concatvis=concatvis)
elif contsub:
concat(vis=[imcl[0].viscs, imcl[1].viscs], concatvis=concatvis)
else:
concat(vis=[imcl[0].vis, imcl[1].vis], concatvis=concatvis)
def primary_beam_correct(imagebase, overwrite=True, export=True):
print(':: Check for and remove existing files')
imagename = imagebase + '.image'
pbimage = imagebase + '.pb'
pbcorimage = imagebase + '.pbcor'
impbcor(imagename=imagename, pbimage=pbimage, outfile=pbcorimage,
overwrite=overwrite)
if export:
export_fits(pbcorimage)
###############################################################################
# Continuum imaging
###############################################################################
def clean_continuum(imc, linefree=False, remove_existing=True, export=False):
"""
Image the continuum using only the single continuum TDM spectral window, or
the linefree channels.
Parameters
----------
imc : ImagingConfig
linefree : bool, default False
"""
if linefree:
vis = imc.vislf
spw = ''
imagename = imc.path_base
else:
vis = imc.vis
spw = imc.get_cont_spw()
imagename = imc.path_base +'_single'
if remove_existing:
check_delete_image_files(imagename)
print(':: Running clean')
tclean(vis=vis,
imagename=imagename,
spw=spw,
specmode='mfs',
imsize=imc.imsize,
cell=imc.cell,
gridder=imc.gridder,
deconvolver=imc.deconvolver,
restoringbeam='common',
weighting='briggs',
robust=imc.robust,
niter=100000,
interactive=True)
if export:
export_fits(imagename+'.image')
def split_linefree(imc):
"""
Split out the line-free continuum channels from the measurement set and
apply channel averaging (for the higher resolution line channels). The
flags are backed up, line channels flagged, measurement set split, and the
flag state restored. It should be OK to initialize the weights each time,
since the call is a no-op when the weights have already been initialized.
Parameters
----------
imc : ImagingConfig
"""
now = str(datetime.datetime.utcnow())
comment = 'Flags before split for line-free (UTC: {0})'.format(now)
print(':: Saving flag backup : `before_split`')
flagmanager(vis=imc.vis, mode='save', versionname='before_split',
comment=comment)
initweights(vis=imc.vis, wtmode='weight', dowtsp=True)
print(':: Flagging')
flagdata(vis=imc.vis, mode='manual', spw=imc.line_chans)
print(':: Split out and channel average')
if os.path.exists(imc.vislf):
print(':: Deleting existing split ms')
try:
shutil.rmtree(imc.vislf+'.flagversions')
except OSError:
pass
rmtables(imc.vislf)
split(vis=imc.vis, outputvis=imc.vislf, width=imc.chan_widths, datacolumn='data')
print(':: Restoring flag backup')
flagmanager(vis=imc.vis, mode='restore', versionname='before_split')
flagmanager(vis=imc.vis, mode='delete', versionname='before_split')
###############################################################################
# Self calibration
###############################################################################
def selfcal_image(imc, trial='0'):
"""
Self Calibration. Initiate the self-calibration process with a shallow clean
to start. Here clean only the secure detections of emission.
"""
if trial == '0':
mask = ''
else:
mask = imc.path_base + '_p{0}.mask'.format(int(trial)-1)
vis = imc.vislf
imagename = imc.path_base + '_p{0}'.format(trial)
check_delete_image_files(imagename)
tclean(vis=vis,
imagename=imagename,
mask=mask,
imsize=imc.imsize,
cell=imc.cell,
gridder='standard',
restoringbeam='common',
weighting='briggs',
robust=imc.robust,
specmode='mfs',
interactive=True,
niter=100000,
savemodel='modelcolumn')
tclean(vis=vis,
imagename=imagename,
imsize=imc.imsize,
cell=imc.cell,
gridder='standard',
restoringbeam='common',
weighting='briggs',
robust=imc.robust,
specmode='mfs',
interactive=False,
niter=0,
calcres=False,
calcpsf=False,
savemodel='modelcolumn')
# ^ NOTE: The interactive through calcpsf options are necessary to save the
# model
print('-- Check RMS and beam')
def selfcal_pcal(imc, combine='spw,scan', trial='1'):
"""
Self Calibration. Generate a per observation solution first.
"""
if trial == '1':
solint = 'inf' # Sum entire scheduling block
elif trial == '2':
solint = '1800s' # Half block
else:
raise ValueError('Invalid pcal trial: {0}'.format(trial))
caltable = imc.path_base + '.pcal' + trial
rmtables(caltable)
gaincal(vis=imc.vislf,
caltable=caltable,
gaintype='T',
refant=imc.refant,
calmode='p',
combine=combine,
solint=solint,
minsnr=3.0,
minblperant=6)
print('-- Check number of solutions lost')
def selfcal_pcal_plot(imc, trial='1'):
""" Self Calibration. Check the solution """
plotcal(caltable=imc.path_base+'.pcal'+trial,
xaxis='time', yaxis='phase', timerange='', iteration='',
subplot=111, plotrange=[0,0,-180,180])
def selfcal_apcal(imc, trial='1'):
""" Self Calibration. Apply the calibration from the phase only solution """
flagmanager(imc.vislf, mode='restore', versionname='startup')
applycal(vis=imc.vislf,
spwmap=np.zeros(54),
interp='linearPDperobs',
gaintable=[imc.path_base+'.pcal'+trial],
calwt=False,
flagbackup=False)
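# A sketch of one phase-only self-calibration round implied by the helpers above
# (image -> solve -> inspect -> apply -> re-image); the trial numbering follows the
# conventions in selfcal_image/selfcal_pcal, everything else here is illustrative.
def _example_selfcal_round(imc, trial='1'):
    selfcal_image(imc, trial='0')        # shallow clean to seed the model column
    selfcal_pcal(imc, trial=trial)       # phase-only gain solutions
    selfcal_pcal_plot(imc, trial=trial)  # inspect the solutions
    selfcal_apcal(imc, trial=trial)      # apply them to the line-free MS
    selfcal_image(imc, trial=trial)      # re-image, starting from the previous trial's mask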
###############################################################################
# Line imaging
###############################################################################
def clean_line(imc, spw, contsub=False, fullcube=False, interactive=True,
export=False, use_existing_psf=False, automask=False, **kwargs):
"""
Use `tclean` to CLEAN a spectral cube. Additional keyword arguments are
passed to `tclean`.
Parameters
----------
imc : ImagingConfig
spw : Spw
contsub : bool
Use the continuum subtracted measurement set
fullcube : bool
Yield a cube with the full number of channels, as opposed to clipping
based on the `spw` around a velocity range about the line.
interactive : bool
Interactively clean in the viewer
export : bool
Export the CASA image files to FITS
use_existing_psf : bool
Skip first major cycle, using existing `.psf` file
automask : str or bool, default False
Use automasking from:
'auto' -> usemask='autothresh'
'multi' -> usemask='multithresh'
"""
vis = imc.viscs if contsub else imc.vis
imagename = '{0}_{1}'.format(imc.path_base, spw.name)
tclean_kwargs = {
'interactive': interactive,
'niter': 1000000 if interactive else 0,
'start': '' if fullcube else imc.start_vlsr(spw),
'width': '' if fullcube else spw.velo_width,
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.369768,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 4.23006,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.023245,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.220946,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.131656,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0536798,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0865835,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0437044,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.183968,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0412103,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.13179,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0248725,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00225157,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0247354,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0166518,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.049608,
'Execution Unit/Register Files/Runtime Dynamic': 0.0189033,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0579232,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.15908,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.988274,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 4.39474e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 4.39474e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 3.84241e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.49544e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000239204,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000365523,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000416149,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0160078,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.01823,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.038844,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0543695,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.28616,
'Instruction Fetch Unit/Runtime Dynamic': 0.110003,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0427196,
'L2/Runtime Dynamic': 0.0124854,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.9148,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.342833,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0219246,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0219246,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.01833,
'Load Store Unit/Runtime Dynamic': 0.472882,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0540624,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.108125,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0191869,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0198261,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.06331,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0063751,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.252379,
'Memory Management Unit/Runtime Dynamic': 0.0262012,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.3209,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0654283,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00321813,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0264341,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage':
# Copyright 2017 The Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains all the basic operations."""
from abc import ABCMeta, abstractmethod
import logging
import math
import os
import sys
from . import graph
# import graph
# Enable swig by environment variable
if "ENABLE_SWIG_OP" in os.environ:
logging.info("Enable swig operations by environment variable")
sys.path.append("../")
import swig.op
class Op(object):
"""The basic class for all operation."""
def __init__(self, name="Op"):
# Keep the attribute name public (no leading underscore) to be compatible with TensorFlow
self.name = name
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
@abstractmethod
def forward(self):
# TODO: No need to implement in abstract method
raise NotImplementedError
def grad(self):
raise NotImplementedError
def __add__(self, other):
return AddOp(self, other)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return MinusOp(self, other)
def __rsub__(self, other):
return MinusOp(other, self)
def __mul__(self, other):
return MultipleOp(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
return DivideOp(self, other)
def __truediv__(self, other):
return DivideOp(self, other)
def __rdiv__(self, other):
return DivideOp(other, self)
def __pow__(self, power, modulo=None):
return PowerOp(self, power)
class PlaceholderOp(Op):
"""The placeholer operation which value is set when Session.run()"""
def __init__(self, dtype=None, shape=None, name="Placeholder"):
super(PlaceholderOp, self).__init__(name)
# TODO: Use dtype and shape
self._dtype = dtype
self._shape = shape
# The value is None until Session.run() is called with the feed_dict parameter
self._value = None
# TODO: Support other graph instance
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
def set_value(self, value):
self._value = value
def get_value(self):
return self._value
def forward(self):
return self._value
def grad(self, partial_derivative_opname=None):
return 0
class ConstantOp(Op):
"""The constant operation which contains one initialized value."""
def __init__(self, value, name="Constant"):
super(ConstantOp, self).__init__(name)
self._value = value
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
# TODO: Not allow to set the value
def get_value(self):
return self._value
def forward(self):
return self._value
def grad(self, partial_derivative_opname=None):
return 0
class VariableOp(Op):
"""
The variable operation which contains one variable. The variable may be
trainable or non-trainable. This is used to define the machine learning
models.
"""
def __init__(self, value, is_trainable=True, name="Variable"):
super(VariableOp, self).__init__(name)
self._value = value
self._is_trainable = is_trainable
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
if self._is_trainable:
self._graph.add_to_trainable_variables_collection(self.get_name(), self)
def get_value(self):
return self._value
def set_value(self, value):
self._value = value
def forward(self):
return self._value
def grad(self, partial_derivative_opname=None):
if partial_derivative_opname is None:
grad = 1
else:
if self.name == partial_derivative_opname:
# Specify to compute this derivative
grad = 1
else:
# Specify to compute other derivative
grad = 0
return grad
class PowerOp(Op):
def __init__(self, input, power, name="Power"):
super(PowerOp, self).__init__(name)
if not isinstance(input, Op):
self._op = ConstantOp(input)
else:
self._op = input
self._power = power
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
def forward(self):
result = pow(self._op.forward(), self._power)
return result
def grad(self, partial_derivative_opname=None):
if isinstance(self._op, PlaceholderOp) or isinstance(self._op, ConstantOp):
# op is the constant
grad = 0
elif isinstance(self._op, VariableOp):
# op is the variable
grad = self._power * pow(self._op.forward(), self._power - 1)
else:
# op is other complex operation and use chain rule
grad = self._power * pow(self._op.forward(), self._power - 1
) * self._op.grad(partial_derivative_opname)
return grad
class SquareOp(PowerOp):
def __init__(self, input, name="Square"):
super(SquareOp, self).__init__(input, 2, name)
class SquareOpOld(Op):
# TODO: Deprecated op
def __init__(self, input, name="Square"):
if not isinstance(input, Op):
self.op = ConstantOp(input)
else:
self.op = input
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
if "ENABLE_SWIG_OP" in os.environ:
result = swig.op.square(self.op.forward())
else:
result = pow(self.op.forward(), 2)
return result
def grad(self, partial_derivative_opname=None):
if isinstance(self.op, PlaceholderOp) or isinstance(self.op, ConstantOp):
# op is the constant
grad = 0
elif isinstance(self.op, VariableOp):
# op is the variable
if "ENABLE_SWIG_OP" in os.environ:
grad = swig.op.multiple(2, self.op.forward())
else:
grad = 2 * self.op.forward()
else:
# op is other complex operation and use chain rule
grad = 2 * self.op.forward() * self.op.grad(partial_derivative_opname)
return grad
class CubicOp(PowerOp):
def __init__(self, input, name="Cubic"):
super(CubicOp, self).__init__(input, 3, name)
class CubicOpOld(Op):
# TODO: Deprecated op
def __init__(self, input, name="Cubic"):
if not isinstance(input, Op):
self.op = ConstantOp(input)
else:
self.op = input
self.name = name
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
if "ENABLE_SWIG_OP" in os.environ:
result = swig.op.cubic(self.op.forward())
else:
result = math.pow(self.op.forward(), 3)
return result
def grad(self, partial_derivative_opname=None):
if isinstance(self.op, PlaceholderOp) or isinstance(self.op, ConstantOp):
# op is the constant
grad = 0
elif isinstance(self.op, VariableOp):
# op is the variable
if "ENABLE_SWIG_OP" in os.environ:
grad = swig.op.multiple(3, swig.op.square(self.op.forward()))
else:
grad = 3 * math.pow(self.op.forward(), 2)
else:
# op is other complex operation
grad = 3 * math.pow(self.op.forward(),
2) * self.op.grad(partial_derivative_opname)
return grad
class SigmoidOp(Op):
# TODO: Need to implement the forward and grad functions
def __init__(self, input, name="Sigmoid"):
super(SigmoidOp, self).__init__(name)
class AddOp(Op):
"""
The addition operation which has only two inputs. The input can be
a primitive value, ConstantOp, PlaceholderOp, VariableOp or other ops.
"""
def __init__(self, input1, input2, name="Add"):
super(AddOp, self).__init__(name)
if not isinstance(input1, Op):
self._op1 = ConstantOp(input1)
else:
self._op1 = input1
if not isinstance(input2, Op):
self._op2 = ConstantOp(input2)
else:
self._op2 = input2
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
def forward(self):
result = self._op1.forward() + self._op2.forward()
return result
def grad(self, partial_derivative_opname=None):
result = self._op1.grad(partial_derivative_opname) + self._op2.grad(
partial_derivative_opname)
return result
class MinusOp(Op):
"""
The minus operation.
"""
def __init__(self, input1, input2, name="Minus"):
super(MinusOp, self).__init__(name)
if not isinstance(input1, Op):
self._op1 = ConstantOp(input1)
else:
self._op1 = input1
if not isinstance(input2, Op):
self._op2 = ConstantOp(input2)
else:
self._op2 = input2
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
def forward(self):
result = self._op1.forward() - self._op2.forward()
return result
def grad(self, partial_derivative_opname=None):
result = self._op1.grad(partial_derivative_opname) - self._op2.grad(
partial_derivative_opname)
return result
class AddNOp(Op):
def __init__(self, *inputs):
# TODO: Deprecated op
# TODO: Support user defined name in the parameter
self.name = "AddN"
self.ops = []
for input in inputs:
if not isinstance(input, Op):
input = ConstantOp(input)
self.ops.append(input)
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = 0
for op in self.ops:
result += op.forward()
return result
def grad(self, partial_derivative_opname=None):
result = 0
for op in self.ops:
result += op.grad(partial_derivative_opname)
return result
class MultipleOp(Op):
def __init__(self, input1, input2, name="Multiple"):
super(MultipleOp, self).__init__(name)
if not isinstance(input1, Op):
self._op1 = ConstantOp(input1)
else:
self._op1 = input1
if not isinstance(input2, Op):
self._op2 = ConstantOp(input2)
else:
self._op2 = input2
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
def forward(self):
result = self._op1.forward() * self._op2.forward()
return result
def grad(self, partial_derivative_opname=None):
op1_value = self._op1.forward()
op2_value = self._op2.forward()
if isinstance(self._op1, PlaceholderOp) or isinstance(
self._op1, ConstantOp):
# op1 is the coefficient of this formula
op1_grad = self._op1.forward()
if isinstance(self._op2, PlaceholderOp) or isinstance(
self._op2, ConstantOp):
# two elements are both constant values
op2_grad = 0
else:
# op2 may has VariableOp
op2_grad = self._op2.grad(partial_derivative_opname)
result = op1_grad * op2_grad
elif isinstance(self._op2, PlaceholderOp) or isinstance(
self._op2, ConstantOp):
# op2 is the coefficient of this formula
op2_grad = self._op2.forward()
# op1 may has VariableOp
op1_grad = self._op1.grad(partial_derivative_opname)
result = op1_grad * op2_grad
else:
# op1 and op2 may has VariableOp
# Refer to https://en.wikipedia.org/wiki/Product_rule
#logging.error(
# "Not support complex formula which has multiple VariableOp")
#raise NotImplementedError
op1_grad = self._op1.grad(partial_derivative_opname)
op2_grad = self._op2.grad(partial_derivative_opname)
result = op1_grad * op2_value + op1_value * op2_grad
return result
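# A minimal usage sketch (not part of the original module), relying only on classes defined
# above: VariableOp/ConstantOp plus the operator overloads on Op, which build AddOp and
# MultipleOp nodes supporting forward() and grad().
def _example_expression():
    x = VariableOp(2.0, name="x")
    y = 3.0 * x + 1.0        # MultipleOp(x, 3.0) wrapped in an AddOp with ConstantOp(1.0)
    print(y.forward())       # 7.0
    print(y.grad("x"))       # dy/dx = 3.0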
class MultipleNOp(Op):
# TODO: Deprecated op
"""The multiple operation for n inputs."""
def __init__(self, *inputs):
self.name = "MultipleN"
self.ops = []
for input in inputs:
if not isinstance(input, Op):
input = ConstantOp(input)
self.ops.append(input)
self.graph = graph.get_default_graph()
self.graph.add_to_graph(self)
def forward(self):
result = 1
for op in self.ops:
result *= op.forward()
return result
def grad(self, partial_derivative_opname=None):
# TODO: Check the type of op to compute gradients
result = 1
for op in self.ops:
result *= op.grad(partial_derivative_opname)
return result
class DivideOp(Op):
def __init__(self, input1, input2, name="Divide"):
super(DivideOp, self).__init__(name)
if not isinstance(input1, Op):
self._op1 = ConstantOp(input1)
else:
self._op1 = input1
if not isinstance(input2, Op):
self._op2 = ConstantOp(input2)
else:
self._op2 = input2
self._graph = graph.get_default_graph()
self._graph.add_to_graph(self)
def forward(self):
result = self._op1.forward() / self._op2.forward()
return result
def grad(self, partial_derivative_opname=None):
op1_value = self._op1.forward()
op2_value = self._op2.forward()
if isinstance(self._op1, PlaceholderOp) or isinstance(
self._op1, ConstantOp):
# op1 is the constant numerator of this formula
# region, as discussed
# in the paper
do_local_betweenness(G, neighborhood, h, operator.neg)
G.delete_edges(edge)
fix_betweennesses(G)
# adds back in local betweennesses after the deletion
do_local_betweenness(G, neighborhood, h, operator.pos)
return check_for_split(G, tup)
def fix_pair_betweennesses(G):
"""
Given a graph G, makes sure that all of the pair betweennesses
listed as attributes remain possible, and removes those that are not.
Also adds new attributes where new edges have been added.
"""
for v in G.vs:
toDel = []
neededPairs = {uw for uw in itertools.combinations(G.neighbors(v), 2)}
for pair in v['pb']:
if pair not in neededPairs:
toDel.append(pair)
for d in toDel:
del v['pb'][d]
for pair in neededPairs:
if pair not in v['pb']:
v['pb'][pair] = 0
def fix_edge_betweennesses(G):
"""
Given a graph G, makes sure that every edge has a betweenness
score assigned to it.
"""
for e in G.es:
if e['eb'] is None:
e['eb'] = 0
def fix_betweennesses(G):
"""
Fixes the pair and edge betweennesses such that every attribute is up to date.
"""
fix_pair_betweennesses(G)
fix_edge_betweennesses(G)
def split_vertex(G, vToSplit, instr, h):
"""
Splits the vertex v into two new vertices, each with
edges depending on s. Returns True if the split
divided the graph, else False.
"""
neighborhood = get_neighborhood_vertex(G, vToSplit, h)
do_local_betweenness(G, neighborhood, h, operator.neg)
new_index = G.vcount()
G.add_vertex()
G.vs[new_index]['CONGA_orig'] = G.vs[vToSplit]['CONGA_orig']
G.vs[new_index]['pb'] = {uw : 0 for uw in itertools.combinations(G.neighbors(vToSplit), 2)}
# adding all relevant edges to new vertex, deleting from old one.
toAdd = list(zip(itertools.repeat(new_index), instr[0]))
toDelete = list(zip(itertools.repeat(vToSplit), instr[0]))
G.add_edges(toAdd)
G.delete_edges(toDelete)
neighborhood.append(new_index)
fix_betweennesses(G)
logging.info("split: %d, %s", vToSplit, instr)
do_local_betweenness(G, neighborhood, h, operator.pos)
# check if the two new vertices are disconnected.
return check_for_split(G, (vToSplit, new_index))
def max_split_betweenness(G, vInteresting):
"""
Performs the greedy algorithm discussed in the 2007 CONGA paper
to approximate the maximum split betweenness. Returns a tuple
(a, b, c) where a is the maximum score, b the vertex to split
to acheive the score, and c a list of the instructions for which
neighbors to connect to each side of the split.
"""
maxSplitBetweenness = 0
vToSplit = None
# for every vertex of interest, we want to figure out the maximum score achievable
# by splitting the vertices in various ways, and return that optimal split
for v in vInteresting:
clique = create_clique(G, v, G.vs['pb'][v])
if clique.size < 4:
continue
# initialize a list on how we will map the neighbors to the collapsing matrix
vMap = [[ve] for ve in G.neighbors(v)]
# we want to keep collapsing the matrix until we have a 2x2 matrix and its
# score. Then we want to remove index j from our vMap list and concatenate
# it with the vMap[i]. This begins building a way of keeping track of how
# we are splitting the vertex and its neighbors
while clique.size > 4:
i,j,clique = reduce_matrix(clique)
vMap[i] += vMap.pop(j)
if clique[0,1] >= maxSplitBetweenness:
maxSplitBetweenness = clique[0,1]
vToSplit = v
splitInstructions = vMap
if vToSplit is None:
return None
return maxSplitBetweenness, vToSplit, splitInstructions
def do_initial_betweenness(G, h):
"""
Given a graph G and a depth h, calculates all edge and pair betweennesses
and updates G's attributes to reflect the new scores.
"""
# Not guaranteed to work on multigraphs.
all_pairs_shortest_paths = []
# Counter for normalizing scores
pathCounts = Counter()
for ver in G.vs:
logging.info("initializing betweennesses for %d", ver.index)
neighborhood = get_neighborhood_vertex(G, ver, h)
neighborhood.remove(ver.index)
#for i, v in enumerate(neighborhood):
s_s_shortest_paths = G.get_all_shortest_paths(ver, to=neighborhood)#[i+1:])
all_pairs_shortest_paths += s_s_shortest_paths
# to ignore duplicate edges, uncomment the next line.
#all_pairs_shortest_paths = set(tuple(p) for p in all_pairs_shortest_paths)
for path in all_pairs_shortest_paths:
pathCounts[(path[0], path[-1])] += 1
logging.info("updating all betweenness attributes...")
for path in all_pairs_shortest_paths:
if len(path) <= h + 1:
update_betweenness(G, path, pathCounts[(path[0], path[-1])], operator.pos)
def do_local_betweenness(G, neighborhood, h, op=operator.pos):
"""
Given a neighborhood and depth h, recalculates all betweennesses
confined to the neighborhood. If op is operator.neg, it subtracts these
betweennesses from the current ones. Otherwise, it adds them.
"""
all_pairs_shortest_paths = []
pathCounts = Counter()
for i, v in enumerate(neighborhood):
s_s_shortest_paths = G.get_all_shortest_paths(v, to=neighborhood)#[i+1:])
all_pairs_shortest_paths += s_s_shortest_paths
neighSet = set(neighborhood)
neighSize = len(neighborhood)
apsp = []
for path in all_pairs_shortest_paths:
# path does not go out of region
if len(neighSet | set(path)) == neighSize:
pathCounts[(path[0], path[-1])] += 1 # can improve
apsp.append(path)
for path in apsp:
if len(path) <= h + 1:
update_betweenness(G, path, pathCounts[(path[0], path[-1])], op)
def update_betweenness(G, path, count, op):
"""
Given a shortest path in G, along with a count of paths
that length, to determine weight, updates the edge and
pair betweenness dicts with the path's new information.
"""
weight = op(1./count)
pos = 0
while pos < len(path) - 2:
G.vs[path[pos + 1]]['pb'][order_tuple((path[pos], path[pos + 2]))] += weight
G.es[G.get_eid(path[pos], path[pos + 1])]['eb'] += weight
pos += 1
if pos < len(path) - 1:
G.es[G.get_eid(path[pos], path[pos + 1])]['eb'] += weight
def get_cover(G, OG, comm):
"""
Given the graph, the original graph, and a algorithms
membership list, returns a vertex cover of the communities
referring back to the original algorithms.
"""
coverDict = defaultdict(list)
for i, community in enumerate(comm):
coverDict[community].append(int(G.vs[i]['CONGA_orig']))
return ig.clustering.VertexCover(OG, clusters=list(coverDict.values()))
def vertex_betweeenness_from_eb(G, eb):
"""
Calculates the vertex betweenness scores in G. Returns a list
in which the indices are the vertex indices and the values are
their betweennesses. The same as G.betweenness(), but faster because
it uses the edge betweenness scores.
(CONGA, page 4, equation 1)
"""
components = G.components()
membership = components.membership
vbs = []
for vertex in G.vs:
numComponents = len(components[membership[vertex.index]])
incidentEdges = G.incident(vertex)
vb = .5 * (sum(G.es[e]['eb'] for e in incidentEdges) - (numComponents - 1))
vbs.append(vb)
return vbs
def get_neighborhood_vertex(G, v, h):
"""
Given a vertex and a height/depth to
traverse, find the neighborhood as defined in the CONGA
paper.
"""
return G.neighborhood(v, order=h)
def get_neighborhood_edge(G, e, h):
"""
Given an edge and a height/depth to
traverse, find the neighborhood as defined in the CONGA
paper.
"""
neigh = set(G.neighborhood(e[0], order=h-1))
neigh.update(G.neighborhood(e[1], order=h-1))
return list(neigh)
def order_tuple(toOrder):
if toOrder[0] <= toOrder[1]:
return toOrder
return (toOrder[1], toOrder[0])
def create_clique(G, v, pb):
"""
Given a vertex and its pair betweennesses, returns a k-clique
representing all of its neighbors, with edge weights determined by the pair
betweenness scores. Algorithm discussed on page 5 of the CONGA paper.
"""
neighbors = G.neighbors(v)
# map each neighbor to its index in the adjacency matrix
mapping = {neigh : i for i, neigh in enumerate(neighbors)}
n = len(neighbors)
# Can use ints instead: (dtype=int). Only works if we use matrix_min
# instead of mat_min.
clique = np.matrix(np.zeros((n, n)))
for uw, score in pb.items():
clique[mapping[uw[0]], mapping[uw[1]]] = score
clique[mapping[uw[1]], mapping[uw[0]]] = score
# Ignore any self loops if they're there. If not, this line
# does nothing and can be removed.
np.fill_diagonal(clique, 0)
return clique
def reduce_matrix(M):
"""
Given a matrix M, collapses the row and column of the minimum value. This is just
an adjacency matrix way of implementing the greedy "collapse" discussed in CONGA.
Returns the new matrix and the collapsed indices.
"""
i,j = mat_min(M)
#i, j = matrix_min(M)
# add the ith row to the jth row and overwrite the ith row with those values
M[i,:] = M[j,:] + M[i,:]
# delete the jth row
M = np.delete(M, (j), axis=0)
# similarly with the columns
M[:,i] = M[:,j] + M[:,i]
M = np.delete(M, (j), axis=1)
np.fill_diagonal(M,0) # not sure necessary.
return i,j,M
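# A tiny numeric sketch (values made up) of the greedy collapse performed by reduce_matrix,
# assuming numpy is imported as np as elsewhere in this module: mat_min (defined below)
# locates the smallest off-diagonal entry and reduce_matrix merges that row/column pair,
# summing the scores into the surviving row and column.
def _example_reduce():
    M = np.matrix([[0., 1., 5.],
                   [1., 0., 2.],
                   [5., 2., 0.]])
    i, j, M2 = reduce_matrix(M.copy())
    print(i, j)   # indices of the merged pair (here rows/columns 0 and 1)
    print(M2)     # 2x2 matrix; the off-diagonal entry 7 = 5 + 2, the edges into the merged block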
def check_for_split(G, edge):
"""
Given an edge in tuple form, check if it splits the
graph into two disjoint clusters. If so, it returns
True. Otherwise, False.
"""
# Possibly keep a record of splits.
try:
return not G.edge_disjoint_paths(source=edge[0], target=edge[1])
# TODO: specify exception (when edge = target)
except Exception as e:
return False
def mat_min(M):
"""
Given a matrix, find an index of the minimum value (not including the
diagonal).
"""
# take a matrix we pass in, and fill the diagonal with the matrix max. This is
# so that we don't grab any values from the diag.
np.fill_diagonal(M, float('inf'))
# figure out the row/column indices of the minimum remaining (off-diagonal) value
i, j = np.unravel_index(M.argmin(), M.shape)
return i, j
# query component names from data_stage01_isotopomer_mqresultstable
def get_componentsNames_experimentIDAndSampleID(self,experiment_id_I,sample_id_I,exp_type_I=5):
'''Query component names that are used and are not internal standards (IS) from
the experiment and sample_id'''
try:
component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
data_stage01_isotopomer_MQResultsTable.is_.is_(False),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_id_I),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name)).group_by(
data_stage01_isotopomer_MQResultsTable.component_name).order_by(
data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleNameAbbreviation(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=5):
'''Query component names that are used from
the experiment and sample_name_abbreviation'''
try:
component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
sample.sample_id.like(sample_description.sample_id),
experiment.sample_name.like(sample.sample_name),
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
data_stage01_isotopomer_MQResultsTable.is_.is_(False)).group_by(
data_stage01_isotopomer_MQResultsTable.component_name).order_by(
data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentsNames_experimentIDAndSampleName(self,experiment_id_I,sample_name_I,exp_type_I=5):
        '''Query component names that are used and are not internal standards for
        the given experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name).filter(
experiment.sample_name.like(sample_name_I),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
data_stage01_isotopomer_MQResultsTable.is_.is_(False)).group_by(
data_stage01_isotopomer_MQResultsTable.component_name).order_by(
data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
# query component group names from data_stage01_isotopomer_mqresultstable
def get_componentGroupNames_sampleName(self,sample_name_I):
        '''Query component group names for the given sample name
        NOTE: intended to be used within a for loop'''
try:
component_group_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_group_name).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I)).group_by(
data_stage01_isotopomer_MQResultsTable.component_group_name).order_by(
data_stage01_isotopomer_MQResultsTable.component_group_name.asc()).all();
component_group_names_O = [];
for cgn in component_group_names: component_group_names_O.append(cgn.component_group_name);
return component_group_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentGroupName_experimentIDAndComponentName(self,experiment_id_I,component_name_I,exp_type_I=5):
        '''Query the component group name for the given experiment and component name
        NOTE: intended to be used within a for loop'''
try:
component_group_name = self.session.query(data_stage01_isotopomer_MQResultsTable.component_group_name).filter(
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I)).group_by(
data_stage01_isotopomer_MQResultsTable.component_group_name).all();
if len(component_group_name)>1:
print('more than 1 component_group_name retrieved per component_name')
component_group_name_O = component_group_name[0];
return component_group_name_O;
except SQLAlchemyError as e:
print(e);
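    # Illustrative usage sketch (hypothetical names, not part of this class): the
    # component-name queries above are typically chained inside a loop, e.g.
    #   component_names = query.get_componentsNames_experimentIDAndSampleName(experiment_id, sample_name)
    #   for component_name in component_names:
    #       group = query.get_componentGroupName_experimentIDAndComponentName(experiment_id, component_name)
    # where `query` is an instance of this class with an active session.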
# query component names, group names, intensity,
# precursor formula, product formula, precursor mass, product mass
def get_componentsNamesAndData_experimentIDAndSampleNameAndMSMethodType(self,experiment_id_I,sample_name_I,ms_methodtype_I,exp_type_I=5):
        '''Query component names, group names, peak heights, fragment formulas, and fragment masses
        that are used in the given experiment and sample_name'''
try:
component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name,
data_stage01_isotopomer_MQResultsTable.component_group_name,
data_stage01_isotopomer_MQResultsTable.height, #peak height
MS_components.precursor_formula,
MS_components.precursor_exactmass,
MS_components.product_formula,
MS_components.product_exactmass).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.sample_name.like(data_stage01_isotopomer_MQResultsTable.sample_name),
MS_components.component_name.like(data_stage01_isotopomer_MQResultsTable.component_name),
MS_components.ms_methodtype.like(ms_methodtype_I)).group_by(
data_stage01_isotopomer_MQResultsTable.component_name).order_by(
data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
component_names_O = [];
component_group_names_O = [];
intensities_O = [];
precursor_formulas_O = [];
precursor_masses_O = [];
product_formulas_O = [];
product_masses_O = [];
for cn in component_names:
component_names_O.append(cn.component_name);
                component_group_names_O.append(cn.component_group_name);
intensities_O.append(cn.height);
precursor_formulas_O.append(cn.precursor_formula);
precursor_masses_O.append(cn.precursor_exactmass);
product_formulas_O.append(cn.product_formula);
product_masses_O.append(cn.product_exactmass);
return component_names_O, component_group_names_O, intensities_O,\
precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O;
except SQLAlchemyError as e:
print(e);
# query component names, group names, precursor formula, product formula, precursor mass, product mass
def get_componentsNamesAndOther_experimentIDAndSampleNameAndMSMethodTypeAndTimePointAndDilution(self,experiment_id_I,sample_name_abbreviation_I,ms_methodtype_I,time_point_I,dilution_I,exp_type_I=5):
        '''Query component names, group names, fragment formulas, and fragment masses
        that are used in the given experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name,
data_stage01_isotopomer_MQResultsTable.component_group_name,
MS_components.precursor_formula,
MS_components.precursor_exactmass,
MS_components.product_formula,
MS_components.product_exactmass).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.time_point.like(time_point_I),
sample.sample_id.like(sample_description.sample_id),
sample.sample_dilution == dilution_I,
experiment.sample_name.like(sample.sample_name),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
MS_components.component_name.like(data_stage01_isotopomer_MQResultsTable.component_name),
MS_components.ms_methodtype.like(ms_methodtype_I)).group_by(
data_stage01_isotopomer_MQResultsTable.component_name,
data_stage01_isotopomer_MQResultsTable.component_group_name,
MS_components.precursor_formula,
MS_components.precursor_exactmass,
MS_components.product_formula,
MS_components.product_exactmass).order_by(
data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
component_names_O = [];
component_group_names_O = [];
precursor_formulas_O = [];
precursor_masses_O = [];
product_formulas_O = [];
product_masses_O = [];
if not component_names:
print('No component information found for:');
                print('experiment_id\tsample_name_abbreviation\tms_methodtype\ttime_point\tdilution');
print(experiment_id_I,sample_name_abbreviation_I,ms_methodtype_I,time_point_I,dilution_I);
return component_names_O, component_group_names_O,\
precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O;
else:
for cn in component_names:
component_names_O.append(cn.component_name);
                    component_group_names_O.append(cn.component_group_name);
precursor_formulas_O.append(cn.precursor_formula);
precursor_masses_O.append(cn.precursor_exactmass);
product_formulas_O.append(cn.product_formula);
product_masses_O.append(cn.product_exactmass);
return component_names_O, component_group_names_O,\
precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O;
except SQLAlchemyError as e:
print(e);
# query component names, group names, precursor formula, product formula, precursor mass, product mass
def get_componentsNamesAndOther_experimentIDAndSampleNameAndMSMethodTypeAndTimePointAndDilutionAndMetID(self,experiment_id_I,sample_name_abbreviation_I,ms_methodtype_I,time_point_I,dilution_I,met_id_I,exp_type_I=5):
        '''Query component names, group names, fragment formulas, and fragment masses
        that are used in the given experiment'''
try:
component_names = self.session.query(data_stage01_isotopomer_MQResultsTable.component_name,
data_stage01_isotopomer_MQResultsTable.component_group_name,
MS_components.precursor_formula,
MS_components.precursor_exactmass,
MS_components.product_formula,
MS_components.product_exactmass).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.time_point.like(time_point_I),
sample.sample_id.like(sample_description.sample_id),
sample.sample_dilution == dilution_I,
experiment.sample_name.like(sample.sample_name),
experiment.id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
data_stage01_isotopomer_MQResultsTable.sample_name.like(experiment.sample_name),
data_stage01_isotopomer_MQResultsTable.used_.is_(True),
MS_components.component_name.like(data_stage01_isotopomer_MQResultsTable.component_name),
MS_components.ms_methodtype.like(ms_methodtype_I),
MS_components.met_id.like(met_id_I)).group_by(
data_stage01_isotopomer_MQResultsTable.component_name,
data_stage01_isotopomer_MQResultsTable.component_group_name,
MS_components.precursor_formula,
MS_components.precursor_exactmass,
MS_components.product_formula,
MS_components.product_exactmass).order_by(
data_stage01_isotopomer_MQResultsTable.component_name.asc()).all();
component_names_O = [];
component_group_names_O = [];
precursor_formulas_O = [];
precursor_masses_O = [];
product_formulas_O = [];
product_masses_O = [];
#component_names_O = None;
#component_group_names_O = None;
#precursor_formulas_O = None;
#precursor_masses_O = None;
#product_formulas_O = None;
#product_masses_O = None;
if not component_names:
print('No component information found for:');
print('experiment_id\tsample_name_abbreviation\tms_methodtype\ttime_point\tdilution\tmet_id');
print(experiment_id_I,sample_name_abbreviation_I,ms_methodtype_I,time_point_I,dilution_I,met_id_I);
return component_names_O, component_group_names_O,\
precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O;
else:
for cn in component_names:
component_names_O.append(cn.component_name);
                    component_group_names_O.append(cn.component_group_name);
precursor_formulas_O.append(cn.precursor_formula);
precursor_masses_O.append(cn.precursor_exactmass);
product_formulas_O.append(cn.product_formula);
product_masses_O.append(cn.product_exactmass);
#component_names_O=component_names[0][0];
#component_group_names_O=component_names[0][1];
#precursor_formulas_O=component_names[0][2];
#precursor_masses_O=component_names[0][3];
#product_formulas_O=component_names[0][4];
#product_masses_O=component_names[0][5];
return component_names_O, component_group_names_O,\
precursor_formulas_O, precursor_masses_O, product_formulas_O, product_masses_O;
except SQLAlchemyError as e:
print(e);
# query physiological parameters from data_stage01_isotopomer_mqresultstable
def get_CVSAndCVSUnitsAndODAndDilAndDilUnits_sampleName(self,sample_name_I):
        '''Query the culture volume sampled, its units, OD600, and the reconstitution volume and units from the sample name
        NOTE: intended to be used within a for loop'''
try:
physiologicalParameters = self.session.query(sample_physiologicalParameters.culture_volume_sampled,
sample_physiologicalParameters.culture_volume_sampled_units,
sample_physiologicalParameters.od600,
sample_description.reconstitution_volume,
sample_description.reconstitution_volume_units).filter(
sample.sample_name.like(sample_name_I),
sample.sample_id.like(sample_physiologicalParameters.sample_id),
sample.sample_id.like(sample_description.sample_id)).all();
cvs_O = physiologicalParameters[0][0];
cvs_units_O = physiologicalParameters[0][1];
od600_O = physiologicalParameters[0][2];
dil_O = physiologicalParameters[0][3];
dil_units_O = physiologicalParameters[0][4];
return cvs_O, cvs_units_O, od600_O, dil_O, dil_units_O;
except SQLAlchemyError as e:
print(e);
def get_CVSAndCVSUnitsAndODAndDilAndDilUnits_sampleNameShort(self,sample_name_short_I):
        '''Query the culture volume sampled, its units, OD600, and the reconstitution volume and units from the short sample name
        NOTE: intended to be used within a for loop'''
try:
physiologicalParameters = self.session.query(sample_physiologicalParameters.culture_volume_sampled,
sample_physiologicalParameters.culture_volume_sampled_units,
sample_physiologicalParameters.od600,
sample_description.reconstitution_volume,
sample_description.reconstitution_volume_units).filter(
sample_description.sample_name_short.like(sample_name_short_I),
sample_description.sample_id.like(sample_physiologicalParameters.sample_id)).all();
cvs_O = physiologicalParameters[0][0];
cvs_units_O = physiologicalParameters[0][1];
od600_O = physiologicalParameters[0][2];
dil_O = physiologicalParameters[0][3];
dil_units_O = physiologicalParameters[0][4];
return cvs_O, cvs_units_O, od600_O, dil_O, dil_units_O;
except SQLAlchemyError as e:
print(e);
def get_conversionAndConversionUnits_biologicalMaterialAndConversionName(self,biological_material_I,conversion_name_I):
        '''Query the conversion factor and conversion units for the given
        biological material and conversion name
NOTE: intended to be used within a for loop'''
try:
physiologicalParameters = self.session.query(biologicalMaterial_massVolumeConversion.conversion_factor,
biologicalMaterial_massVolumeConversion.conversion_units).filter(
biologicalMaterial_massVolumeConversion.biological_material.like(biological_material_I),
biologicalMaterial_massVolumeConversion.conversion_name.like(conversion_name_I)).all();
conversion_O = physiologicalParameters[0][0];
conversion_units_O = physiologicalParameters[0][1];
return conversion_O, conversion_units_O;
except SQLAlchemyError as e:
print(e);
# query data from data_stage01_isotopomer_mqresultstable
def get_concAndConcUnits_sampleNameAndComponentName(self,sample_name_I,component_name_I):
        '''Query data (i.e. concentration or area/peak-height ratio) from the sample name and component name.
        Absolute concentrations are returned when flagged for use; otherwise area or height ratios are returned.
        NOTE: intended to be used within a for loop'''
# check for absolute or relative quantitation (i.e. area/peak height ratio)
try:
use_conc = self.session.query(data_stage01_isotopomer_MQResultsTable.use_calculated_concentration).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all();
if use_conc:
use_conc_O = use_conc[0][0];
else:
use_conc_O = None;
except SQLAlchemyError as e:
print(e);
if use_conc_O:
try:
data = self.session.query(data_stage01_isotopomer_MQResultsTable.calculated_concentration,
data_stage01_isotopomer_MQResultsTable.conc_units).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = data[0][1];
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
else:
# check for area or peak height ratio from quantitation_method
try:
data = self.session.query(quantitation_method.use_area).filter(
experiment.sample_name.like(sample_name_I),
experiment.quantitation_method_id.like(quantitation_method.id),
quantitation_method.component_name.like(component_name_I)).all();
if data:
ratio_O = data[0][0];
else:
ratio_O = None;
except SQLAlchemyError as e:
print(e);
if ratio_O:
try:
data = self.session.query(data_stage01_isotopomer_MQResultsTable.area_ratio).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = 'area_ratio';
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
else:
try:
data = self.session.query(data_stage01_isotopomer_MQResultsTable.height_ratio).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all();
if data:
conc_O = data[0][0];
conc_units_O = 'height_ratio';
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
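    # Illustrative usage sketch (hypothetical names): the method above can be called per
    # sample/component pair, and the returned units string distinguishes absolute
    # concentrations from the 'area_ratio'/'height_ratio' fallbacks, e.g.
    #   conc, units = query.get_concAndConcUnits_sampleNameAndComponentName(sample_name, component_name)
    #   if units in ('area_ratio', 'height_ratio'): ...  # relative quantitation only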
def get_peakHeight_sampleNameAndComponentName(self,sample_name_I,component_name_I):
        '''Query the peak height from the sample name and component name
        NOTE: intended to be used within a for loop'''
try:
data = self.session.query(data_stage01_isotopomer_MQResultsTable.height).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I),
data_stage01_isotopomer_MQResultsTable.used_.is_(True)).all();
if data:
height_O = data[0][0];
else:
height_O = None;
return height_O
except SQLAlchemyError as e:
print(e);
# query if used
def get_used_sampleNameAndComponentName(self,sample_name_I,component_name_I):
        '''Query the used_ flag from the sample name and component name
        NOTE: intended to be used within a for loop'''
try:
data = self.session.query(data_stage01_isotopomer_MQResultsTable.used_).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(sample_name_I),
                data_stage01_isotopomer_MQResultsTable.component_name.like(component_name_I)).all();
if data:
                used_O = data[0][0];
else: used_O = None;
return used_O;
except SQLAlchemyError as e:
print(e);
    # delete data from data_stage01_isotopomer_mqresultstable
def delete_row_sampleName(self,sampleNames_I):
'''Delete specific samples from an experiment by their sample name'''
deletes = [];
for d in sampleNames_I:
try:
delete = self.session.query(data_stage01_isotopomer_MQResultsTable).filter(
data_stage01_isotopomer_MQResultsTable.sample_name.like(d['sample_name'])).delete(
synchronize_session=False);
if delete == 0:
print('row not found')
print(d);
deletes.append(delete);
except SQLAlchemyError as e:
print(e);
self.session.commit();
def add_dataStage01IsotopomerMQResultsTable(self,data_I):
'''add rows of data_stage01_isotopomer_MQResultsTable'''
if data_I:
cnt = 0;
for d in data_I:
try:
if 'Index' in d:
data_add = data_stage01_isotopomer_MQResultsTable(d['Index'],
d['Sample Index'],
d['Original Filename'],
d['Sample Name'],
d['Sample ID'],
d['Sample Comment'],
d['Sample Type'],
d['Acquisition Date & Time'],
d['Rack Number'],
d['Plate Number'],
d['Vial Number'],
d['Dilution Factor'],
d['Injection Volume'],
d['Operator Name'],
d['Acq. Method Name'],
d['IS'],
d['Component Name'],
d['Component Index'],
d['Component Comment'],
d['IS Comment'],
d['Mass Info'],
d['IS Mass Info'],
d['IS Name'],
d['Component Group Name'],
d['Conc. Units'],
d['Failed Query'],
d['IS Failed Query'],
d['Peak Comment'],
d['IS Peak Comment'],
d['Actual Concentration'],
d['IS Actual Concentration'],
d['Concentration Ratio'],
d['Expected RT'],
d['IS Expected RT'],
d['Integration Type'],
d['IS Integration Type'],
d['Area'],
d['IS Area'],
d['Corrected Area'],
d['IS Corrected Area'],
d['Area Ratio'],
d['Height'],
d['IS Height'],
d['Corrected Height'],
d['IS Corrected Height'],
d['Height Ratio'],
d['Area / Height'],
d['IS Area / Height'],
d['Corrected Area/Height'],
d['IS Corrected Area/Height'],
d['Region Height'],
d['IS Region Height'],
d['Quality'],
d['IS Quality'],
d['Retention Time'],
d['IS Retention Time'],
d['Start Time'],
d['IS Start Time'],
d['End Time'],
d['IS | |
import time, datetime, argparse
import os, sys
import numpy as np
np.set_printoptions(precision=2)
import matplotlib.pyplot as plt
import copy as cp
import pickle
PROJECT_PATH = '/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/'
sys.path.append(PROJECT_PATH)
import casadi as cas
import src.MPC_Casadi as mpc
import src.TrafficWorld as tw
import src.IterativeBestResponseMPCMultiple as mibr
import src.car_plotting_multiple as cmplot
##########################################################
svo_theta = np.pi/4.0
# random_seed = args.random_seed[0]
random_seed = 3
NEW = True
if NEW:
optional_suffix = "ellipses"
subdir_name = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + optional_suffix
folder = "results/" + subdir_name + "/"
os.makedirs(folder)
os.makedirs(folder+"imgs/")
os.makedirs(folder+"data/")
os.makedirs(folder+"vids/")
os.makedirs(folder+"plots/")
else:
subdir_name = "20200224-103456_real_dim_CA"
folder = "results/" + subdir_name + "/"
print(folder)
if random_seed > 0:
np.random.seed(random_seed)
#######################################################################
T = 3 # MPC Planning Horizon
dt = 0.3
N = int(T/dt) #Number of control intervals in MPC
n_rounds_mpc = 6
percent_mpc_executed = 0.5 ## This is the percent of MPC that is executed
number_ctrl_pts_executed = int(np.floor(N*percent_mpc_executed))
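# Worked example of the horizon bookkeeping above (with the values configured here):
# T = 3 s and dt = 0.3 s give N = 10 control intervals per MPC solve; executing 50% of the
# plan gives number_ctrl_pts_executed = 5 steps per round, so n_rounds_mpc = 6 rounds
# cover 30 executed steps in total.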
XAMB_ONLY = False
n_other = 2
n_rounds_ibr = 2
world = tw.TrafficWorld(2, 0, 1000)
# large_world = tw.TrafficWorld(2, 0, 1000, 5.0)
#########################################################################
actual_xamb = np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1))
actual_uamb = np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed))
actual_xothers = [np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1)) for i in range(n_other)]
actual_uothers = [np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed)) for i in range(n_other)]
actual_all_other_x0 = [np.zeros((6, 2*N)) for i in range(n_other)]
xamb = np.zeros(shape=(6, N+1))
t_start_time = time.time()
####################################################
## Create the Cars in this Problem
all_other_x0 = []
all_other_u = []
all_other_MPC = []
all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
next_x0 = 0
for i in range(n_other):
x1_MPC = mpc.MPC(dt)
x1_MPC.n_circles = 3
x1_MPC.theta_iamb = svo_theta
x1_MPC.N = N
x1_MPC.k_change_u_v = 0.001
x1_MPC.max_delta_u = 50 * np.pi/180 * x1_MPC.dt
x1_MPC.k_u_v = 0.01
x1_MPC.k_u_delta = .00001
x1_MPC.k_change_u_v = 0.01
x1_MPC.k_change_u_delta = 0.001
x1_MPC.k_s = 0
x1_MPC.k_x = 0
x1_MPC.k_x_dot = -1.0 / 100.0
x1_MPC.k_lat = 0.001
x1_MPC.k_lon = 0.0
x1_MPC.k_phi_error = 0.001
x1_MPC.k_phi_dot = 0.01
####Vehicle Initial Conditions
if i%2 == 0:
lane_number = 0
next_x0 += x1_MPC.L + 2*x1_MPC.min_dist
else:
lane_number = 1
initial_speed = 0.75*x1_MPC.max_v
traffic_world = world
x1_MPC.fd = x1_MPC.gen_f_desired_lane(traffic_world, lane_number, True)
x0 = np.array([next_x0, traffic_world.get_lane_centerline_y(lane_number), 0, 0, initial_speed, 0]).T
## Set the initial control of the other vehicles
u1 = np.zeros((2,N))
# u1[0,:] = np.clip(np.pi/180 *np.random.normal(size=(1,N)), -2 * np.pi/180, 2 * np.pi/180)
SAME_SIDE = False
if lane_number == 1 or SAME_SIDE:
u1[0,0] = 2 * np.pi/180
else:
u1[0,0] = -2 * np.pi/180
u1[0,0] = 0
all_other_MPC += [x1_MPC]
all_other_x0 += [x0]
all_other_u += [u1]
# Settings for Ambulance
amb_MPC = cp.deepcopy(x1_MPC)
amb_MPC.theta_iamb = 0.0
amb_MPC.k_u_v = 0.0000
amb_MPC.k_u_delta = .01
amb_MPC.k_change_u_v = 0.0000
amb_MPC.k_change_u_delta = 0
amb_MPC.k_s = 0
amb_MPC.k_x = 0
amb_MPC.k_x_dot = -1.0 / 100.0
amb_MPC.k_x = -1.0/100
amb_MPC.k_x_dot = 0
amb_MPC.k_lat = 0.00001
amb_MPC.k_lon = 0.0
# amb_MPC.min_v = 0.8*initial_speed
amb_MPC.max_v = 35 * 0.447 # m/s
amb_MPC.k_phi_error = 0.1
amb_MPC.k_phi_dot = 0.01
NO_GRASS = False
amb_MPC.min_y = world.y_min
amb_MPC.max_y = world.y_max
if NO_GRASS:
amb_MPC.min_y += world.grass_width
amb_MPC.max_y -= world.grass_width
amb_MPC.fd = amb_MPC.gen_f_desired_lane(world, 0, True)
x0_amb = np.array([0, 0, 0, 0, initial_speed , 0]).T
pickle.dump(x1_MPC, open(folder + "data/"+"mpc%d"%i + ".p",'wb'))
pickle.dump(amb_MPC, open(folder + "data/"+"mpcamb" + ".p",'wb'))
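# Minimal sketch of the receding-horizon "shift and hold" warm start used in the MPC loop
# below: after executing the first n_exec control steps, the remaining plan is shifted
# forward and the last input is repeated to fill out the horizon. The helper name is ours
# and is illustrative only; the loop below performs the same operation inline with
# np.concatenate/np.tile.
def shift_and_hold(u_prev, n_exec):
    return np.concatenate((u_prev[:, n_exec:], np.tile(u_prev[:, -1:], (1, n_exec))), axis=1)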
########################################################################
#### SOLVE THE MPC #####################################################
for i_mpc in range(n_rounds_mpc):
min_slack = np.infty
actual_t = i_mpc * number_ctrl_pts_executed
###### Update the initial conditions for all vehicles
if i_mpc > 0:
x0_amb = xamb[:, number_ctrl_pts_executed]
for i in range(len(all_other_x0)):
all_other_x0[i] = all_other_x[i][:, number_ctrl_pts_executed]
###### Initial guess for the other u. This will be updated once the other vehicles
    ###### solve their best response to the ambulance. The initial guess just reuses the last solution; it could also be a lane change.
# Obtain a simulated trajectory from other vehicle control inputs
all_other_x = [np.zeros(shape=(6, N+1)) for i in range(n_other)]
all_other_x_des = [np.zeros(shape=(3, N+1)) for i in range(n_other)]
for i in range(n_other):
if i_mpc == 0:
            all_other_u[i] = np.zeros(shape=(2, N))
else:
all_other_u[i] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
x_mpci, u_all_i, x_0_i = all_other_MPC[i], all_other_u[i], all_other_x0[i]
all_other_x[i], all_other_x_des[i] = x_mpci.forward_simulate_all(x_0_i, u_all_i)
for i_rounds_ibr in range(n_rounds_ibr):
########## Solve the Ambulance MPC ##########
response_MPC = amb_MPC
response_x0 = x0_amb
nonresponse_MPC_list = all_other_MPC
nonresponse_x0_list = all_other_x0
nonresponse_u_list = all_other_u
nonresponse_x_list = all_other_x
nonresponse_xd_list = all_other_x_des
################# Generate the warm starts ###############################
u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
### Ambulance Warm Start
if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
u_warm_profiles["previous"] = uamb
else:
# take the control inputs of the last MPC and continue the ctrl
if i_mpc > 0:
u_warm_profiles["previous"] = np.concatenate((uamb[:, number_ctrl_pts_executed:], np.tile(uamb[:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
#######################################################################
min_response_cost = 99999999
for k_warm in u_warm_profiles.keys():
u_warm = u_warm_profiles[k_warm]
x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, None, nonresponse_MPC_list )
k_slack = 10000.0
k_CA = 0.000000000000000
k_CA_power = 4
wall_CA = True
bri.k_slack = k_slack
bri.k_CA = k_CA
bri.k_CA_power = k_CA_power
bri.world = world
bri.wall_CA = wall_CA
# for slack_var in bri.slack_vars_list: ## Added to constrain slacks
# bri.opti.subject_to(cas.vec(slack_var) <= 1.0)
INFEASIBLE = True
bri.generate_optimization(N, T, response_x0, None, nonresponse_x0_list, 1, slack=False)
bri.opti.set_initial(bri.u_opt, u_warm)
bri.opti.set_initial(bri.x_opt, x_warm)
bri.opti.set_initial(bri.x_desired, x_des_warm)
### Set the trajectories of the nonresponse vehicles (as given)
for i in range(n_other):
bri.opti.set_value(bri.allother_x_opt[i], nonresponse_x_list[i])
bri.opti.set_value(bri.allother_x_desired[i], nonresponse_xd_list[i])
### Solve the Optimization
# Debugging
# plot_range = [N]
# bri.opti.callback(lambda i: bri.debug_callback(i, plot_range))
# bri.opti.callback(lambda i: print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost))))
try:
bri.solve(None, nonresponse_u_list)
x1, u1, x1_des, _, _, _, _, _, _ = bri.get_solution()
print("i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
print("Dir:", subdir_name)
print("k_warm", k_warm)
INFEASIBLE = False
if bri.solution.value(bri.slack_cost) < min_slack:
current_cost = bri.solution.value(bri.total_svo_cost)
if current_cost < min_response_cost:
uamb = u1
xamb = x1
xamb_des = x1_des
min_response_cost = current_cost
min_response_warm = k_warm
min_bri = bri
# file_name = folder + "data/"+'%03d'%ibr_sub_it
# mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
# mibr.save_costs(file_name, bri)
except RuntimeError:
print("Infeasibility: k_warm %s"%k_warm)
# ibr_sub_it +=1
########### SOLVE FOR THE OTHER VEHICLES ON THE ROAD
if not XAMB_ONLY:
for i in range(len(all_other_MPC)):
response_MPC = all_other_MPC[i]
response_x0 = all_other_x0[i]
nonresponse_MPC_list = all_other_MPC[:i] + all_other_MPC[i+1:]
nonresponse_x0_list = all_other_x0[:i] + all_other_x0[i+1:]
nonresponse_u_list = all_other_u[:i] + all_other_u[i+1:]
nonresponse_x_list = all_other_x[:i] + all_other_x[i+1:]
nonresponse_xd_list = all_other_x_des[:i] + all_other_x_des[i+1:]
################ Warm Start
u_warm_profiles = mibr.generate_warm_u(N, response_MPC)
if i_rounds_ibr > 0: # warm start with the solution from the last IBR round
u_warm_profiles["previous"] = all_other_u[i]
else:
# take the control inputs of the last MPC and continue the ctrl
if i_mpc > 0:
u_warm_profiles["previous"] = np.concatenate((all_other_u[i][:, number_ctrl_pts_executed:], np.tile(all_other_u[i][:,-1:],(1, number_ctrl_pts_executed))),axis=1) ##
min_response_cost = 99999999
for k_warm in u_warm_profiles.keys():
u_warm = u_warm_profiles[k_warm]
x_warm, x_des_warm = response_MPC.forward_simulate_all(response_x0.reshape(6,1), u_warm)
bri = mibr.IterativeBestResponseMPCMultiple(response_MPC, amb_MPC, nonresponse_MPC_list)
bri.k_slack = k_slack
bri.k_CA = k_CA
bri.k_CA_power = k_CA_power
bri.world = world
bri.wall_CA = wall_CA
INFEASIBLE = True
bri.generate_optimization(N, T, response_x0, x0_amb, nonresponse_x0_list, 1, slack=False)
# for slack_var in bri.slack_vars_list: ## Added to constrain slacks
# bri.opti.subject_to(cas.vec(slack_var) <= 1.0)
bri.opti.set_initial(bri.u_opt, u_warm)
bri.opti.set_initial(bri.x_opt, x_warm)
bri.opti.set_initial(bri.x_desired, x_des_warm)
### Set the trajectories of the nonresponse vehicles (as given)
bri.opti.set_value(bri.xamb_opt, xamb)
for i in range(len(nonresponse_x_list)):
bri.opti.set_value(bri.allother_x_opt[i], nonresponse_x_list[i])
bri.opti.set_value(bri.allother_x_desired[i], nonresponse_xd_list[i])
# Debugging
# bri.opti.callback(lambda i: bri.debug_callback(i, [N]))
# bri.opti.callback(lambda i: print("J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost))))
try: ### Solve the Optimization
bri.solve(uamb, nonresponse_u_list)
x1_nr, u1_nr, x1_des_nr, _, _, _, _, _, _ = bri.get_solution()
print(" i_mpc %d n_round %d i %02d Cost %.02f Slack %.02f "%(i_mpc, i_rounds_ibr, i, bri.solution.value(bri.total_svo_cost), bri.solution.value(bri.slack_cost)))
print(" J_i %.03f, J_j %.03f, Slack %.03f, CA %.03f"%(bri.solution.value(bri.response_svo_cost), bri.solution.value(bri.other_svo_cost), bri.solution.value(bri.k_slack*bri.slack_cost), bri.solution.value(bri.k_CA*bri.collision_cost)))
print(" Dir:", subdir_name)
print(" k_warm", k_warm)
INFEASIBLE = False
if bri.solution.value(bri.slack_cost) < min_slack:
current_cost = bri.solution.value(bri.total_svo_cost)
if current_cost < min_response_cost:
                                all_other_u[i] = u1_nr
                                all_other_x[i] = x1_nr
                                all_other_x_des[i] = x1_des_nr
min_response_cost = current_cost
min_response_warm = k_warm
min_bri = bri
# file_name = folder + "data/"+'%03d'%ibr_sub_it
# mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
# mibr.save_costs(file_name, bri)
except RuntimeError:
print(" Infeasibility: k_warm %s"%k_warm)
# ibr_sub_it +=1
#
print(" IBR Done: Rd %02d / %02d"%(i_rounds_ibr, n_rounds_ibr))
file_name = folder + "data/"+'r%02d%03d'%(i_mpc, i_rounds_ibr)
if not INFEASIBLE:
        mibr.save_state(file_name, xamb, uamb, xamb_des, all_other_x, all_other_u, all_other_x_des)
mibr.save_costs(file_name, bri)
actual_t = i_mpc * number_ctrl_pts_executed
actual_xamb[:,actual_t:actual_t+number_ctrl_pts_executed+1] = xamb[:,:number_ctrl_pts_executed+1]
print(" MPC Done: Rd %02d / %02d"%(i_mpc, n_rounds_mpc))
print(" Full MPC Solution", xamb[0:2,:])
print(" Executed MPC", xamb[0:2,:number_ctrl_pts_executed+1])
print(" Solution | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['TransformArgs', 'Transform']
@pulumi.input_type
class TransformArgs:
def __init__(__self__, *,
media_services_account_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]] = None):
"""
The set of arguments for constructing a Transform resource.
:param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
:param pulumi.Input[str] description: An optional verbose description of the Transform.
:param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created.
:param pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined.
"""
pulumi.set(__self__, "media_services_account_name", media_services_account_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if outputs is not None:
pulumi.set(__self__, "outputs", outputs)
@property
@pulumi.getter(name="mediaServicesAccountName")
def media_services_account_name(self) -> pulumi.Input[str]:
"""
The Media Services account name. Changing this forces a new Transform to be created.
"""
return pulumi.get(self, "media_services_account_name")
@media_services_account_name.setter
def media_services_account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "media_services_account_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional verbose description of the Transform.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Transform. Changing this forces a new Transform to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]:
"""
One or more `output` blocks as defined below. At least one `output` must be defined.
"""
return pulumi.get(self, "outputs")
@outputs.setter
def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]):
pulumi.set(self, "outputs", value)
@pulumi.input_type
class _TransformState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
media_services_account_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Transform resources.
:param pulumi.Input[str] description: An optional verbose description of the Transform.
:param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created.
:param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created.
:param pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if media_services_account_name is not None:
pulumi.set(__self__, "media_services_account_name", media_services_account_name)
if name is not None:
pulumi.set(__self__, "name", name)
if outputs is not None:
pulumi.set(__self__, "outputs", outputs)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional verbose description of the Transform.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="mediaServicesAccountName")
def media_services_account_name(self) -> Optional[pulumi.Input[str]]:
"""
The Media Services account name. Changing this forces a new Transform to be created.
"""
return pulumi.get(self, "media_services_account_name")
@media_services_account_name.setter
def media_services_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "media_services_account_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Transform. Changing this forces a new Transform to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]:
"""
One or more `output` blocks as defined below. At least one `output` must be defined.
"""
return pulumi.get(self, "outputs")
@outputs.setter
def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]):
pulumi.set(self, "outputs", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
class Transform(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
media_services_account_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Transform.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="GRS")
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
id=example_account.id,
is_primary=True,
)])
example_transform = azure.media.Transform("exampleTransform",
resource_group_name=example_resource_group.name,
media_services_account_name=example_service_account.name,
description="My transform description",
outputs=[azure.media.TransformOutputArgs(
relative_priority="Normal",
on_error_action="ContinueJob",
builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
preset_name="AACGoodQualityAudio",
),
)])
```
### With Multiple Outputs
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="GRS")
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
id=example_account.id,
is_primary=True,
)])
example_transform = azure.media.Transform("exampleTransform",
resource_group_name=example_resource_group.name,
media_services_account_name=example_service_account.name,
description="My transform description",
outputs=[
azure.media.TransformOutputArgs(
relative_priority="Normal",
on_error_action="ContinueJob",
builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
preset_name="AACGoodQualityAudio",
),
),
azure.media.TransformOutputArgs(
relative_priority="Low",
on_error_action="ContinueJob",
audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
audio_language="en-US",
audio_analysis_mode="Basic",
),
),
azure.media.TransformOutputArgs(
relative_priority="Low",
on_error_action="StopProcessingJob",
face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
analysis_resolution="StandardDefinition",
),
),
])
```
## Import
Transforms can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: An optional verbose description of the Transform.
:param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created.
:param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TransformArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Transform.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="GRS")
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
id=example_account.id,
is_primary=True,
)])
example_transform = azure.media.Transform("exampleTransform",
resource_group_name=example_resource_group.name,
media_services_account_name=example_service_account.name,
description="My transform description",
outputs=[azure.media.TransformOutputArgs(
relative_priority="Normal",
on_error_action="ContinueJob",
builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
preset_name="AACGoodQualityAudio",
),
)])
```
### With Multiple Outputs
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="GRS")
example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
id=example_account.id,
is_primary=True,
)])
example_transform = azure.media.Transform("exampleTransform",
resource_group_name=example_resource_group.name,
media_services_account_name=example_service_account.name,
description="My transform description",
outputs=[
azure.media.TransformOutputArgs(
relative_priority="Normal",
on_error_action="ContinueJob",
builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
preset_name="AACGoodQualityAudio",
),
),
azure.media.TransformOutputArgs(
relative_priority="Low",
on_error_action="ContinueJob",
audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
audio_language="en-US",
audio_analysis_mode="Basic",
),
),
azure.media.TransformOutputArgs(
relative_priority="Low",
on_error_action="StopProcessingJob",
face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
analysis_resolution="StandardDefinition",
),
),
])
```
## Import
Transforms can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1
```
:param str resource_name: The name of the resource.
:param TransformArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TransformArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
media_services_account_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TransformArgs.__new__(TransformArgs)
__props__.__dict__["description"] = description
if media_services_account_name is None and not opts.urn:
                raise TypeError("Missing required property 'media_services_account_name'")
__author__ = '<EMAIL> @spazvt'
__author__ = '<NAME> @alice_und_bob'
from datetime import datetime
import os
import logging
from pathlib import Path
import simplejson as json
import io
import eth_utils
from numpy.core.defchararray import lower
from subscrape.decode.decode_evm_transaction import decode_tx
from subscrape.decode.decode_evm_log import decode_log
class MoonbeamScraper:
def __init__(self, db_path, moonscan_api, blockscout_api):
self.logger = logging.getLogger("MoonbeamScraper")
self.db_path = db_path
self.moonscan_api = moonscan_api
self.blockscout_api = blockscout_api
self.transactions = {}
self.abis = {} # cache of contract ABI interface definitions
self.contracts_with_known_decode_errors = []
self.tokens = {} # cache of token contract basic info
def scrape(self, operations, chain_config):
for operation in operations:
# ignore metadata
if operation.startswith("_"):
continue
if operation == "transactions":
contracts = operations[operation]
transactions_config = chain_config.create_inner_config(contracts)
if transactions_config.skip:
self.logger.info(f"Config asks to skip transactions.")
continue
for contract in contracts:
# ignore metadata
                    if contract.startswith("_"):
continue
methods = contracts[contract]
contract_config = transactions_config.create_inner_config(methods)
if contract_config.skip:
self.logger.info(f"Config asks to skip transactions of contract {contract}.")
continue
for method in methods:
# ignore metadata
                        if method.startswith("_"):
continue
# deduce config
if type(methods) is dict:
method_config = contract_config.create_inner_config(methods[method])
else:
method_config = contract_config
# config wants us to skip this call?
if method_config.skip:
self.logger.info(f"Config asks to skip contract {contract} method {method}")
continue
contract_method = f"{contract}_{method}"
assert(contract_method not in self.transactions)
self.transactions[contract_method] = {}
processor = self.process_methods_in_transaction_factory(contract_method, method)
self.fetch_transactions(contract, processor, contract_method)
elif operation == "account_transactions":
account_transactions_payload = operations[operation]
account_transactions_config = chain_config.create_inner_config(account_transactions_payload)
if account_transactions_config.skip:
self.logger.info(f"Config asks to skip account_transactions.")
continue
if "accounts" in account_transactions_payload:
accounts = account_transactions_payload['accounts']
for account in accounts:
# ignore metadata
if account.startswith("_"):
continue
# deduce config
if type(accounts) is dict:
                            account_config = account_transactions_config.create_inner_config(accounts[account])
else:
account_config = account_transactions_config
if account_config.skip:
self.logger.info(f"Config asks to skip account {account}")
continue
self.transactions[account] = {}
processor = self.process_transactions_on_account_factory(account)
self.fetch_transactions(account, processor)
else:
self.logger.error(f"'accounts' not listed in config for operation '{operation}'.")
else:
self.logger.error(f"config contained an operation that does not exist: {operation}")
exit
def fetch_transactions(self, address, processor, reference=None):
"""Fetch all transactions for a given address (account/contract) and use the given processor method to filter
or post-process each transaction as we work through them. Optionally, use 'reference' to uniquely identify this
set of post-processed transaction data.
:param address: the moonriver/moonbeam account number of interest. This could be a basic account, or a contract
address, depending on the kind of transactions being analyzed.
:type address: str
:param processor: a method that is used to post-process every transaction for the given address as it is
retrieved from the API. Processing transactions as they come in, instead of storing all transaction data helps
cut down on required storage.
:type processor: function
:param reference: (optional) Unique identifier for this set of post-processed transaction data being created,
if necessary.
:type reference: str
"""
if reference is None:
reference = address
else:
reference = reference.replace(" ", "_")
file_path = self.db_path + f"{reference}.json"
if os.path.exists(file_path):
self.logger.warning(f"{file_path} already exists. Skipping.")
return
self.logger.info(f"Fetching transactions for {reference} from {self.moonscan_api.endpoint}")
self.moonscan_api.fetch_and_process_transactions(address, processor)
payload = json.dumps(self.transactions[reference], indent=4, sort_keys=False)
file = io.open(file_path, "w")
file.write(payload)
file.close()
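    # Illustrative sketch (hypothetical names): fetch_transactions() is driven by the
    # processor factories below, e.g.
    #   processor = scraper.process_transactions_on_account_factory(account)
    #   scraper.fetch_transactions(account, processor)
    # which streams each transaction through the closure instead of storing raw API pages.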
def process_methods_in_transaction_factory(self, contract_method, method):
def process_method_in_transaction(transaction):
"""Process each transaction from a specific method of a contract, counting the number of transactions for
each account.
:param transaction: all details for a specific transaction on the specified contract.
:type transaction: dict
"""
if transaction["input"][0:10] == method:
address = transaction["from"]
if address not in self.transactions[contract_method]:
self.transactions[contract_method][address] = 1
else:
self.transactions[contract_method][address] += 1
return process_method_in_transaction
def process_transactions_on_account_factory(self, account):
def process_transaction_on_account(transaction):
"""Process each transaction for an account, capturing the necessary info.
:param transaction: all details for a specific transaction on the specified account.
:type transaction: dict
"""
timestamp = transaction['timeStamp']
acct_tx = {'utcdatetime': str(datetime.utcfromtimestamp(int(timestamp))), 'hash': transaction['hash'],
'from': transaction['from'], 'to': transaction['to'], 'valueInWei': transaction['value'],
'value': eth_utils.from_wei(int(transaction['value']), 'ether'), 'gas': transaction['gas'],
'gasPrice': transaction['gasPrice'], 'gasUsed': transaction['gasUsed']}
self.transactions[account][timestamp] = acct_tx
if 'input' in transaction and len(transaction['input']) >= 8:
# assume this was a call to a contract since input data was provided
contract_address = transaction['to']
self.retrieve_and_cache_contract_abi(contract_address)
if contract_address in self.abis and self.abis[contract_address] is not None:
decoded_transaction = decode_tx(contract_address, transaction['input'], self.abis[contract_address])
if decoded_transaction[0] == 'decode error':
if contract_address not in self.contracts_with_known_decode_errors:
self.contracts_with_known_decode_errors.append(contract_address)
decode_traceback = decoded_transaction[1]
self.logger.warning(f'Unable to decode contract interaction with contract '
f'{contract_address} in transaction:\r\n'
f'{transaction}\r\n\r\n'
f'{decode_traceback}\r\n'
f'---- Now continuing processing the rest of the transactions ----\r\n')
else:
# successfully decoded the input data to the contract interaction
contract_method_name = decoded_transaction[0]
decoded_func_params = json.loads(decoded_transaction[1])
# todo: add support for "swapETHForTokens" methods (which don't specify an input quantity?)
# todo: interpret liquidity provisioning and other events
if contract_method_name in {'swapExactTokensForTokens', 'swapTokensForExactTokens',
'swapExactTokensForETH', 'swapTokensForExactETH',
'swapExactTokensForTokensSupportingFeeOnTransferTokens',
'swapExactTokensForETHSupportingFeeOnTransferTokens'}:
token_path = decoded_func_params['path']
# retrieve and cache the token info for all tokens
for token in token_path:
if token not in self.tokens:
self.tokens[token] = self.blockscout_api.get_token_info(token)
input_token = token_path[0]
input_token_info = self.tokens[input_token]
self.transactions[account][timestamp]['input_token_name'] = input_token_info['name']
self.transactions[account][timestamp]['input_symbol'] = input_token_info['symbol']
output_token = token_path[len(token_path) - 1]
output_token_info = self.tokens[output_token]
self.transactions[account][timestamp]['output_token_name'] = output_token_info['name']
self.transactions[account][timestamp]['output_symbol'] = output_token_info['symbol']
if contract_method_name in {'swapExactTokensForTokens', 'swapExactTokensForETH',
'swapExactTokensForTokensSupportingFeeOnTransferTokens',
'swapExactTokensForETHSupportingFeeOnTransferTokens'}:
amount_in = decoded_func_params['amountIn']
amount_out = decoded_func_params['amountOutMin']
elif contract_method_name in {"swapTokensForExactTokens", "swapTokensForExactETH"}:
amount_in = decoded_func_params['amountInMax']
amount_out = decoded_func_params['amountOut']
else:
self.logger.error(f'contract method {contract_method_name} not recognized')
requested_input_quantity_float = amount_in / (10 ** int(input_token_info['decimals']))
requested_output_quantity_float = amount_out / (10 ** int(output_token_info['decimals']))
# We only have an estimate based on the inputs so far. Use the trace logs to find
# the exact swap quantities
tx_hash = transaction['hash']
receipt = self.moonscan_api.get_transaction_receipt(tx_hash)
if type(receipt) is not dict or 'logs' not in receipt:
self.logger.warning(f"For transaction {tx_hash} with contract {contract_address}, no"
f" logs/traces present for transaction receipt: {receipt}")
return
logs = receipt['logs']
decoded_logs = []
for log in logs:
contract_address = log['address']
contract_abi = self.retrieve_and_cache_contract_abi(contract_address)
if contract_address in self.abis and self.abis[contract_address] is not None:
(evt_name, decoded_event_data, schema) = decode_log(log['data'], log['topics'],
contract_abi)
if evt_name == 'decode error':
if contract_address not in self.contracts_with_known_decode_errors:
self.contracts_with_known_decode_errors.append(contract_address)
decode_traceback = decoded_transaction[1]
self.logger.warning(f'Unable to decode event log with contract '
f'{contract_address} in transaction:\r\n'
f'{transaction}\r\n\r\n'
f'{decode_traceback}\r\n'
f'---- Now continuing processing the rest of the'
f' transactions ----\r\n')
elif evt_name == 'no matching abi':
pass
else:
decoded_logs.append((evt_name, decoded_event_data, schema))
exact_input_quantity_int = 0
exact_output_quantity_int = 0
event_quantity_keywords = {'value', 'input', 'amount', 'wad'}
event_source_address_keywords = {'from', 'src'}
event_destination_address_keywords = {'to', 'dst'}
for (evt_name, decoded_event_data, schema) in decoded_logs:
decoded_event_params = json.loads(decoded_event_data)
if evt_name not in {'Transfer', 'Withdrawal'}:
continue
# Different DEXs might name their event parameters differently, so we have to be
# flexible in what dictionary keywords we use
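# For example, an ERC-20 style contract emits Transfer(from, to, value) while a
# WETH-style wrapper uses Transfer(src, dst, wad) and Withdrawal(src, wad); the
# keyword sets above cover both spellings. (Illustrative note, not original code.)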
keyword_found = False
quantity_keyword = None
for key in event_quantity_keywords:
if key in decoded_event_params:
quantity_keyword = key
keyword_found = True
continue
if not keyword_found:
self.logger.warning(f"For transaction {tx_hash} with contract {contract_address},"
f" no event keyword found for quantity. This indicates"
f" subscrape doesn't handle this particular contract"
f" implementation yet."
f" decoded_event_params={decoded_event_params}")
keyword_found = False
source_address_keyword = None
for key in event_source_address_keywords:
if key in decoded_event_params:
source_address_keyword = key
keyword_found = True
continue
if not keyword_found:
self.logger.warning(f"For transaction {tx_hash} with contract {contract_address},"
f" no event keyword found for source address. This indicates"
f" subscrape doesn't handle this particular contract"
f" implementation yet."
f" decoded_event_params={decoded_event_params}")
if evt_name == 'Transfer':
keyword_found = False
destination_address_keyword = None
for key in event_destination_address_keywords:
if key in decoded_event_params:
destination_address_keyword = key
keyword_found = True
continue
if not keyword_found:
self.logger.warning(f"For transaction {tx_hash} with contract"
f" {contract_address}, no Transfer event keyword found for"
f" destination address. This indicates subscrape doesn't"
f" handle this particular contract implementation yet."
f" decoded_event_params={decoded_event_params}")
if decoded_event_params[source_address_keyword].lower() == transaction['from'].lower():
# Transfers from source acct to one or more swap LP pair contracts in
# order to perform swaps
exact_input_quantity_int += decoded_event_params[quantity_keyword]
elif decoded_event_params[destination_address_keyword].lower() == transaction['from'].lower():
# Transfers from one or more swap LP pair contracts back to the original
# address (after swap has occurred)
exact_output_quantity_int += decoded_event_params[quantity_keyword]
elif evt_name == 'Withdrawal' and \
decoded_event_params[source_address_keyword].lower() == transaction['to'].lower():
# Final withdrawal tx back to source addr. Not used on all DEXs.
exact_output_quantity_int += decoded_event_params[quantity_keyword]
exact_amount_in_float = exact_input_quantity_int / (
10 ** int(input_token_info['decimals']))
exact_amount_out_float = exact_output_quantity_int / (
10 ** int(output_token_info['decimals']))
# validate that the exact amounts are somewhat similar to the contract input values
# (to make sure we're matching up the right values).
input_tolerance = requested_input_quantity_float * 0.2 # 20% each side
self.transactions[account][timestamp]['input_quantity'] = exact_amount_in_float
if (exact_amount_in_float >
# File: src/Shared/DC/xml/ppml.py (repo: tseaver/Zope-RFA)
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Provide conversion between Python pickles and XML
"""
from pickle import *
import struct
import base64
import re
from marshal import loads as mloads
from xyap import NoBlanks
from xyap import xyap
binary = re.compile('[^\x1f-\x7f]').search
def escape(s, encoding='repr'):
if binary(s) and isinstance(s, str):
s = base64.encodestring(s)[:-1]
encoding = 'base64'
elif '>' in s or '<' in s or '&' in s:
if not ']]>' in s:
s = '<![CDATA[' + s + ']]>'
encoding = 'cdata'
else:
s = s.replace('&', '&amp;')
s = s.replace('>', '&gt;')
s = s.replace('<', '&lt;')
return encoding, s
def unescape(s, encoding):
if encoding == 'base64':
return base64.decodestring(s)
else:
s = s.replace('&lt;', '<')
s = s.replace('&gt;', '>')
return s.replace('&amp;', '&')
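# Worked examples (illustrative, derived from the two helpers above):
#   escape('<greeting>')      -> ('cdata', '<![CDATA[<greeting>]]>')
#   escape('fish & chips]]>') -> ('repr', 'fish &amp; chips]]&gt;')
#   unescape('fish &amp; chips]]&gt;', 'repr') -> 'fish & chips]]>'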
class Global:
def __init__(self, module, name):
self.module = module
self.name = name
def __str__(self, indent=0):
if hasattr(self, 'id'):
id = ' id="%s"' % self.id
else:
id = ''
name = self.__class__.__name__.lower()
return '%s<%s%s name="%s" module="%s"/>\n' % (
' ' * indent, name, id, self.name, self.module)
class Scalar:
def __init__(self, v):
self._v = v
def value(self):
return self._v
def __str__(self, indent=0):
if hasattr(self, 'id'):
id = ' id="%s"' % self.id
else:
id = ''
name = self.__class__.__name__.lower()
return '%s<%s%s>%s</%s>\n' % (
' ' * indent, name, id, self.value(), name)
class Long(Scalar):
def value(self):
result = str(self._v)
if result[-1:] == 'L':
return result[:-1]
return result
class String(Scalar):
def __init__(self, v, encoding=''):
encoding, v = escape(v, encoding)
self.encoding = encoding
self._v = v
def __str__(self, indent=0):
if hasattr(self,'id'):
id = ' id="%s"' % self.id
else:
id = ''
if hasattr(self, 'encoding'):
encoding = ' encoding="%s"' % self.encoding
else:
encoding = ''
name = self.__class__.__name__.lower()
return '%s<%s%s%s>%s</%s>\n' % (
' ' * indent, name, id, encoding, self.value(), name)
class Unicode(String):
def __init__(self, v, encoding):
v = unicode(v, encoding)
String.__init__(self, v)
def value(self):
return self._v.encode('utf-8')
class Wrapper:
def __init__(self, v):
self._v = v
def value(self):
return self._v
def __str__(self, indent=0):
if hasattr(self, 'id'):
id = ' id="%s"' % self.id
else:
id = ''
name = self.__class__.__name__.lower()
v = self._v
i = ' ' * indent
if isinstance(v, Scalar):
return '%s<%s%s>%s</%s>\n' % (i, name, id, str(v)[:-1], name)
else:
try:
v = v.__str__(indent + 2)
except TypeError:
v = v.__str__()
return '%s<%s%s>\n%s%s</%s>\n' % (i, name, id, v, i, name)
class Collection:
def __str__(self, indent=0):
if hasattr(self, 'id'):
id = ' id="%s"' % self.id
else:
id = ''
name = self.__class__.__name__.lower()
i = ' ' * indent
if self:
return '%s<%s%s>\n%s%s</%s>\n' % (
i, name, id, self.value(indent + 2), i, name)
else:
return '%s<%s%s/>\n' % (i, name, id)
class Dictionary(Collection):
def __init__(self):
self._d = []
def __len__(self):
return len(self._d)
def __setitem__(self, k, v):
self._d.append((k, v))
def value(self, indent):
return ''.join(
map(lambda i, ind=' ' * indent, indent=indent + 4:
'%s<item>\n'
'%s'
'%s'
'%s</item>\n'
%
(ind,
Key(i[0]).__str__(indent),
Value(i[1]).__str__(indent),
ind),
self._d
))
class Sequence(Collection):
def __init__(self, v=None):
if not v:
v = []
self._subs = v
def __len__(self):
return len(self._subs)
def append(self, v):
self._subs.append(v)
def extend(self, v):
self._subs.extend(v)
def _stringify(self, v, indent):
try:
return v.__str__(indent + 2)
except TypeError:
return v.__str__()
def value(self, indent):
return ''.join(map(
lambda v, indent=indent: self._stringify(v, indent),
self._subs))
class none:
def __str__(self, indent=0):
return ' ' * indent + '<none/>\n'
none = none()
class Reference(Scalar):
def __init__(self, v):
self._v = v
def __str__(self, indent=0):
v = self._v
name = self.__class__.__name__.lower()
return '%s<%s id="%s"/>\n' % (' ' * indent, name, v)
Get = Reference
class Object(Sequence):
def __init__(self, klass, args):
self._subs = [Klass(klass), args]
def __setstate__(self, v):
self.append(State(v))
class Int(Scalar): pass
class Float(Scalar): pass
class List(Sequence): pass
class Tuple(Sequence): pass
class Key(Wrapper): pass
class Value(Wrapper): pass
class Klass(Wrapper): pass
class State(Wrapper): pass
class Pickle(Wrapper): pass
class Persistent(Wrapper): pass
class ToXMLUnpickler(Unpickler):
def load(self):
return Pickle(Unpickler.load(self))
dispatch = {}
dispatch.update(Unpickler.dispatch)
def persistent_load(self, v):
return Persistent(v)
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(String(pid)))
dispatch[PERSID] = load_persid
def load_none(self):
self.append(none)
dispatch[NONE] = load_none
def load_int(self):
self.append(Int(int(self.readline()[:-1])))
dispatch[INT] = load_int
def load_binint(self):
self.append(Int(mloads('i' + self.read(4))))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(Int(ord(self.read(1))))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(Int(mloads('i' + self.read(2) + '\000\000')))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(Long(long(self.readline()[:-1], 0)))
dispatch[LONG] = load_long
def load_float(self):
self.append(Float(float(self.readline()[:-1])))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(Float(unpack('>d', self.read(8))[0]))
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'":
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, 'insecure string pickle'
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, 'insecure string pickle'
self.append(String(rep.decode('string-escape')))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(String(self.read(len)))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(Unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(Unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(String(self.read(len)))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [Tuple(self.stack[k + 1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(Tuple())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_empty_list(self):
self.stack.append(List())
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append(Dictionary())
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [List(self.stack[k + 1:])]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = Dictionary()
items = self.stack[k + 1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i + 1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
def load_inst(self):
k = self.marker()
args = Tuple(self.stack[k + 1:])
del self.stack[k:]
module = self.readline()[:-1]
name = self.readline()[:-1]
value = Object(Global(module, name), args)
self.append(value)
dispatch[INST] = load_inst
def load_obj(self):
stack = self.stack
k = self.marker()
klass = stack[k + 1]
del stack[k + 1]
args = Tuple(stack[k + 1:])
del stack[k:]
value = Object(klass, args)
self.append(value)
dispatch[OBJ] = load_obj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
self.append(Global(module, name))
dispatch[GLOBAL] = load_global
def load_reduce(self):
stack = self.stack
callable = stack[-2]
arg_tup = stack[-1]
del stack[-2:]
value = Object(callable, arg_tup)
self.append(value)
dispatch[REDUCE] = load_reduce
idprefix=''
def load_get(self):
self.append(Get(self.idprefix + self.readline()[:-1]))
dispatch[GET] = load_get
def load_binget(self):
i = ord(self.read(1))
self.append(Get(self.idprefix + repr(i)))
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(Get(self.idprefix + repr(i)))
dispatch[LONG_BINGET] = load_long_binget
def load_put(self):
self.stack[-1].id = self.idprefix + self.readline()[:-1]
dispatch[PUT] = load_put
def load_binput(self):
i = ord(self.read(1))
last = self.stack[-1]
if getattr(last, 'id', last) is last:
last.id = self.idprefix + repr(i)
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
last = self.stack[-1]
if getattr(last, 'id', last) is last:
last.id = self.idprefix + repr(i)
dispatch[LONG_BINPUT] = load_long_binput
def ToXMLload(file):
return ToXMLUnpickler(file).load()
def ToXMLloads(str):
from StringIO import StringIO
file = StringIO(str)
return ToXMLUnpickler(file).load()
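# Illustrative usage sketch (Python 2, matching this module; not part of the
# original file): render a pickle byte string as XML.
#
#   import pickle
#   p = pickle.dumps({'answer': 42})
#   print str(ToXMLloads(p))   # the Pickle wrapper renders itself as <pickle>...</pickle>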
def name(self, tag, data):
return ''.join(data[2:]).strip()
def start_pickle(self, tag, attrs):
self._pickleids = {}
return [tag, attrs]
def save_int(self, tag, data):
if self.binary:
v = int(name(self, tag, data))
if v >= 0:
if v <= 0xff:
return BININT1 + chr(v)
if v <= 0xffff:
return '%c%c%c' % (BININT2, v & 0xff, v >> 8)
hb = v >> 31
if hb == 0 or hb == -1:
return BININT + struct.pack('<i', v)
return INT + name(self, tag, data) + '\n'
def save_float(self, tag, data):
if self.binary:
return BINFLOAT + struct.pack('>d', float(name(self, tag, data)))
else:
return FLOAT + name(self, tag, data) + '\n'
def save_put(self, v, attrs):
id = attrs.get('id', '')
if id:
prefix = id.rfind('.')
if prefix >= 0:
id = id[prefix + 1:]
elif id[0] == 'i':
id = id[1:]
if self.binary:
id = int(id)
if id < 256:
id = BINPUT + chr(id)
else:
id = LONG_BINPUT + struct.pack('<i', id)
else:
id = PUT + repr(id) + '\n'
return v + id
return v
def save_string(self, tag, data):
a = data[1]
v = ''.join(data[2:])
encoding = a['encoding']
if encoding != '':
v = unescape(v, encoding)
if self.binary:
l = len(v)
if l < 256:
v = SHORT_BINSTRING + chr(l) + v
else:
v = BINSTRING + struct.pack('<i', l) + v
else:
v = STRING + repr(v) + '\n'
return save_put(self, v, a)
def save_unicode(self, tag, data):
a = data[1]
v = ''.join(data[2:])
encoding = a['encoding']
# coding: utf-8
"""
"""
from copy import deepcopy
import datetime
import io
import json
import os
import flask
import flask_login
import itsdangerous
import werkzeug.utils
from . import frontend
from .. import logic
from ..logic import user_log, object_log, comments, object_sorting
from ..logic.actions import ActionType, get_action
from ..logic.action_permissions import get_user_action_permissions
from ..logic.object_permissions import Permissions, get_user_object_permissions, object_is_public, get_object_permissions_for_users, set_object_public, set_user_object_permissions, set_group_object_permissions, set_project_object_permissions, get_objects_with_permissions, get_object_permissions_for_groups, get_object_permissions_for_projects, request_object_permissions
from ..logic.datatypes import JSONEncoder
from ..logic.users import get_user, get_users, get_users_by_name
from ..logic.schemas import validate, generate_placeholder
from ..logic.settings import get_user_settings, set_user_settings
from ..logic.object_search import generate_filter_func, wrap_filter_func
from ..logic.groups import get_group, get_user_groups
from ..logic.objects import create_object, create_object_batch, update_object, get_object, get_object_versions
from ..logic.object_log import ObjectLogEntryType
from ..logic.projects import get_project, get_user_projects, get_user_project_permissions
from ..logic.locations import get_location, get_object_ids_at_location, get_object_location_assignment, get_object_location_assignments, get_locations, assign_location_to_object, get_locations_tree
from ..logic.files import FileLogEntryType
from ..logic.errors import GroupDoesNotExistError, ObjectDoesNotExistError, UserDoesNotExistError, ActionDoesNotExistError, ValidationError, ProjectDoesNotExistError, LocationDoesNotExistError
from .objects_forms import ObjectPermissionsForm, ObjectForm, ObjectVersionRestoreForm, ObjectUserPermissionsForm, CommentForm, ObjectGroupPermissionsForm, ObjectProjectPermissionsForm, FileForm, FileInformationForm, FileHidingForm, ObjectLocationAssignmentForm, ExternalLinkForm, ObjectPublicationForm
from ..utils import object_permissions_required
from .utils import jinja_filter, generate_qrcode
from .object_form_parser import parse_form_data
from .labels import create_labels
from .pdfexport import create_pdfexport
from .utils import check_current_user_is_not_readonly
__author__ = '<NAME> <<EMAIL>>'
def on_unauthorized(object_id):
permissions_by_user = get_object_permissions_for_users(object_id)
has_grant_user = any(
Permissions.GRANT in permissions
for permissions in permissions_by_user.values()
)
return flask.render_template('objects/unauthorized.html', object_id=object_id, has_grant_user=has_grant_user), 403
@frontend.route('/objects/')
@flask_login.login_required
def objects():
object_ids = flask.request.args.get('ids', '')
objects = []
if object_ids:
object_ids = object_ids.split(',')
try:
object_ids = [int(object_id) for object_id in object_ids]
except ValueError:
object_ids = []
readable_object_ids = []
for object_id in object_ids:
if Permissions.READ in get_user_object_permissions(object_id, user_id=flask_login.current_user.id):
readable_object_ids.append(object_id)
object_ids = readable_object_ids
for object_id in object_ids:
try:
objects.append(get_object(object_id))
except logic.errors.ObjectDoesNotExistError:
pass
action_id = None
action = None
action_type = None
project_id = None
location_id = None
location = None
user = None
user_id = None
doi = None
object_ids_at_location = None
project = None
query_string = ''
use_advanced_search = False
must_use_advanced_search = False
advanced_search_had_error = False
search_notes = []
search_tree = None
limit = None
offset = None
pagination_enabled = True
num_objects_found = len(objects)
sorting_property_name = None
sorting_order_name = None
else:
pagination_enabled = True
try:
user_id = int(flask.request.args.get('user', ''))
user = get_user(user_id)
except ValueError:
user_id = None
user = None
except UserDoesNotExistError:
user_id = None
user = None
try:
doi = logic.publications.simplify_doi(flask.request.args.get('doi', ''))
except logic.errors.InvalidDOIError:
doi = None
try:
location_id = int(flask.request.args.get('location', ''))
location = get_location(location_id)
object_ids_at_location = get_object_ids_at_location(location_id)
except ValueError:
location_id = None
location = None
object_ids_at_location = None
except LocationDoesNotExistError:
location_id = None
location = None
object_ids_at_location = None
try:
action_id = int(flask.request.args.get('action', ''))
except ValueError:
action_id = None
if action_id is not None:
action = get_action(action_id)
action_type = action.type
else:
action = None
action_type = flask.request.args.get('t', '')
action_type = {
'samples': ActionType.SAMPLE_CREATION,
'measurements': ActionType.MEASUREMENT,
'simulations': ActionType.SIMULATION
}.get(action_type, None)
try:
project_id = int(flask.request.args.get('project', ''))
except ValueError:
project_id = None
if project_id is not None:
if Permissions.READ not in get_user_project_permissions(project_id=project_id, user_id=flask_login.current_user.id, include_groups=True):
return flask.abort(403)
project = get_project(project_id)
else:
project = None
if flask.request.args.get('limit', '') == 'all':
limit = None
else:
try:
limit = int(flask.request.args.get('limit', ''))
except ValueError:
limit = None
else:
if limit <= 0:
limit = None
elif limit >= 1000:
limit = 1000
# default objects per page
if limit is None:
limit = get_user_settings(flask_login.current_user.id)['OBJECTS_PER_PAGE']
else:
set_user_settings(flask_login.current_user.id, {'OBJECTS_PER_PAGE': limit})
try:
offset = int(flask.request.args.get('offset', ''))
except ValueError:
offset = None
else:
if offset < 0:
offset = None
elif offset > 100000000:
offset = 100000000
if limit is not None and offset is None:
offset = 0
sorting_order_name = flask.request.args.get('order', None)
if sorting_order_name == 'asc':
sorting_order = object_sorting.ascending
elif sorting_order_name == 'desc':
sorting_order = object_sorting.descending
else:
sorting_order = None
sorting_property_name = flask.request.args.get('sortby', None)
if sorting_order is None:
if sorting_property_name is None:
sorting_order_name = 'desc'
sorting_order = object_sorting.descending
else:
sorting_order_name = 'asc'
sorting_order = object_sorting.ascending
if sorting_property_name is None:
sorting_property_name = '_object_id'
if sorting_property_name == '_object_id':
sorting_property = object_sorting.object_id()
elif sorting_property_name == '_creation_date':
sorting_property = object_sorting.creation_date()
elif sorting_property_name == '_last_modification_date':
sorting_property = object_sorting.last_modification_date()
else:
sorting_property = object_sorting.property_value(sorting_property_name)
sorting_function = sorting_order(sorting_property)
query_string = flask.request.args.get('q', '')
search_tree = None
use_advanced_search = flask.request.args.get('advanced', None) is not None
must_use_advanced_search = use_advanced_search
advanced_search_had_error = False
additional_search_notes = []
if not use_advanced_search and query_string:
if user_id is None:
users = get_users_by_name(query_string)
if len(users) == 1:
user = users[0]
user_id = user.id
query_string = ''
elif len(users) > 1:
additional_search_notes.append(('error', "There are multiple users with this name.", 0, 0))
if doi is None and query_string.startswith('doi:'):
try:
doi = logic.publications.simplify_doi(query_string)
query_string = ''
except logic.errors.InvalidDOIError:
pass
try:
filter_func, search_tree, use_advanced_search = generate_filter_func(query_string, use_advanced_search)
except Exception:
# TODO: ensure that advanced search does not cause exceptions
if use_advanced_search:
advanced_search_had_error = True
def filter_func(data, search_notes):
""" Return all objects"""
search_notes.append(('error', "Unable to parse search expression".format(query_string), 0, len(query_string)))
return False
else:
raise
filter_func, search_notes = wrap_filter_func(filter_func)
search_notes.extend(additional_search_notes)
if user_id is None:
object_ids_for_user = None
else:
object_ids_for_user = user_log.get_user_related_object_ids(user_id)
if doi is None:
object_ids_for_doi = None
else:
object_ids_for_doi = logic.publications.get_object_ids_linked_to_doi(doi)
if use_advanced_search and not must_use_advanced_search:
search_notes.append(('info', "The advanced search was used automatically. Search for \"{}\" to use the simple search.".format(query_string), 0, 0))
try:
object_ids = None
if object_ids_at_location is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_at_location)
if object_ids_for_user is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_for_user)
if object_ids_for_doi is not None:
if object_ids is None:
object_ids = set()
object_ids = object_ids.union(object_ids_for_doi)
if object_ids:
pagination_enabled = False
limit = None
offset = None
num_objects_found_list = []
objects = get_objects_with_permissions(
user_id=flask_login.current_user.id,
permissions=Permissions.READ,
filter_func=filter_func,
sorting_func=sorting_function,
limit=limit,
offset=offset,
action_id=action_id,
action_type=action_type,
project_id=project_id,
object_ids=object_ids,
num_objects_found=num_objects_found_list
)
num_objects_found = num_objects_found_list[0]
except Exception as e:
search_notes.append(('error', "Error during search: {}".format(e), 0, 0))
objects = []
num_objects_found = 0
if any(note[0] == 'error' for note in search_notes):
objects = []
advanced_search_had_error = True
for i, obj in enumerate(objects):
if obj.version_id == 0:
original_object = obj
else:
original_object = get_object(object_id=obj.object_id, version_id=0)
objects[i] = {
'object_id': obj.object_id,
'version_id': obj.version_id,
'created_by': get_user(original_object.user_id),
'created_at': original_object.utc_datetime.strftime('%Y-%m-%d'),
'modified_by': get_user(obj.user_id),
'last_modified_at': obj.utc_datetime.strftime('%Y-%m-%d'),
'data': obj.data,
'schema': obj.schema,
'action': get_action(obj.action_id),
'display_properties': {}
}
# TODO: select display_properties? nested display_properties? find common properties? use searched for properties?
display_properties = []
display_property_titles = {}
sample_ids = set()
measurement_ids = set()
if action is not None:
action_schema = action.schema
display_properties = action_schema.get('displayProperties', [])
for property_name in display_properties:
display_property_titles[property_name] = action_schema['properties'][property_name]['title']
for obj in objects:
for property_name in display_properties:
if property_name not in obj['data'] or '_type' not in obj['data'][property_name] or property_name not in obj['schema']['properties']:
obj['display_properties'][property_name] = None
continue
obj['display_properties'][property_name] = (obj['data'][property_name], obj['schema']['properties'][property_name])
if obj['schema']['properties'][property_name]['type'] == 'sample':
sample_ids.add(obj['data'][property_name]['object_id'])
elif obj['schema']['properties'][property_name]['type'] == 'measurement':
measurement_ids.add(obj['data'][property_name]['object_id'])
samples = {
sample_id: get_object(object_id=sample_id)
for sample_id in sample_ids
}
measurements = {
measurement_id: get_object(object_id=measurement_id)
for measurement_id in measurement_ids
}
if action_id is None:
show_action = True
else:
show_action = False
def build_modified_url(**kwargs):
return flask.url_for(
'.objects',
**{k: v for k, v in flask.request.args.items() if k not in kwargs},
**kwargs
)
return flask.render_template(
'objects/objects.html',
objects=objects,
display_properties=display_properties,
display_property_titles=display_property_titles,
search_query=query_string,
action=action,
action_id=action_id,
action_type=action_type,
ActionType=ActionType,
project=project,
project_id=project_id,
location_id=location_id,
location=location,
user_id=user_id,
user=user,
doi=doi,
samples=samples,
measurements=measurements,
build_modified_url=build_modified_url,
sorting_property=sorting_property_name,
sorting_order=sorting_order_name,
limit=limit,
offset=offset,
pagination_enabled=pagination_enabled,
num_objects_found=num_objects_found,
show_action=show_action,
use_advanced_search=use_advanced_search,
must_use_advanced_search=must_use_advanced_search,
advanced_search_had_error=advanced_search_had_error,
search_notes=search_notes,
search_tree=search_tree
)
@jinja_filter
def to_datatype(obj):
return json.loads(json.dumps(obj), object_hook=JSONEncoder.object_hook)
def get_sub_data_and_schema(data, schema, id_prefix):
sub_data = data
sub_schema = schema
try:
for key in id_prefix.split('__'):
if sub_schema['type'] == 'array':
key = int(key)
sub_schema = sub_schema['items']
elif sub_schema['type'] == 'object':
sub_schema = sub_schema['properties'][key]
else:
raise ValueError('invalid type')
if isinstance(key, int):
while key >= len(sub_data):
sub_data.append(generate_placeholder(sub_schema))
elif key not in sub_data:
sub_data[key] = generate_placeholder(sub_schema)
sub_data = sub_data[key]
if sub_schema['type'] != 'array':
raise ValueError('invalid type')
except (ValueError, KeyError, IndexError, TypeError):
# TODO: error handling/logging?
raise ValueError('invalid action')
return sub_data, sub_schema
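# Worked example (hypothetical field names): for id_prefix 'steps__0__samples' the
# loop resolves the object property 'steps' (an array), then index 0 of that array
# (padding data['steps'] with placeholders if it is too short), then the object
# property 'samples'. The final sub-schema must itself be an array, since callers
# only use this helper to add or delete array items.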
def apply_action_to_form_data(action, form_data):
new_form_data = form_data
action_id_prefix, action_index, action_type = action[len('action_'):].rsplit('__', 2)
if action_type == 'delete':
deleted_item_index = int(action_index)
parent_id_prefix = action_id_prefix
new_form_data = {}
for name in form_data:
if not name.startswith(parent_id_prefix):
new_form_data[name] = form_data[name]
else:
item_index, id_suffix = name[len(parent_id_prefix) + 2:].split('__', 1)
item_index = int(item_index)
if item_index < deleted_item_index:
new_form_data[name] = form_data[name]
if item_index > deleted_item_index:
new_name = parent_id_prefix + '__' + str(item_index - 1) + '__' + id_suffix
new_form_data[new_name] = form_data[name]
return new_form_data
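# Worked example (hypothetical form field names): with form data
#   {'object__list__0__text': 'a', 'object__list__1__text': 'b', 'object__list__2__text': 'c'}
# and action 'action_object__list__1__delete', the index-1 entry is dropped and the
# index-2 entry is renumbered, yielding
#   {'object__list__0__text': 'a', 'object__list__1__text': 'c'}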
def apply_action_to_data(action, data, schema):
action_id_prefix, action_index, action_type = action[len('action_'):].rsplit('__', 2)
if action_type not in ('add', 'delete', 'addcolumn', 'deletecolumn'):
raise ValueError('invalid action')
sub_data, sub_schema = get_sub_data_and_schema(data, schema, action_id_prefix.split('__', 1)[1])
if action_type in ('addcolumn', 'deletecolumn') and (sub_schema["style"] != "table" or sub_schema["items"]["type"] !=
Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListResourceComputertypefamilyResponse:
"""
Description: Query the list of cloud server instance type families
Summary: Query the list of cloud server instance type families
"""
UtilClient.validate_model(request)
return cas_models.ListResourceComputertypefamilyResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.resource.computertypefamily.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_unifiedresource_vswitch(
self,
request: cas_models.ListUnifiedresourceVswitchRequest,
) -> cas_models.ListUnifiedresourceVswitchResponse:
"""
Description: Query vSwitch information
Summary: Query vSwitch information
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_unifiedresource_vswitch_ex(request, headers, runtime)
async def list_unifiedresource_vswitch_async(
self,
request: cas_models.ListUnifiedresourceVswitchRequest,
) -> cas_models.ListUnifiedresourceVswitchResponse:
"""
Description: Query vSwitch information
Summary: Query vSwitch information
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_unifiedresource_vswitch_ex_async(request, headers, runtime)
def list_unifiedresource_vswitch_ex(
self,
request: cas_models.ListUnifiedresourceVswitchRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListUnifiedresourceVswitchResponse:
"""
Description: Query vSwitch information
Summary: Query vSwitch information
"""
UtilClient.validate_model(request)
return cas_models.ListUnifiedresourceVswitchResponse().from_map(
self.do_request('1.0', 'antcloud.cas.unifiedresource.vswitch.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_unifiedresource_vswitch_ex_async(
self,
request: cas_models.ListUnifiedresourceVswitchRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListUnifiedresourceVswitchResponse:
"""
Description: Query vSwitch information
Summary: Query vSwitch information
"""
UtilClient.validate_model(request)
return cas_models.ListUnifiedresourceVswitchResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.unifiedresource.vswitch.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_unifiedresource_vpc(
self,
request: cas_models.ListUnifiedresourceVpcRequest,
) -> cas_models.ListUnifiedresourceVpcResponse:
"""
Description: Query the VPC list of the current region
Summary: Query the region's VPC list
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_unifiedresource_vpc_ex(request, headers, runtime)
async def list_unifiedresource_vpc_async(
self,
request: cas_models.ListUnifiedresourceVpcRequest,
) -> cas_models.ListUnifiedresourceVpcResponse:
"""
Description: Query the VPC list of the current region
Summary: Query the region's VPC list
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_unifiedresource_vpc_ex_async(request, headers, runtime)
def list_unifiedresource_vpc_ex(
self,
request: cas_models.ListUnifiedresourceVpcRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListUnifiedresourceVpcResponse:
"""
Description: Query the VPC list of the current region
Summary: Query the region's VPC list
"""
UtilClient.validate_model(request)
return cas_models.ListUnifiedresourceVpcResponse().from_map(
self.do_request('1.0', 'antcloud.cas.unifiedresource.vpc.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_unifiedresource_vpc_ex_async(
self,
request: cas_models.ListUnifiedresourceVpcRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListUnifiedresourceVpcResponse:
"""
Description: Query the VPC list of the current region
Summary: Query the region's VPC list
"""
UtilClient.validate_model(request)
return cas_models.ListUnifiedresourceVpcResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.unifiedresource.vpc.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
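# Illustrative usage sketch (the construction of the surrounding client object is
# assumed; only the wrapper methods and model names come from this file):
#
#   request = cas_models.ListUnifiedresourceVpcRequest()
#   response = client.list_unifiedresource_vpc(request)              # blocking variant
#   response = await client.list_unifiedresource_vpc_async(request)  # async variant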
def import_resource_vpc(
self,
request: cas_models.ImportResourceVpcRequest,
) -> cas_models.ImportResourceVpcResponse:
"""
Description: Import a VPC into the workspace
Summary: Import VPC
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.import_resource_vpc_ex(request, headers, runtime)
async def import_resource_vpc_async(
self,
request: cas_models.ImportResourceVpcRequest,
) -> cas_models.ImportResourceVpcResponse:
"""
Description: Import a VPC into the workspace
Summary: Import VPC
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.import_resource_vpc_ex_async(request, headers, runtime)
def import_resource_vpc_ex(
self,
request: cas_models.ImportResourceVpcRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ImportResourceVpcResponse:
"""
Description: Import a VPC into the workspace
Summary: Import VPC
"""
UtilClient.validate_model(request)
return cas_models.ImportResourceVpcResponse().from_map(
self.do_request('1.0', 'antcloud.cas.resource.vpc.import', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def import_resource_vpc_ex_async(
self,
request: cas_models.ImportResourceVpcRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ImportResourceVpcResponse:
"""
Description: Import a VPC into the workspace
Summary: Import VPC
"""
UtilClient.validate_model(request)
return cas_models.ImportResourceVpcResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.resource.vpc.import', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def delete_resource_rule(
self,
request: cas_models.DeleteResourceRuleRequest,
) -> cas_models.DeleteResourceRuleResponse:
"""
Description: Delete a security group rule
Summary: Delete a security group rule
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.delete_resource_rule_ex(request, headers, runtime)
async def delete_resource_rule_async(
self,
request: cas_models.DeleteResourceRuleRequest,
) -> cas_models.DeleteResourceRuleResponse:
"""
Description: Delete a security group rule
Summary: Delete a security group rule
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.delete_resource_rule_ex_async(request, headers, runtime)
def delete_resource_rule_ex(
self,
request: cas_models.DeleteResourceRuleRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.DeleteResourceRuleResponse:
"""
Description: Delete a security group rule
Summary: Delete a security group rule
"""
UtilClient.validate_model(request)
return cas_models.DeleteResourceRuleResponse().from_map(
self.do_request('1.0', 'antcloud.cas.resource.rule.delete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def delete_resource_rule_ex_async(
self,
request: cas_models.DeleteResourceRuleRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.DeleteResourceRuleResponse:
"""
Description: Delete a security group rule
Summary: Delete a security group rule
"""
UtilClient.validate_model(request)
return cas_models.DeleteResourceRuleResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.resource.rule.delete', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_route_entry(
self,
request: cas_models.ListRouteEntryRequest,
) -> cas_models.ListRouteEntryResponse:
"""
Description: Query the list of route table entries
Summary: Query the list of route table entries
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_route_entry_ex(request, headers, runtime)
async def list_route_entry_async(
self,
request: cas_models.ListRouteEntryRequest,
) -> cas_models.ListRouteEntryResponse:
"""
Description: Query the list of route table entries
Summary: Query the list of route table entries
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_route_entry_ex_async(request, headers, runtime)
def list_route_entry_ex(
self,
request: cas_models.ListRouteEntryRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListRouteEntryResponse:
"""
Description: Query the list of route table entries
Summary: Query the list of route table entries
"""
UtilClient.validate_model(request)
return cas_models.ListRouteEntryResponse().from_map(
self.do_request('1.0', 'antcloud.cas.route.entry.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_route_entry_ex_async(
self,
request: cas_models.ListRouteEntryRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListRouteEntryResponse:
"""
Description: Query the list of route table entries
Summary: Query the list of route table entries
"""
UtilClient.validate_model(request)
return cas_models.ListRouteEntryResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.route.entry.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_available_disk(
self,
request: cas_models.ListAvailableDiskRequest,
) -> cas_models.ListAvailableDiskResponse:
"""
Description: Query detailed information on available disk resources
Summary: Query detailed information on available disk resources
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_available_disk_ex(request, headers, runtime)
async def list_available_disk_async(
self,
request: cas_models.ListAvailableDiskRequest,
) -> cas_models.ListAvailableDiskResponse:
"""
Description: Query detailed information on available disk resources
Summary: Query detailed information on available disk resources
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_available_disk_ex_async(request, headers, runtime)
def list_available_disk_ex(
self,
request: cas_models.ListAvailableDiskRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListAvailableDiskResponse:
"""
Description: Query detailed information on available disk resources
Summary: Query detailed information on available disk resources
"""
UtilClient.validate_model(request)
return cas_models.ListAvailableDiskResponse().from_map(
self.do_request('1.0', 'antcloud.cas.available.disk.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_available_disk_ex_async(
self,
request: cas_models.ListAvailableDiskRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListAvailableDiskResponse:
"""
Description: Query detailed information on available disk resources
Summary: Query detailed information on available disk resources
"""
UtilClient.validate_model(request)
return cas_models.ListAvailableDiskResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.available.disk.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def list_available_instancetype(
self,
request: cas_models.ListAvailableInstancetypeRequest,
) -> cas_models.ListAvailableInstancetypeResponse:
"""
Description: Query available instance type information
Summary: Query available instance type information
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.list_available_instancetype_ex(request, headers, runtime)
async def list_available_instancetype_async(
self,
request: cas_models.ListAvailableInstancetypeRequest,
) -> cas_models.ListAvailableInstancetypeResponse:
"""
Description: Query available instance type information
Summary: Query available instance type information
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.list_available_instancetype_ex_async(request, headers, runtime)
def list_available_instancetype_ex(
self,
request: cas_models.ListAvailableInstancetypeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListAvailableInstancetypeResponse:
"""
Description: Query available instance type information
Summary: Query available instance type information
"""
UtilClient.validate_model(request)
return cas_models.ListAvailableInstancetypeResponse().from_map(
self.do_request('1.0', 'antcloud.cas.available.instancetype.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def list_available_instancetype_ex_async(
self,
request: cas_models.ListAvailableInstancetypeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.ListAvailableInstancetypeResponse:
"""
Description: Query available instance type information
Summary: Query available instance type information
"""
UtilClient.validate_model(request)
return cas_models.ListAvailableInstancetypeResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.available.instancetype.list', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_resourcemeta_node(
self,
request: cas_models.QueryResourcemetaNodeRequest,
) -> cas_models.QueryResourcemetaNodeResponse:
"""
Description: Query node meta data
Summary: Query node meta data
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_resourcemeta_node_ex(request, headers, runtime)
async def query_resourcemeta_node_async(
self,
request: cas_models.QueryResourcemetaNodeRequest,
) -> cas_models.QueryResourcemetaNodeResponse:
"""
Description: Query node meta data
Summary: Query node meta data
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_resourcemeta_node_ex_async(request, headers, runtime)
def query_resourcemeta_node_ex(
self,
request: cas_models.QueryResourcemetaNodeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.QueryResourcemetaNodeResponse:
"""
Description: Query node meta data
Summary: Query node meta data
"""
UtilClient.validate_model(request)
return cas_models.QueryResourcemetaNodeResponse().from_map(
self.do_request('1.0', 'antcloud.cas.resourcemeta.node.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_resourcemeta_node_ex_async(
self,
request: cas_models.QueryResourcemetaNodeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.QueryResourcemetaNodeResponse:
"""
Description: Query node meta data
Summary: Query node meta data
"""
UtilClient.validate_model(request)
return cas_models.QueryResourcemetaNodeResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.resourcemeta.node.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_resourcemeta_appservice(
self,
request: cas_models.QueryResourcemetaAppserviceRequest,
) -> cas_models.QueryResourcemetaAppserviceResponse:
"""
Description: Query application service meta data
Summary: Query application service meta data
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_resourcemeta_appservice_ex(request, headers, runtime)
async def query_resourcemeta_appservice_async(
self,
request: cas_models.QueryResourcemetaAppserviceRequest,
) -> cas_models.QueryResourcemetaAppserviceResponse:
"""
Description: Query application service meta data
Summary: Query application service meta data
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_resourcemeta_appservice_ex_async(request, headers, runtime)
def query_resourcemeta_appservice_ex(
self,
request: cas_models.QueryResourcemetaAppserviceRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.QueryResourcemetaAppserviceResponse:
"""
Description: Query application service meta data
Summary: Query application service meta data
"""
UtilClient.validate_model(request)
return cas_models.QueryResourcemetaAppserviceResponse().from_map(
self.do_request('1.0', 'antcloud.cas.resourcemeta.appservice.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_resourcemeta_appservice_ex_async(
self,
request: cas_models.QueryResourcemetaAppserviceRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.QueryResourcemetaAppserviceResponse:
"""
Description: Query application service meta data
Summary: Query application service meta data
"""
UtilClient.validate_model(request)
return cas_models.QueryResourcemetaAppserviceResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.resourcemeta.appservice.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def sync_computer(
self,
request: cas_models.SyncComputerRequest,
) -> cas_models.SyncComputerResponse:
"""
Description: Call the underlying API to synchronize the status; if the underlying resource has been deleted, set it to deleted directly
Summary: Synchronize computer status
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.sync_computer_ex(request, headers, runtime)
async def sync_computer_async(
self,
request: cas_models.SyncComputerRequest,
) -> cas_models.SyncComputerResponse:
"""
Description: Call the underlying API to synchronize the status; if the underlying resource has been deleted, set it to deleted directly
Summary: Synchronize computer status
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.sync_computer_ex_async(request, headers, runtime)
def sync_computer_ex(
self,
request: cas_models.SyncComputerRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.SyncComputerResponse:
"""
Description: Call the underlying API to synchronize the status; if the underlying resource has been deleted, set it to deleted directly
Summary: Synchronize computer status
"""
UtilClient.validate_model(request)
return cas_models.SyncComputerResponse().from_map(
self.do_request('1.0', 'antcloud.cas.computer.sync', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def sync_computer_ex_async(
self,
request: cas_models.SyncComputerRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.SyncComputerResponse:
"""
Description: Call the underlying API to synchronize the status; if the underlying resource has been deleted, set it to deleted directly
Summary: Synchronize computer status
"""
UtilClient.validate_model(request)
return cas_models.SyncComputerResponse().from_map(
await self.do_request_async('1.0', 'antcloud.cas.computer.sync', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def sync_loadbalance(
self,
request: cas_models.SyncLoadbalanceRequest,
) -> cas_models.SyncLoadbalanceResponse:
"""
Description: Synchronize load balancer (LB) status
Summary: Synchronize load balancer (LB) status
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.sync_loadbalance_ex(request, headers, runtime)
async def sync_loadbalance_async(
self,
request: cas_models.SyncLoadbalanceRequest,
) -> cas_models.SyncLoadbalanceResponse:
"""
Description: Synchronize load balancer (LB) status
Summary: Synchronize load balancer (LB) status
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.sync_loadbalance_ex_async(request, headers, runtime)
def sync_loadbalance_ex(
self,
request: cas_models.SyncLoadbalanceRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.SyncLoadbalanceResponse:
"""
Description: Synchronize load balancer (LB) status
Summary: Synchronize load balancer (LB) status
"""
UtilClient.validate_model(request)
return cas_models.SyncLoadbalanceResponse().from_map(
self.do_request('1.0', 'antcloud.cas.loadbalance.sync', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def sync_loadbalance_ex_async(
self,
request: cas_models.SyncLoadbalanceRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> cas_models.SyncLoadbalanceResponse:
"""
Description: Synchronize load balancer (LB) status
Summary: Synchronize load balancer (LB) status
"""
UtilClient.validate_model(request)
""" Cisco_IOS_XR_sysadmin_clear_asr9k
This module contains definitions
for the Calvados model objects.
This module contains a collection of YANG
definitions for Cisco IOS\-XR SysAdmin configuration.
This module defines the top level container for
all 'clear' commands for Sysadmin.
Copyright(c) 2012\-2016 by Cisco Systems, Inc.
All rights reserved.
Copyright (c) 2012\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
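# Illustrative usage sketch (not part of the generated bindings): build the object
# hierarchy defined below. The class names, the 'location' YList and its key leafs
# come from this module; the enum values and any service call used to send the
# object to a device are assumptions left to the caller.
#
#   clear = Clear()
#   loc = Clear.Controller.Switch.Oper.Fdb.Location()
#   loc.rack, loc.card, loc.switch_id = rack, card, switch_id  # enums from the esdma types module
#   clear.controller.switch.oper.fdb.location.append(loc)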
class Clear(_Entity_):
"""
.. attribute:: controller
**type**\: :py:class:`Controller <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Controller>`
.. attribute:: plugin
**type**\: :py:class:`Plugin <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Plugin>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear, self).__init__()
self._top_entity = None
self.yang_name = "clear"
self.yang_parent_name = "Cisco-IOS-XR-sysadmin-clear-asr9k"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("controller", ("controller", Clear.Controller)), ("plugin", ("plugin", Clear.Plugin))])
self._leafs = OrderedDict()
self.controller = Clear.Controller()
self.controller.parent = self
self._children_name_map["controller"] = "controller"
self.plugin = Clear.Plugin()
self.plugin.parent = self
self._children_name_map["plugin"] = "plugin"
self._segment_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear, [], name, value)
class Controller(_Entity_):
"""
.. attribute:: switch
**type**\: :py:class:`Switch <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Controller.Switch>`
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Controller, self).__init__()
self.yang_name = "controller"
self.yang_parent_name = "clear"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("switch", ("switch", Clear.Controller.Switch))])
self._leafs = OrderedDict()
self.switch = Clear.Controller.Switch()
self.switch.parent = self
self._children_name_map["switch"] = "switch"
self._segment_path = lambda: "controller"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Controller, [], name, value)
class Switch(_Entity_):
"""
.. attribute:: oper
Control Ethernet switch operational data
**type**\: :py:class:`Oper <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Controller.Switch.Oper>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Controller.Switch, self).__init__()
self.yang_name = "switch"
self.yang_parent_name = "controller"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("oper", ("oper", Clear.Controller.Switch.Oper))])
self._leafs = OrderedDict()
self.oper = Clear.Controller.Switch.Oper()
self.oper.parent = self
self._children_name_map["oper"] = "oper"
self._segment_path = lambda: "switch"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/controller/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Controller.Switch, [], name, value)
class Oper(_Entity_):
"""
Control Ethernet switch operational data.
.. attribute:: fdb
**type**\: :py:class:`Fdb <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Controller.Switch.Oper.Fdb>`
**config**\: False
.. attribute:: statistics
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Controller.Switch.Oper.Statistics>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Controller.Switch.Oper, self).__init__()
self.yang_name = "oper"
self.yang_parent_name = "switch"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("fdb", ("fdb", Clear.Controller.Switch.Oper.Fdb)), ("statistics", ("statistics", Clear.Controller.Switch.Oper.Statistics))])
self._leafs = OrderedDict()
self.fdb = Clear.Controller.Switch.Oper.Fdb()
self.fdb.parent = self
self._children_name_map["fdb"] = "fdb"
self.statistics = Clear.Controller.Switch.Oper.Statistics()
self.statistics.parent = self
self._children_name_map["statistics"] = "statistics"
self._segment_path = lambda: "oper"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/controller/switch/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Controller.Switch.Oper, [], name, value)
class Fdb(_Entity_):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Controller.Switch.Oper.Fdb.Location>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Controller.Switch.Oper.Fdb, self).__init__()
self.yang_name = "fdb"
self.yang_parent_name = "oper"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("location", ("location", Clear.Controller.Switch.Oper.Fdb.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "fdb"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/controller/switch/oper/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Controller.Switch.Oper.Fdb, [], name, value)
class Location(_Entity_):
"""
.. attribute:: rack (key)
**type**\: :py:class:`EsdmaRackNumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types.EsdmaRackNumEnum>`
**config**\: False
.. attribute:: card (key)
**type**\: :py:class:`EsdmaCpu <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types.EsdmaCpu>`
**config**\: False
.. attribute:: switch_id (key)
**type**\: :py:class:`EsdmaSwitchTypeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types.EsdmaSwitchTypeEnum>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Controller.Switch.Oper.Fdb.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "fdb"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['rack','card','switch_id']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('rack', (YLeaf(YType.enumeration, 'rack'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types', 'EsdmaRackNumEnum', '')])),
('card', (YLeaf(YType.enumeration, 'card'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types', 'EsdmaCpu', '')])),
('switch_id', (YLeaf(YType.enumeration, 'switch-id'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types', 'EsdmaSwitchTypeEnum', '')])),
])
self.rack = None
self.card = None
self.switch_id = None
self._segment_path = lambda: "location" + "[rack='" + str(self.rack) + "']" + "[card='" + str(self.card) + "']" + "[switch-id='" + str(self.switch_id) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/controller/switch/oper/fdb/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Controller.Switch.Oper.Fdb.Location, ['rack', 'card', 'switch_id'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sysadmin_clear_asr9k as meta
return meta._meta_table['Clear.Controller.Switch.Oper.Fdb.Location']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sysadmin_clear_asr9k as meta
return meta._meta_table['Clear.Controller.Switch.Oper.Fdb']['meta_info']
class Statistics(_Entity_):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Controller.Switch.Oper.Statistics.Location>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Controller.Switch.Oper.Statistics, self).__init__()
self.yang_name = "statistics"
self.yang_parent_name = "oper"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("location", ("location", Clear.Controller.Switch.Oper.Statistics.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "statistics"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/controller/switch/oper/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Controller.Switch.Oper.Statistics, [], name, value)
class Location(_Entity_):
"""
.. attribute:: rack (key)
**type**\: :py:class:`EsdmaRackNumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types.EsdmaRackNumEnum>`
**config**\: False
.. attribute:: card (key)
**type**\: :py:class:`EsdmaCpu <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types.EsdmaCpu>`
**config**\: False
.. attribute:: switch_id (key)
**type**\: :py:class:`EsdmaSwitchTypeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types.EsdmaSwitchTypeEnum>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Controller.Switch.Oper.Statistics.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "statistics"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['rack','card','switch_id']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('rack', (YLeaf(YType.enumeration, 'rack'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types', 'EsdmaRackNumEnum', '')])),
('card', (YLeaf(YType.enumeration, 'card'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types', 'EsdmaCpu', '')])),
('switch_id', (YLeaf(YType.enumeration, 'switch-id'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_ethsw_esdma_types', 'EsdmaSwitchTypeEnum', '')])),
])
self.rack = None
self.card = None
self.switch_id = None
self._segment_path = lambda: "location" + "[rack='" + str(self.rack) + "']" + "[card='" + str(self.card) + "']" + "[switch-id='" + str(self.switch_id) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/controller/switch/oper/statistics/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Controller.Switch.Oper.Statistics.Location, ['rack', 'card', 'switch_id'], name, value)
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sysadmin_clear_asr9k as meta
return meta._meta_table['Clear.Controller.Switch.Oper.Statistics.Location']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sysadmin_clear_asr9k as meta
return meta._meta_table['Clear.Controller.Switch.Oper.Statistics']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sysadmin_clear_asr9k as meta
return meta._meta_table['Clear.Controller.Switch.Oper']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sysadmin_clear_asr9k as meta
return meta._meta_table['Clear.Controller.Switch']['meta_info']
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_sysadmin_clear_asr9k as meta
return meta._meta_table['Clear.Controller']['meta_info']
class Plugin(_Entity_):
"""
.. attribute:: slot
**type**\: :py:class:`Slot <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Plugin.Slot>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Plugin, self).__init__()
self.yang_name = "plugin"
self.yang_parent_name = "clear"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("slot", ("slot", Clear.Plugin.Slot))])
self._leafs = OrderedDict()
self.slot = Clear.Plugin.Slot()
self.slot.parent = self
self._children_name_map["slot"] = "slot"
self._segment_path = lambda: "plugin"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Plugin, [], name, value)
class Slot(_Entity_):
"""
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_clear_asr9k.Clear.Plugin.Slot.Location>`
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Plugin.Slot, self).__init__()
self.yang_name = "slot"
self.yang_parent_name = "plugin"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("location", ("location", Clear.Plugin.Slot.Location))])
self._leafs = OrderedDict()
self.location = YList(self)
self._segment_path = lambda: "slot"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-clear-asr9k:clear/plugin/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Clear.Plugin.Slot, [], name, value)
class Location(_Entity_):
"""
.. attribute:: location (key)
**type**\: str
**config**\: False
"""
_prefix = 'calvados_clear'
_revision = '2017-11-10'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Clear.Plugin.Slot.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "slot"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['location']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('location', (YLeaf(YType.str, 'location'), ['str'])),
])
self.location = None
# TODO: refactor [durations >= minLength] but be careful about edge cases
bounds = bounds[[durations >= minLength]]
maxes = maxes[[durations >= minLength]]
events = events[[durations >= minLength]]
if maxLength is not None and len(events) > 0:
durations = (bounds[:,1] - bounds[:,0] + 1) * ds
# TODO: refactor [durations <= maxLength] but be careful about edge cases
bounds = bounds[[durations <= maxLength]]
maxes = maxes[[durations <= maxLength]]
events = events[[durations <= maxLength]]
if len(events) == 0:
bounds, maxes, events = [], [], []
logging.warning("no events satisfied criteria")
return bounds, maxes, events
# Now, since all we care about are the larger windows, we should get rid of repeats
_, unique_idx = np.unique(bounds[:,0], return_index=True)
bounds = bounds[unique_idx,:] # SecondaryThreshold to SecondaryThreshold
maxes = maxes[unique_idx] # maximum value during event
events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold
return bounds, maxes, events
def signal_envelope1D(data, *, sigma=None, fs=None):
logging.warning("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!")
return signal_envelope_1d(data, sigma=sigma, fs=fs)
def signal_envelope_1d(data, *, sigma=None, fs=None):
"""Finds the signal envelope by taking the absolute value
of the Hilbert transform
Parameters
----------
data : numpy array, list, or RegularlySampledAnalogSignalArray
Input data
If data is a numpy array, it is expected to have shape
(n_signals, n_samples)
If data is a list, it is expected to have length n_signals,
where each sublist has length n_samples, i.e. data is not
jagged
sigma : float, optional
Standard deviation of the Gaussian kernel used to
smooth the envelope after applying the Hilbert transform.
Units of seconds. Default is 4 ms
fs : float, optional
Sampling rate of the signal
Returns
-------
out : same type as the input object
An object containing the signal envelope
TODO: this is not yet epoch-aware!
UPDATE: this is actually epoch-aware by now!
"""
if sigma is None:
sigma = 0.004 # 4 ms standard deviation
if fs is None:
if isinstance(data, (np.ndarray, list)):
raise ValueError("sampling frequency must be specified!")
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
fs = data.fs
if isinstance(data, (np.ndarray, list)):
data_array = np.array(data)
n_dims = np.array(data).ndim
assert n_dims <= 2, "Only 1D signals supported!"
if n_dims == 1:
input_data = data_array.reshape((1, data_array.size))
else:
input_data = data_array
n_signals, n_samples = input_data.shape
# Compute number of samples to compute fast FFTs
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (input_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
if isinstance(data, list):
envelope = envelope.tolist()
return envelope
elif isinstance(data, core.RegularlySampledAnalogSignalArray):
# Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported
assert data.data.ndim == 2
cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0)
newasa = data.copy()
# for segment in data:
for idx in range(data.n_epochs):
# print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs))
segment_data = data._data[:,cum_lengths[idx]:cum_lengths[idx+1]]
n_signals, n_samples = segment_data.shape
# Compute number of samples to compute fast FFTs:
padlen = nextfastpower(n_samples) - n_samples
# Pad data
paddeddata = np.hstack( (segment_data, np.zeros((n_signals, padlen))) )
# Use hilbert transform to get an envelope
envelope = np.absolute(hilbert(paddeddata, axis=-1))
# free up memory
del paddeddata
# Truncate results back to original length
envelope = envelope[..., :n_samples]
if sigma:
# Smooth envelope with a gaussian (sigma = 4 ms default)
EnvelopeSmoothingSD = sigma*fs
smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD,
mode='constant', axis=-1)
envelope = smoothed_envelope
newasa._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = np.atleast_2d(envelope)
return newasa
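# Illustrative usage sketch (not part of the original source): envelope of a plain numpy
# signal. Assumes numpy is available as np and that this module's scipy imports
# (scipy.signal.hilbert, scipy.ndimage) are in place, as used above.
#
#     fs_demo = 1250.0                                   # hypothetical sampling rate (Hz)
#     t_demo = np.arange(0, 2.0, 1.0 / fs_demo)
#     lfp_demo = np.sin(2 * np.pi * 8 * t_demo)          # 8 Hz test oscillation
#     env_demo = signal_envelope_1d(lfp_demo, fs=fs_demo, sigma=0.004)
#     # env_demo has shape (1, n_samples) and stays close to 1.0 away from the edges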
def nextpower(n, base=2.0):
"""Return the next integral power of two greater than the given number.
Specifically, return m such that
m >= n
m == 2**x
where x is an integer. Use base argument to specify a base other than 2.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
"""
x = base**ceil (log (n) / log (base))
if type(n) == np.ndarray:
return np.asarray (x, dtype=int)
else:
return int (x)
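# Quick sanity check (illustrative, not from the original source):
#   nextpower(1000)         -> 1024  (2**10)
#   nextpower(5, base=10.0) -> 10    (10**1)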
def nextfastpower(n):
"""Return the next integral power of small factors greater than the given
number. Specifically, return m such that
m >= n
m == 2**x * 3**y * 5**z
where x, y, and z are integers.
This is useful for ensuring fast FFT sizes.
From https://gist.github.com/bhawkins/4479607 (<NAME>)
See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html
"""
if n < 7:
return max (n, 1)
# x, y, and z are all bounded from above by the formula of nextpower.
# Compute all possible combinations for powers of 3 and 5.
# (Not too many for reasonable FFT sizes.)
def power_series (x, base):
nmax = ceil (log (x) / log (base))
return np.logspace (0.0, nmax, num=nmax+1, base=base)
n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0))
n35 = n35[n35<=n]
# Lump the powers of 3 and 5 together and solve for the powers of 2.
n2 = nextpower (n / n35)
return int (min (n2 * n35))
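# Quick sanity check (illustrative, not from the original source):
#   nextfastpower(1000) -> 1000  (already 2**3 * 5**3)
#   nextfastpower(1001) -> 1024  (next integer of the form 2**x * 3**y * 5**z)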
@keyword_deprecation(replace_x_with_y={'bw':'truncate'})
def gaussian_filter(obj, *, fs=None, sigma=None, truncate=None, inplace=False, mode=None, cval=None, within_intervals=False):
"""Smooths with a Gaussian kernel.
Smoothing is applied along the abscissa, and the same smoothing is applied to each
signal in the RegularlySampledAnalogSignalArray, or to each unit in a BinnedSpikeTrainArray.
Smoothing is applied ACROSS intervals, but smoothing WITHIN intervals is also supported.
Parameters
----------
obj : RegularlySampledAnalogSignalArray or BinnedSpikeTrainArray.
fs : float, optional
Sampling rate (in obj.base_unit^-1) of obj. If not provided, it will
be inferred.
sigma : float, optional
Standard deviation of Gaussian kernel, in obj.base_units. Default is 0.05
(50 ms if base_unit=seconds).
truncate : float, optional
Bandwidth outside of which the filter value will be zero. Default is 4.0.
inplace : bool
If True the data will be replaced with the smoothed data.
Default is False.
mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional
The mode parameter determines how the array borders are handled,
where cval is the value when mode is equal to ‘constant’. Default is
‘reflect’.
cval : scalar, optional
Value to fill past edges of input if mode is ‘constant’. Default is 0.0.
within_intervals : boolean, optional
If True, then smooth within each epoch. Otherwise smooth across epochs.
Default is False.
Note that when mode = 'wrap', smoothing within epochs is not affected
by wrapping.
Returns
-------
out : same type as obj
An object with smoothed data is returned.
"""
if sigma is None:
sigma = 0.05
if truncate is None:
truncate = 4
if mode is None:
mode = 'reflect'
if cval is None:
cval = 0.0
if not inplace:
out = copy.deepcopy(obj)
else:
out = obj
if isinstance(out, core.RegularlySampledAnalogSignalArray):
if fs is None:
fs = out.fs
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
elif isinstance(out, core.BinnedEventArray):
bst = out
if fs is None:
fs = 1/bst.ds
if fs is None:
raise ValueError("fs must either be specified, or must be contained in the {}!".format(out.type_name))
else:
raise NotImplementedError("gaussian_filter for {} is not yet supported!".format(str(type(out))))
sigma = sigma * fs
if not within_intervals:
# see https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
# (1) if smoothing across intervals, we work on a merged support
# (2) build abscissa_vals, including existing ones, and out-of-support ones
# (3) to smooth U, build auxiliary arrays V and W, with (V=U).nan=0, and (W=1).nan=0
# (4) Z = smooth(V)/smooth(W)
# (5) only keep original support, and original abscissa_vals
if isinstance(out, (core.RegularlySampledAnalogSignalArray, core.BinnedEventArray)):
support = out._abscissa.support.merge()
if not support.domain.is_finite:
support.domain = (support.start, support.stop) #TODO: #FIXME might come from abscissa definition, and not from support
missing_abscissa_vals = []
for interval in (~support):
missing_vals = frange(interval.start, interval.stop, 1/fs)
missing_abscissa_vals.extend(missing_vals)
if isinstance(out, core.RegularlySampledAnalogSignalArray):
n_signals = out.n_signals
n_samples = out.n_samples
elif isinstance(out, core.BinnedEventArray):
n_signals = out.n_series
n_samples = out.n_bins
V = np.zeros((n_signals, n_samples + len(missing_abscissa_vals)))
W = np.ones(V.shape)
all_abscissa_vals = np.sort(np.append(out._abscissa_vals, missing_abscissa_vals))
data_idx = np.searchsorted(all_abscissa_vals, out._abscissa_vals)
missing_idx = np.searchsorted(all_abscissa_vals, missing_abscissa_vals)
V[:, data_idx] = out.data
W[:, missing_idx] = 0
VV = scipy.ndimage.filters.gaussian_filter(V, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
WW = scipy.ndimage.filters.gaussian_filter(W, sigma=(0,sigma), truncate=truncate, mode=mode, cval=cval)
Z = VV[:,data_idx]/WW[:,data_idx]
out._data = Z
| |
max_size_for_optimizer = None
ret_item_list = ret_item_list[0:first_false]
ret_arr['items'] = ret_item_list
if max_size is not None:
ret_arr['maxItems'] = max_size
if max_size_for_optimizer is not None:
if max_size is None or max_size_for_optimizer < max_size:
ret_arr['maxItemsForOptimizer'] = max_size_for_optimizer
s_typed = [ret_arr]
# TODO: more!
assert not s_all
ret_all = []
ret_main = s_extra if s_extra else {}
if s_type_for_optimizer is not None:
ret_main["laleType"] = s_type_for_optimizer
if s_enum:
# we should simplify these as for s_not_enum
ret_main['enum']=list(s_enum)
# now, we do some extra work to keep 'laleType':'operator' annotations
if s_type_for_optimizer is None:
from lale.operators import Operator
if all(isinstance(x,Operator) for x in s_enum):
# All the enumeration values are operators
# This means it is probably an operator schema
# which might have been missed if
# this is being allOf'ed with an anyOfList
if s_any and all(hasAnyOperatorSchemas(s) for s in s_any):
ret_main["laleType"] = 'operator'
return ret_main
if ret_main:
if s_typed:
s_typed[0] = {**ret_main, **s_typed[0]}
elif s_other:
s_other[0] = {**ret_main, **s_other[0]}
else:
ret_all.append(ret_main)
if s_typed:
ret_all.extend(s_typed)
if s_other:
ret_all.extend(s_other)
if s_not_for_optimizer:
ret_all.extend(s_not_for_optimizer)
if s_one:
ret_all.extend(s_one)
if s_not_number_list:
ret_all.extend(s_not_number_list)
if s_not:
ret_all.extend(s_not)
if s_not_enum:
# We can't do not alongside anything else
# TODO: we should validate the list against the
# other parts of ret_all (this would need to move down): if any elements don't validate
# then they already would be excluded
# we can simplify +enum's the same way
ret_all_agg = makeAllOf(ret_all)
s_not_enum_simpl = enumValues(s_not_enum, ret_all_agg)
if s_not_enum_simpl:
sne = {'not':{'enum':list(s_not_enum)}}
ret_all.append(sne)
else:
logger.debug(f"simplifyAll: {s_not_enum} was a negated enum that was simplified away because its elements anyway don't satisfy the additional constraints {ret_all_agg}")
s_not_enum = s_not_enum_simpl
if not floatAny:
ret_all.extend([simplifyAny(s, False) for s in s_any])
ret_all_schema = makeAllOf(ret_all)
if floatAny and s_any:
args = list(([ret_all_schema], *tuple(s_any)))
cp = list(itertools.product(*args))
alls = [simplifyAll(list(s), False) for s in cp]
ret = simplifyAny(alls, False)
return ret
else:
return ret_all_schema
def simplifyAny(schema:List[Schema], floatAny:bool)->Schema:
s_any = schema
s_enum_list:List[set_with_str_for_keys[Any]] = []
s_not_enum_list:List[set_with_str_for_keys[Any]] = []
s_other:List[Schema] = []
s_not_for_optimizer:List[Schema] = []
while s_any:
l = s_any
s_any = []
for s in l:
if s is None:
continue
s = simplify(s, floatAny)
if s is None:
continue
if not isForOptimizer(s):
logger.info(f"simplifyAny: skipping not for optimizer {s} (after simplification)")
s_not_for_optimizer.append(s)
continue
if is_true_schema(s):
return STrue
if is_false_schema(s):
continue
if 'anyOf' in s:
s_any.extend(s['anyOf'])
elif 'enum' in s:
ev = enumValues(set_with_str_for_keys(s['enum']), s)
if ev:
s_enum_list.append(ev)
elif 'not' in s:
snot = s['not']
if 'enum' in s['not']:
ev = enumValues(set_with_str_for_keys(snot['enum']), snot)
if ev:
s_not_enum_list.append(ev)
else:
s_other.append(s)
s_enum:Optional[set_with_str_for_keys[Any]] = None
s_not_enum:Optional[set_with_str_for_keys[Any]] = None
if s_enum_list:
# if there are enumeration constraints, we combine them by union (anyOf semantics)
s_enum = set_with_str_for_keys.union(*s_enum_list)
if s_not_enum_list:
s_not_enum = set_with_str_for_keys.intersection(*s_not_enum_list)
if s_enum and s_not_enum:
s_not_enum = set_with_str_for_keys.difference(s_not_enum, s_enum)
s_enum = None
assert not s_any
ret:List[Schema] = []
if s_enum:
ret.append({'enum':list(s_enum)})
if s_not_enum:
ret.append({'not':{'enum':list(s_not_enum)}})
ret.extend(s_other)
ret.extend(s_not_for_optimizer)
return makeAnyOf(ret)
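# Illustrative sketch (not from the original source): under anyOf, enum constraints are
# combined by union, so (assuming makeAnyOf collapses a single-element list) something like
#
#     simplifyAny([{'enum': [1, 2]}, {'enum': [2, 3]}], floatAny=False)
#
# should reduce to roughly {'enum': [1, 2, 3]} (element order may differ).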
def simplifyNot(schema:Schema, floatAny:bool)->Schema:
return simplifyNot_(schema, floatAny, alreadySimplified=False)
def simplifyNot_(schema:Schema, floatAny:bool, alreadySimplified:bool=False)->Schema:
"""alreadySimplified=true implies that schema has already been simplified"""
if 'not' in schema:
# if there is a not/not, we can just skip it
ret = simplify(schema['not'], floatAny)
return ret
elif 'anyOf' in schema:
anys = schema['anyOf']
alls = [{'not':s} for s in anys]
ret = simplifyAll(alls, floatAny)
return ret
elif 'allOf' in schema:
alls = schema['allOf']
anys = [{'not':s} for s in alls]
ret = simplifyAny(anys, floatAny)
return ret
elif not alreadySimplified:
s = simplify(schema, floatAny)
# it is possible that the result of calling simplify
# resulted in something that we can push 'not' down into
# so we call ourselves, being careful to avoid an infinite loop.
return simplifyNot_(s, floatAny, alreadySimplified=True)
else:
return {'not':schema}
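# Illustrative sketch (not from the original source): a double negation collapses, since
# simplifyNot peels off the inner 'not' and simplifies what remains, e.g.
#
#     simplifyNot({'not': {'enum': [1]}}, floatAny=False)   # -> {'enum': [1]}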
def simplify(schema:Schema, floatAny:bool)->Schema:
""" Tries to simplify a schema into an equivalent but
more compact/simpler one. If floatAny is true, then
the only anyOf in the return value will be at the top level.
Using this option may cause a combinatorial blowup in the size
of the schema
"""
if is_true_schema(schema):
return STrue
if is_false_schema(schema):
return SFalse
if 'enum' in schema:
# TODO: simplify the schemas by removing anything that does not validate
# against the rest of the schema
return schema
if 'allOf' in schema:
ret = simplifyAll(schema['allOf'], floatAny)
return ret
elif 'anyOf' in schema:
ret = simplifyAny(schema['anyOf'], floatAny)
return ret
elif 'not' in schema:
return simplifyNot(schema['not'], floatAny)
elif 'type' in schema and schema['type'] == 'object' and 'properties' in schema:
schema2 = schema.copy()
props = {}
all_objs = [schema2]
## TODO: how does this interact with required?
## {k1:s_1, k2:anyOf:[s2s], k3:anyOf:[s3s]}
## If floatAny is true and any properties have an anyOf in them
## we need to float it out to the top. We can then
## give it to simplifyAll, which does the cross product to lift
## them out of the list
for k,v in schema['properties'].items():
s = simplify(v, floatAny)
if is_false_schema(s) and 'required' in schema and k in schema['required']:
logger.info(f"simplify: required key {k} is False, so the entire schema {schema} is False")
return impossible()
if (not is_true_schema(s)) and floatAny and 'anyOf' in s:
all_objs.append({'anyOf':[{'type':'object', 'properties':{k:vv}} for vv in s['anyOf']]})
# If we are disallowing additionalProperties, then we can't remove this property entirely
if not schema.get('additionalProperties', True):
props[k] = STrue
else:
props[k] = s
schema2['properties'] = props
if len(all_objs) == 1:
return schema2
else:
# The termination argument here is somewhat subtle
s = simplifyAll(all_objs, floatAny)
return s
else:
return schema
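# Illustrative sketch (not from the original source): allOf of two enumerations. Assuming
# simplifyAll intersects enum constraints (as its s_enum handling above suggests),
#
#     simplify({'allOf': [{'enum': [1, 2, 3]}, {'enum': [2, 3, 4]}]}, floatAny=False)
#
# should come out as roughly {'enum': [2, 3]} (element order may differ).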
# TODO: semantically, allOf should force an intersection
# of relevantFields, yet union seems kinder to the user/more modular (at least if additionalProperties:True)
def findRelevantFields(schema:Schema) -> Optional[Set[str]]:
"""Either returns the relevant fields for the schema, or None if there was none specified"""
if 'allOf' in schema:
fields_list:List[Optional[Set[str]]] = [findRelevantFields(s) for s in schema['allOf']]
real_fields_list:List[Set[str]] = [f for f in fields_list if f is not None]
if real_fields_list:
return set.union(*real_fields_list)
else:
return None
else:
if 'relevantToOptimizer' in schema:
return set(schema['relevantToOptimizer'])
else:
return None
# does not handle nested objects and nested relevant fields well
def narrowToGivenRelevantFields(schema:Schema, relevantFields:Set[str])->Schema:
if schema is False:
return False
if 'anyOf' in schema:
return {'anyOf':[narrowToGivenRelevantFields(a, relevantFields) for a in schema['anyOf']]}
if 'allOf' in schema:
return {'allOf':[narrowToGivenRelevantFields(a, relevantFields) for a in schema['allOf']]}
if 'not' in schema:
return {'not':narrowToGivenRelevantFields(schema['not'], relevantFields)}
if 'type' in schema and schema['type'] == "object" and 'properties' in schema:
props = schema['properties']
new_props = {k:narrowToGivenRelevantFields(v, relevantFields) for (k,v) in props.items() if k in relevantFields}
schema2 = schema.copy()
schema2['properties'] = new_props
if 'required' in schema:
reqs = set(schema['required'])
schema2['required'] = list(reqs.intersection(relevantFields))
return schema2
else:
return schema
def narrowToRelevantFields(schema:Schema)->Schema:
relevantFields:Optional[Set[str]] = findRelevantFields(schema)
if relevantFields is not None:
return narrowToGivenRelevantFields(schema, relevantFields)
else:
return schema
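# Illustrative sketch (not from the original source), with a hypothetical hyperparameter schema:
#
#     hp_schema = {
#         'type': 'object',
#         'relevantToOptimizer': ['n_estimators'],
#         'properties': {
#             'n_estimators': {'type': 'integer', 'minimum': 1},
#             'verbose': {'type': 'boolean'},
#         },
#         'required': ['n_estimators', 'verbose'],
#     }
#     narrowToRelevantFields(hp_schema)
#     # -> keeps only the 'n_estimators' property and narrows 'required' to ['n_estimators']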
# Given a json schema, removes any elements marked as 'forOptimizer:false'
# also does some basic simplifications
def filterForOptimizer(schema:Schema)->Optional[Schema]:
if schema is None or is_true_schema(schema) or is_false_schema(schema):
return schema
if not isForOptimizer(schema):
return None
if 'anyOf' in schema:
subs = schema['anyOf']
sch = [filterForOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
if sch_nnil:
return makeAnyOf(sch_nnil)
else:
return None
if 'allOf' in schema:
subs = schema['allOf']
sch = [filterForOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
filtered_sch = sch_nnil
if len(sch_nnil) != len(sch):
# Questionable semantics here (aka HACK!!!!)
# Since we removed something from the schema
# we will also remove negated schemas
filtered_sch = [s for s in sch_nnil if not isinstance(s, dict) or 'not' not in s]
if filtered_sch:
return makeAllOf(filtered_sch)
else:
return None
if 'oneOf' in schema:
subs = schema['oneOf']
sch = [filterForOptimizer(s) for s in subs]
sch_nnil = [s for s in sch if s is not None]
if sch_nnil:
return makeOneOf(sch_nnil)
else:
return None
if 'not' in schema:
s = filterForOptimizer(schema['not'])
if s is None:
return None
else:
return {'not':s}
if 'type' in schema and schema['type'] == 'object' and 'properties' in schema:
required = schema.get('required', None)
props = {}
for k,v in schema['properties'].items():
s = filterForOptimizer(v)
if s is None:
# if | |
'''
The following function has been adapted from an example in Python Dash Gallery,
found at: https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-tsne/demo.py
'''
# Callback function for the learn-more button
@app.callback(
[
Output("description-text", "children"),
Output("learn-more-button", "children"),
],
[Input("learn-more-button", "n_clicks")],
)
def learn_more(n_clicks):
# If clicked odd times, the instructions will show; else (even times), only the header will show
if n_clicks is None:
n_clicks = 0
if (n_clicks % 2) == 1:
n_clicks += 1
return (
html.Div(
style={"padding-right": "15%"},
children=[dcc.Markdown(demo_description_md)],
),
"Close",
)
else:
n_clicks += 1
return (
html.Div(
style={"padding-right": "15%"},
children=[dcc.Markdown(demo_intro_md)],
),
"Learn More",
)
'''
The following function has been adapted from an example in Python Dash Gallery,
found at: https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-tsne/demo.py
'''
@app.callback(
[
Output("graph-2d-plot-umap", "figure"),
Output("strategy", "disabled"),
Output("slider-samplesize", "disabled"),
Output("slider-epochs", "disabled"),
Output("slider-lr", "disabled"),
Output("graph-2d-plot-umap", "clickData"),
#Output('div-plot-label-message', 'children')
],
[
Input("train-button", "n_clicks"),
Input("strategy", "value"),
Input("slider-samplesize", "value"),
Input("slider-epochs", "value"),
Input("slider-lr", "value")
],
)
def display_3d_scatter_plot(
train_clicks,
strategy,
samplesize,
epochs,
lr
):
strategy_disabled = False
samplesize_disabled = False
epochs_disabled = False
lr_disabled = False
# Plot layout
layout = go.Layout(
title = dict(text="Visualization of image embeddings",
xanchor="center",
yanchor="middle",
x=0.5
),
titlefont = dict(
family = 'Arial, sans-serif',
size = 20,
color = '#6c7182'
),
showlegend=True,
margin=dict(l=0, r=0, t=50, b=0),
xaxis = dict(autorange=True,
showgrid=False,
showline=False,
zeroline=False,
ticks='',
showticklabels=False),
yaxis = dict(autorange=True,
showgrid=False,
showline=False,
zeroline=False,
ticks='',
showticklabels=False),
legend=dict(yanchor="top",
y=0.99,xanchor="left",
x=0.01),
paper_bgcolor="#f2f3f4"
)
global data, train_obj, EMB_HISTORY, orig_x, umap_embeddings_random, labels_text, prev_train_clicks
print("train_clicks: ", train_clicks)
prev_train_clicks = train_clicks
if train_clicks > 0:
if train_clicks == 1: # and reset_click == 0:
# disable parameter components
strategy_disabled = True
samplesize_disabled = True
epochs_disabled = True
lr_disabled = True
orig_x = torch.empty([samplesize, 28, 28], dtype=torch.uint8)
'''
training
'''
print("start training: ", datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
# create model directory
train_obj = Train(net, handler, epochs, lr, data, model_dir)
train_obj.train()
(X_TOLB, X_NOLB) = data_to_label(strategy)
data.update_nolabel(X_NOLB)
data.update_tolabel(X_TOLB)
train_obj.update_data(data)
print('train obj x nolb shape {}'.format(train_obj.data.X_NOLB.shape))
embeddings_tr = train_obj.get_trained_embedding()
embeddings_tolb = train_obj.get_tolb_embedding()
embeddings = np.concatenate((embeddings_tr, embeddings_tolb), axis=0)
labels = np.concatenate((data.Y.numpy(),
np.ones(embeddings_tolb.shape[0])*15),
axis=0)
labels_text = [str(int(item)) for item in labels]
labels_text = ["to label" if x == "15" else x for x in labels_text]
orig_x = np.concatenate((data.X.numpy(), data.X_TOLB.numpy()),axis=0)
umap_embeddings = reducer.fit_transform(embeddings)
EMB_HISTORY = (umap_embeddings, labels)
print("end training: ", datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
elif train_clicks > 1: # and reset_click == 0:
# disable parameter components
strategy_disabled = True
samplesize_disabled = True
epochs_disabled = True
lr_disabled = True
'''
training
'''
print("start training: ", datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
train_obj.train()
(X_TOLB, X_NOLB) = data_to_label(strategy)
data.update_nolabel(X_NOLB)
data.update_tolabel(X_TOLB)
train_obj.update_data(data)
print('train obj x nolb shape {}'.format(train_obj.data.X_NOLB.shape))
embeddings_tr = train_obj.get_trained_embedding()
embeddings_tolb = train_obj.get_tolb_embedding()
embeddings = np.concatenate((embeddings_tr, embeddings_tolb), axis=0)
labels = np.concatenate((data.Y.numpy(),
np.ones(embeddings_tolb.shape[0])*15),
axis=0)
labels_text = [str(int(item)) for item in labels]
labels_text = ["to label" if x == "15" else x for x in labels_text]
orig_x = np.concatenate((data.X.numpy(), data.X_TOLB.numpy()),axis=0)
umap_embeddings = reducer.fit_transform(embeddings)
EMB_HISTORY = (umap_embeddings, labels)
print("end training: ", datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
else:
if EMB_HISTORY is not None:
# disable parameter components
strategy_disabled = True
samplesize_disabled = True
epochs_disabled = True
lr_disabled = True
print("with embedding history")
umap_embeddings = EMB_HISTORY[0]
labels = EMB_HISTORY[1]
print('umap x{} y{}'.format(umap_embeddings[0,0], umap_embeddings[0,1]))
labels_text = [str(int(item)) for item in labels]
labels_text = ["to label" if x == "15" else x for x in labels_text]
else:
print("no embedding history")
train_ratio = samplesize/X_TR.shape[0]
print("train_ratio", train_ratio)
X_NOLB, X, Y_NOLB, Y = train_test_split(X_TR, Y_TR,
test_size=train_ratio,
random_state=seed,
shuffle=True)
X_TOLB = torch.empty([10, 28, 28], dtype=torch.uint8)
'''
Make data and train objects
'''
data = Data(X, Y, X_TE, Y_TE, X_NOLB, X_TOLB,
data_transform,
handler, n_classes)
print("data.X: ", data.X.shape)
x = np.random.rand(samplesize).reshape(samplesize, 1)
y = np.random.rand(samplesize).reshape(samplesize, 1)
umap_embeddings = np.concatenate((x, y), axis=1)
umap_embeddings_random = umap_embeddings
labels = data.Y.numpy()
labels_text = [str(int(item)) for item in labels]
orig_x = data.X.numpy()
embedding_df = pd.DataFrame(data=umap_embeddings, columns=["dim1", "dim2"])
embedding_df['labels'] = labels_text
groups = embedding_df.groupby("labels")
figure = generate_figure_image(groups, layout)
return [figure, strategy_disabled, samplesize_disabled, epochs_disabled, lr_disabled, None]
@app.callback(
[
Output("train-button", "n_clicks")
],
[
Input("reset-button", "n_clicks")
]
)
def reset(
reset_clicks
):
print("reset_clicks: ", reset_clicks)
global EMB_HISTORY, prev_reset_clicks
if reset_clicks >= 1:
# need to take care of training results
if os.path.exists(model_dir):
try:
shutil.rmtree(model_dir)
except OSError as e:
print("Error: %s : %s" % (model_dir, e.strerror))
EMB_HISTORY = None
prev_reset_clicks = reset_clicks
return [0]
#print("prev_train_clicks: ", prev_train_clicks)
return [prev_train_clicks]
'''
The following function has been inspired from an example in Python Dash Gallery,
found at: https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-tsne/demo.py
'''
@app.callback(
[
Output("div-plot-click-image", "children"),
Output("div-label-controls", "children"),
],
[
Input("graph-2d-plot-umap", "clickData")
]
)
def display_click_image(clickData):
#print("its div-plot-click-image")
global X_tolb_index
if clickData:
print(clickData)
# Convert the point clicked into float64 numpy array
click_point_np = np.array(
[clickData["points"][0][i] for i in ["x", "y"]]
).astype(np.float64)
print(click_point_np)
clicked_idx = None
if EMB_HISTORY is not None:
umap_embeddings = EMB_HISTORY[0]
clicked_idx = np.where((umap_embeddings == (click_point_np)).all(axis=1))[0][0]
#print(clicked_idx)
else:
umap_embeddings = umap_embeddings_random
clicked_idx = np.where((umap_embeddings == (click_point_np)).all(axis=1))[0][0]
#print(clicked_idx)
image_vector = orig_x[clicked_idx]
X_tolb_index = None
if labels_text[clicked_idx] == "to label":
X_tolb_index = np.where((data.X_TOLB.numpy() == image_vector).all((1,2)))[0][0]
image_np = image_vector.reshape(28, 28).astype(np.float64)
# Encode image into base 64
image_b64 = numpy_to_b64(image_np)
print(labels_text[clicked_idx])
if labels_text[clicked_idx] == "to label":
print("within to label")
return(
html.Div(
html.Img(
src='data:image/png;base64,{}'.format(image_b64),
style={"height": "25vh", "display": "block", "margin": "auto"},
)
),
html.Div(
style={"visibility": "visible"},
children=[
dcc.Input(
id="input-label",
type="number",
min=0, max=9,
step=1,
placeholder=0,
style={
"size": "120",
"text-align": "center",
"margin-top": "5px",
"margin-left": "50px",
"margin-right": "5px",
"margin-bottom": "10px",
"font-weight": "bold",
},
),
html.Button(
id="submit-button",
n_clicks=0,
children=["Submit"],
style={
"text-align": "center",
"margin-top": "5px",
"margin-left": "10px",
"margin-right": "5px",
"margin-bottom": "10px",
"font-weight": "bold",
},
),
],
)
)
else:
return(
html.Div(
html.Img(
src='data:image/png;base64,{}'.format(image_b64),
style={"height": "25vh", "display": "block", "margin": "auto"},
),
),
html.Div(
style={"visibility": "hidden"},
children=[
dcc.Input(
id="input-label",
type="number",
min=0, max=9,
step=1,
placeholder=0,
style={
"size": "120",
"text-align": "center",
"margin-top": "5px",
"margin-left": "50px",
"margin-right": "5px",
"margin-bottom": "10px",
"font-weight": "bold",
},
),
html.Button(
id="submit-button",
n_clicks=0,
children=["Submit"],
style={
"text-align": "center",
"margin-top": "5px",
"margin-left": "10px",
"margin-right": "5px",
"margin-bottom": "10px",
"font-weight": "bold",
},
),
],
)
)
else:
#print("no clickData")
return (None, None)
'''
The following function has been adapted from an example in Python Dash Gallery,
found at: https://github.com/plotly/dash-sample-apps/tree/master/apps/dash-tsne/demo.py
'''
@app.callback(
Output("div-plot-click-message", "children"),
[
Input("graph-2d-plot-umap", "clickData")
]
)
def display_click_message(clickData):
# Displays message shown when a point in the graph is clicked
if clickData:
return "Image Selected"
else:
return "Click a data point on the scatter plot to display its corresponding image."
@app.callback(
Output('div-plot-label-message', 'children'),
[
Input('submit-button', 'n_clicks')
],
[State('input-label', 'value')]
)
def update_output(submit_clicks, input_label):
global data, labels_text, prev_submit_clicks, EMB_HISTORY, orig_x
print("submit_clicks: ", submit_clicks)
if submit_clicks == 1:
prev_submit_clicks = submit_clicks
Y_TOLB = torch.tensor([input_label])
print("y labeled: ", Y_TOLB)
print("data.X_TOLB: ", data.X_TOLB.shape)
print("X_tolb_index: ", X_tolb_index)
# make a copy of the current embeddings
prev_embeddings = EMB_HISTORY[0]
# update embedding history w/o refitting
embeddings = np.concatenate((prev_embeddings[:data.X.shape[0]],
prev_embeddings[data.X.shape[0]+X_tolb_index].reshape(1, prev_embeddings[data.X.shape[0]+X_tolb_index].shape[0]),
prev_embeddings[data.X.shape[0]:data.X.shape[0]+X_tolb_index],
prev_embeddings[data.X.shape[0]+X_tolb_index+1:]
), axis=0)
# how to get corresponding X_TOLB?
X = torch.cat([data.X, data.X_TOLB[X_tolb_index].reshape(1, data.X_TOLB[X_tolb_index].shape[0], data.X_TOLB[X_tolb_index].shape[1])], dim=0)
Y = torch.cat([data.Y, Y_TOLB], dim=0)
data.update_data(X, Y)
print("data.X: ", data.X.shape)
print("data.Y: ", data.Y.shape)
# update X_TOLB and orig_x
X_TOLB = torch.cat([data.X_TOLB[:X_tolb_index], data.X_TOLB[X_tolb_index+1:]])
data.update_tolabel(X_TOLB)
orig_x = np.concatenate((data.X.numpy(), data.X_TOLB.numpy()),axis=0)
# update labels_text
labels = np.concatenate((data.Y.numpy(),
np.ones(data.X_TOLB.shape[0])*15),
axis=0)
labels_text = [str(int(item)) for item in labels]
labels_text = ["to label" if x == "15" else x for x in labels_text]
# update embedding history
EMB_HISTORY = (embeddings, labels)
return u'''Training dataset has {} datapoints'''.format(data.X.shape[0])
else:
return u''' '''
'''
The following function has been inspired from an example in Python Dash Gallery,
found at: https://github.com/plotly/dash-sample-apps/blob/master/apps/dash-live-model-training/app.py
'''
@app.callback(
Output("div-results-loss-graph", "figure"),
[
Input('loss-graph-update', 'n_intervals')
]
)
def display_loss_results(n):
if 'train_obj' in vars() or 'train_obj' in globals():
step = list(train_obj.step)
y_train = list(train_obj.train_loss)
y_val = list(train_obj.val_loss)
else:
step = [1, 10]
y_train = []
y_val = []
layout = go.Layout(
title = dict(text="Training and Validation Loss",
xanchor="center",
yanchor="middle",
x=0.5
),
titlefont = dict(
family = 'Arial, sans-serif',
size = 15,
color = '#6c7182'
),
margin=go.layout.Margin(l=0, r=0, b=0, t=20),
yaxis={"title": "cross entropy loss",},
xaxis={
"title": "epochs",
"range": [min(step),max(step)]
},
legend=dict(yanchor="bottom", y=0.1,
xanchor="left", x=0.01),
paper_bgcolor="#f2f3f4",
| |
pyxb.utils.utility.Location(u'avm.xsd', 474, 8)
Expression = property(__Expression.value, __Expression.set, None, None)
_ElementMap.update({
__Operand.name() : __Operand
})
_AttributeMap.update({
__Expression.name() : __Expression
})
Namespace.addCategoryObject('typeBinding', u'ComplexFormula', ComplexFormula_)
# Complex type {avm}DomainModelPort with content type EMPTY
class DomainModelPort_ (Port_):
"""Complex type {avm}DomainModelPort with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'DomainModelPort')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 186, 2)
_ElementMap = Port_._ElementMap.copy()
_AttributeMap = Port_._AttributeMap.copy()
# Base type is Port_
# Attribute Notes inherited from {avm}Port
# Attribute XPosition inherited from {avm}Port
# Attribute Definition inherited from {avm}Port
# Attribute YPosition inherited from {avm}Port
# Attribute Name inherited from {avm}Port
# Attribute ID inherited from {avm}PortMapTarget
# Attribute PortMap inherited from {avm}PortMapTarget
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'DomainModelPort', DomainModelPort_)
# Complex type {avm}AbstractPort with content type EMPTY
class AbstractPort_ (Port_):
"""Complex type {avm}AbstractPort with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'AbstractPort')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 310, 2)
_ElementMap = Port_._ElementMap.copy()
_AttributeMap = Port_._AttributeMap.copy()
# Base type is Port_
# Attribute Notes inherited from {avm}Port
# Attribute XPosition inherited from {avm}Port
# Attribute Definition inherited from {avm}Port
# Attribute YPosition inherited from {avm}Port
# Attribute Name inherited from {avm}Port
# Attribute ID inherited from {avm}PortMapTarget
# Attribute PortMap inherited from {avm}PortMapTarget
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'AbstractPort', AbstractPort_)
Component = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Component'), Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 4, 2))
Namespace.addCategoryObject('elementBinding', Component.name().localName(), Component)
DomainModel = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DomainModel'), DomainModel_, location=pyxb.utils.utility.Location(u'avm.xsd', 5, 2))
Namespace.addCategoryObject('elementBinding', DomainModel.name().localName(), DomainModel)
Property = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Property'), Property_, location=pyxb.utils.utility.Location(u'avm.xsd', 6, 2))
Namespace.addCategoryObject('elementBinding', Property.name().localName(), Property)
Resource = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Resource'), Resource_, location=pyxb.utils.utility.Location(u'avm.xsd', 11, 2))
Namespace.addCategoryObject('elementBinding', Resource.name().localName(), Resource)
DomainModelParameter = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DomainModelParameter'), DomainModelParameter_, location=pyxb.utils.utility.Location(u'avm.xsd', 15, 2))
Namespace.addCategoryObject('elementBinding', DomainModelParameter.name().localName(), DomainModelParameter)
ValueExpressionType = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ValueExpressionType'), ValueExpressionType_, location=pyxb.utils.utility.Location(u'avm.xsd', 17, 2))
Namespace.addCategoryObject('elementBinding', ValueExpressionType.name().localName(), ValueExpressionType)
DistributionRestriction = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DistributionRestriction'), DistributionRestriction_, location=pyxb.utils.utility.Location(u'avm.xsd', 20, 2))
Namespace.addCategoryObject('elementBinding', DistributionRestriction.name().localName(), DistributionRestriction)
DomainModelMetric = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DomainModelMetric'), DomainModelMetric_, location=pyxb.utils.utility.Location(u'avm.xsd', 24, 2))
Namespace.addCategoryObject('elementBinding', DomainModelMetric.name().localName(), DomainModelMetric)
AnalysisConstruct = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'AnalysisConstruct'), AnalysisConstruct_, location=pyxb.utils.utility.Location(u'avm.xsd', 30, 2))
Namespace.addCategoryObject('elementBinding', AnalysisConstruct.name().localName(), AnalysisConstruct)
Design = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Design'), Design_, location=pyxb.utils.utility.Location(u'avm.xsd', 32, 2))
Namespace.addCategoryObject('elementBinding', Design.name().localName(), Design)
Container = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Container'), Container_, location=pyxb.utils.utility.Location(u'avm.xsd', 33, 2))
Namespace.addCategoryObject('elementBinding', Container.name().localName(), Container)
ComponentInstance = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ComponentInstance'), ComponentInstance_, location=pyxb.utils.utility.Location(u'avm.xsd', 37, 2))
Namespace.addCategoryObject('elementBinding', ComponentInstance.name().localName(), ComponentInstance)
ComponentPrimitivePropertyInstance = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ComponentPrimitivePropertyInstance'), ComponentPrimitivePropertyInstance_, location=pyxb.utils.utility.Location(u'avm.xsd', 39, 2))
Namespace.addCategoryObject('elementBinding', ComponentPrimitivePropertyInstance.name().localName(), ComponentPrimitivePropertyInstance)
ValueNode = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ValueNode'), ValueNode_, location=pyxb.utils.utility.Location(u'avm.xsd', 46, 2))
Namespace.addCategoryObject('elementBinding', ValueNode.name().localName(), ValueNode)
Operand = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Operand'), Operand_, location=pyxb.utils.utility.Location(u'avm.xsd', 48, 2))
Namespace.addCategoryObject('elementBinding', Operand.name().localName(), Operand)
ConnectorFeature = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ConnectorFeature'), ConnectorFeature_, location=pyxb.utils.utility.Location(u'avm.xsd', 50, 2))
Namespace.addCategoryObject('elementBinding', ConnectorFeature.name().localName(), ConnectorFeature)
ContainerFeature = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ContainerFeature'), ContainerFeature_, location=pyxb.utils.utility.Location(u'avm.xsd', 51, 2))
Namespace.addCategoryObject('elementBinding', ContainerFeature.name().localName(), ContainerFeature)
DomainMapping = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DomainMapping'), DomainMapping_, location=pyxb.utils.utility.Location(u'avm.xsd', 52, 2))
Namespace.addCategoryObject('elementBinding', DomainMapping.name().localName(), DomainMapping)
TestBench = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'TestBench'), TestBench_, location=pyxb.utils.utility.Location(u'avm.xsd', 53, 2))
Namespace.addCategoryObject('elementBinding', TestBench.name().localName(), TestBench)
ContainerInstanceBase = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ContainerInstanceBase'), ContainerInstanceBase_, location=pyxb.utils.utility.Location(u'avm.xsd', 57, 2))
Namespace.addCategoryObject('elementBinding', ContainerInstanceBase.name().localName(), ContainerInstanceBase)
TestBenchValueBase = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'TestBenchValueBase'), TestBenchValueBase_, location=pyxb.utils.utility.Location(u'avm.xsd', 58, 2))
Namespace.addCategoryObject('elementBinding', TestBenchValueBase.name().localName(), TestBenchValueBase)
Workflow = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Workflow'), Workflow_, location=pyxb.utils.utility.Location(u'avm.xsd', 60, 2))
Namespace.addCategoryObject('elementBinding', Workflow.name().localName(), Workflow)
WorkflowTaskBase = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'WorkflowTaskBase'), WorkflowTaskBase_, location=pyxb.utils.utility.Location(u'avm.xsd', 61, 2))
Namespace.addCategoryObject('elementBinding', WorkflowTaskBase.name().localName(), WorkflowTaskBase)
Settings = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Settings'), Settings_, location=pyxb.utils.utility.Location(u'avm.xsd', 64, 2))
Namespace.addCategoryObject('elementBinding', Settings.name().localName(), Settings)
DesignDomainFeature = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DesignDomainFeature'), DesignDomainFeature_, location=pyxb.utils.utility.Location(u'avm.xsd', 66, 2))
Namespace.addCategoryObject('elementBinding', DesignDomainFeature.name().localName(), DesignDomainFeature)
Value = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Value'), Value_, location=pyxb.utils.utility.Location(u'avm.xsd', 7, 2))
Namespace.addCategoryObject('elementBinding', Value.name().localName(), Value)
FixedValue = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'FixedValue'), FixedValue_, location=pyxb.utils.utility.Location(u'avm.xsd', 8, 2))
Namespace.addCategoryObject('elementBinding', FixedValue.name().localName(), FixedValue)
CalculatedValue = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'CalculatedValue'), CalculatedValue_, location=pyxb.utils.utility.Location(u'avm.xsd', 9, 2))
Namespace.addCategoryObject('elementBinding', CalculatedValue.name().localName(), CalculatedValue)
DerivedValue = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DerivedValue'), DerivedValue_, location=pyxb.utils.utility.Location(u'avm.xsd', 10, 2))
Namespace.addCategoryObject('elementBinding', DerivedValue.name().localName(), DerivedValue)
ParametricValue = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ParametricValue'), ParametricValue_, location=pyxb.utils.utility.Location(u'avm.xsd', 16, 2))
Namespace.addCategoryObject('elementBinding', ParametricValue.name().localName(), ParametricValue)
ProbabilisticValue = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ProbabilisticValue'), ProbabilisticValue_, location=pyxb.utils.utility.Location(u'avm.xsd', 18, 2))
Namespace.addCategoryObject('elementBinding', ProbabilisticValue.name().localName(), ProbabilisticValue)
SecurityClassification = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'SecurityClassification'), SecurityClassification_, location=pyxb.utils.utility.Location(u'avm.xsd', 21, 2))
Namespace.addCategoryObject('elementBinding', SecurityClassification.name().localName(), SecurityClassification)
Proprietary = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Proprietary'), Proprietary_, location=pyxb.utils.utility.Location(u'avm.xsd', 22, 2))
Namespace.addCategoryObject('elementBinding', Proprietary.name().localName(), Proprietary)
ITAR = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ITAR'), ITAR_, location=pyxb.utils.utility.Location(u'avm.xsd', 23, 2))
Namespace.addCategoryObject('elementBinding', ITAR.name().localName(), ITAR)
PrimitiveProperty = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'PrimitiveProperty'), PrimitiveProperty_, location=pyxb.utils.utility.Location(u'avm.xsd', 26, 2))
Namespace.addCategoryObject('elementBinding', PrimitiveProperty.name().localName(), PrimitiveProperty)
CompoundProperty = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'CompoundProperty'), CompoundProperty_, location=pyxb.utils.utility.Location(u'avm.xsd', 27, 2))
Namespace.addCategoryObject('elementBinding', CompoundProperty.name().localName(), CompoundProperty)
ParametricEnumeratedValue = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ParametricEnumeratedValue'), ParametricEnumeratedValue_, location=pyxb.utils.utility.Location(u'avm.xsd', 28, 2))
Namespace.addCategoryObject('elementBinding', ParametricEnumeratedValue.name().localName(), ParametricEnumeratedValue)
DataSource = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DataSource'), DataSource_, location=pyxb.utils.utility.Location(u'avm.xsd', 31, 2))
Namespace.addCategoryObject('elementBinding', DataSource.name().localName(), DataSource)
Compound = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Compound'), Compound_, location=pyxb.utils.utility.Location(u'avm.xsd', 34, 2))
Namespace.addCategoryObject('elementBinding', Compound.name().localName(), Compound)
DesignSpaceContainer = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DesignSpaceContainer'), DesignSpaceContainer_, location=pyxb.utils.utility.Location(u'avm.xsd', 40, 2))
Namespace.addCategoryObject('elementBinding', DesignSpaceContainer.name().localName(), DesignSpaceContainer)
PortMapTarget = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'PortMapTarget'), PortMapTarget_, location=pyxb.utils.utility.Location(u'avm.xsd', 41, 2))
Namespace.addCategoryObject('elementBinding', PortMapTarget.name().localName(), PortMapTarget)
ConnectorCompositionTarget = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ConnectorCompositionTarget'), ConnectorCompositionTarget_, location=pyxb.utils.utility.Location(u'avm.xsd', 43, 2))
Namespace.addCategoryObject('elementBinding', ConnectorCompositionTarget.name().localName(), ConnectorCompositionTarget)
Formula = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Formula'), Formula_, location=pyxb.utils.utility.Location(u'avm.xsd', 44, 2))
Namespace.addCategoryObject('elementBinding', Formula.name().localName(), Formula)
DoDDistributionStatement = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DoDDistributionStatement'), DoDDistributionStatement_, location=pyxb.utils.utility.Location(u'avm.xsd', 49, 2))
Namespace.addCategoryObject('elementBinding', DoDDistributionStatement.name().localName(), DoDDistributionStatement)
TopLevelSystemUnderTest = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'TopLevelSystemUnderTest'), TopLevelSystemUnderTest_, location=pyxb.utils.utility.Location(u'avm.xsd', 54, 2))
Namespace.addCategoryObject('elementBinding', TopLevelSystemUnderTest.name().localName(), TopLevelSystemUnderTest)
Parameter = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Parameter'), Parameter_, location=pyxb.utils.utility.Location(u'avm.xsd', 55, 2))
Namespace.addCategoryObject('elementBinding', Parameter.name().localName(), Parameter)
Metric = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Metric'), Metric_, location=pyxb.utils.utility.Location(u'avm.xsd', 56, 2))
Namespace.addCategoryObject('elementBinding', Metric.name().localName(), Metric)
TestInjectionPoint = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'TestInjectionPoint'), TestInjectionPoint_, location=pyxb.utils.utility.Location(u'avm.xsd', 59, 2))
Namespace.addCategoryObject('elementBinding', TestInjectionPoint.name().localName(), TestInjectionPoint)
InterpreterTask = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'InterpreterTask'), InterpreterTask_, location=pyxb.utils.utility.Location(u'avm.xsd', 62, 2))
Namespace.addCategoryObject('elementBinding', InterpreterTask.name().localName(), InterpreterTask)
ExecutionTask = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ExecutionTask'), ExecutionTask_, location=pyxb.utils.utility.Location(u'avm.xsd', 63, 2))
Namespace.addCategoryObject('elementBinding', ExecutionTask.name().localName(), ExecutionTask)
ValueFlowMux = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ValueFlowMux'), ValueFlowMux_, location=pyxb.utils.utility.Location(u'avm.xsd', 65, 2))
Namespace.addCategoryObject('elementBinding', ValueFlowMux.name().localName(), ValueFlowMux)
Connector = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Connector'), Connector_, location=pyxb.utils.utility.Location(u'avm.xsd', 12, 2))
Namespace.addCategoryObject('elementBinding', Connector.name().localName(), Connector)
Port = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Port'), Port_, location=pyxb.utils.utility.Location(u'avm.xsd', 13, 2))
Namespace.addCategoryObject('elementBinding', Port.name().localName(), Port)
NormalDistribution = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'NormalDistribution'), NormalDistribution_, location=pyxb.utils.utility.Location(u'avm.xsd', 19, 2))
Namespace.addCategoryObject('elementBinding', NormalDistribution.name().localName(), NormalDistribution)
UniformDistribution = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'UniformDistribution'), UniformDistribution_, location=pyxb.utils.utility.Location(u'avm.xsd', 25, 2))
Namespace.addCategoryObject('elementBinding', UniformDistribution.name().localName(), UniformDistribution)
Optional = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Optional'), Optional_, location=pyxb.utils.utility.Location(u'avm.xsd', 35, 2))
Namespace.addCategoryObject('elementBinding', Optional.name().localName(), Optional)
Alternative = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'Alternative'), Alternative_, location=pyxb.utils.utility.Location(u'avm.xsd', 36, 2))
Namespace.addCategoryObject('elementBinding', Alternative.name().localName(), Alternative)
ComponentPortInstance = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ComponentPortInstance'), ComponentPortInstance_, location=pyxb.utils.utility.Location(u'avm.xsd', 38, 2))
Namespace.addCategoryObject('elementBinding', ComponentPortInstance.name().localName(), ComponentPortInstance)
ComponentConnectorInstance = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ComponentConnectorInstance'), ComponentConnectorInstance_, location=pyxb.utils.utility.Location(u'avm.xsd', 42, 2))
Namespace.addCategoryObject('elementBinding', ComponentConnectorInstance.name().localName(), ComponentConnectorInstance)
SimpleFormula = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'SimpleFormula'), SimpleFormula_, location=pyxb.utils.utility.Location(u'avm.xsd', 45, 2))
Namespace.addCategoryObject('elementBinding', SimpleFormula.name().localName(), SimpleFormula)
ComplexFormula = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'ComplexFormula'), ComplexFormula_, location=pyxb.utils.utility.Location(u'avm.xsd', 47, 2))
Namespace.addCategoryObject('elementBinding', ComplexFormula.name().localName(), ComplexFormula)
DomainModelPort = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'DomainModelPort'), DomainModelPort_, location=pyxb.utils.utility.Location(u'avm.xsd', 14, 2))
Namespace.addCategoryObject('elementBinding', DomainModelPort.name().localName(), DomainModelPort)
AbstractPort = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'AbstractPort'), AbstractPort_, location=pyxb.utils.utility.Location(u'avm.xsd', 29, 2))
Namespace.addCategoryObject('elementBinding', AbstractPort.name().localName(), AbstractPort)
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'DomainModel'), DomainModel_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 72, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Property'), Property_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 73, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'ResourceDependency'), Resource_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 74, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Connector'), Connector_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 75, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'DistributionRestriction'), DistributionRestriction_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 76, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Port'), Port_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 77, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Classifications'), pyxb.binding.datatypes.anyURI, nillable=pyxb.binding.datatypes.boolean(1), scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 78, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'AnalysisConstruct'), AnalysisConstruct_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 79, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Supercedes'), pyxb.binding.datatypes.string, nillable=pyxb.binding.datatypes.boolean(1), scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 80, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'Formula'), Formula_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 81, 6)))
Component_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'DomainMapping'), DomainMapping_, scope=Component_, location=pyxb.utils.utility.Location(u'avm.xsd', 82, 6)))
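# Note (illustrative, not part of the PyXB-generated binding): the
# addCategoryObject calls above register each global element so the generated
# module can resolve it when parsing an instance document. A typical consumer
# would look roughly like the sketch below; the module name 'avm', the file
# name, and the attribute accessed on the result are assumptions for
# illustration only, not verified against the generated API.
#
#     import avm
#     with open('component.acm') as f:
#         obj = avm.CreateFromDocument(f.read())  # binding instance for the document's root element
#     print(obj.toxml())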
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 72, 6))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 73, 6))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 74, 6))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 75, 6))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 76, 6))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 77, 6))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 78, 6))
counters.add(cc_6)
cc_7 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 79, 6))
counters.add(cc_7)
cc_8 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 80, 6))
counters.add(cc_8)
cc_9 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 81, 6))
counters.add(cc_9)
cc_10 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location(u'avm.xsd', 82, 6))
counters.add(cc_10)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'DomainModel')), pyxb.utils.utility.Location(u'avm.xsd', 72, 6))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'Property')), pyxb.utils.utility.Location(u'avm.xsd', 73, 6))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'ResourceDependency')), pyxb.utils.utility.Location(u'avm.xsd', 74, 6))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'Connector')), pyxb.utils.utility.Location(u'avm.xsd', 75, 6))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'DistributionRestriction')), pyxb.utils.utility.Location(u'avm.xsd', 76, 6))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'Port')), pyxb.utils.utility.Location(u'avm.xsd', 77, 6))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_6, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'Classifications')), pyxb.utils.utility.Location(u'avm.xsd', 78, 6))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_7, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'AnalysisConstruct')), pyxb.utils.utility.Location(u'avm.xsd', 79, 6))
st_7 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_8, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'Supercedes')), pyxb.utils.utility.Location(u'avm.xsd', 80, 6))
st_8 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_9, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'Formula')), pyxb.utils.utility.Location(u'avm.xsd', 81, 6))
st_9 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_9)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_10, False))
symbol = pyxb.binding.content.ElementUse(Component_._UseForTag(pyxb.namespace.ExpandedName(None, u'DomainMapping')), pyxb.utils.utility.Location(u'avm.xsd', 82, 6))
st_10 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_10)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_7, [
| |
    nu = expt['nu_line'] / (1. + c['z'])
l = 3e8 / (nu * 1e6) # Wavelength (m)
Ddish = expt['Ddish']
# Calculate FOV (180deg * theta for cylinder mode)
fov = np.pi * (l / Ddish) if 'cyl' in expt['mode'] else (l / Ddish)**2.
# Calculate interferometer baseline density, n(u)
if "n(x)" in list(expt.keys()):
# Rescale n(x) with freq.-dependence
        # FIXME: print("\tUsing user-specified baseline density, n(u)")
x = u / nu # x = u / (freq [MHz])
n_u = expt['n(x)'](x) / nu**2. # n(x) = n(u) * nu^2
n_u[np.where(n_u == 0.)] = 1. / INF_NOISE
else:
# Approximate expression for n(u), assuming uniform density in UV plane
print("\tUsing uniform baseline density, n(u) ~ const.")
u_min = expt['Dmin'] / l; u_max = expt['Dmax'] / l
# Sanity check: Physical area of array must be > combined area of dishes
ff = expt['Ndish'] * (expt['Ddish'] / expt['Dmax'])**2. # Filling factor
print("\tArray filling factor: %3.3f" % ff)
if ff > 1.:
raise ValueError( ("Filling factor is > 1; dishes are too big to "
"fit in specified area (out to Dmax).") )
# Uniform density n(u)
n_u = expt['Ndish']*(expt['Ndish'] - 1.) * l**2. * np.ones(u.shape) \
/ (2. * np.pi * (expt['Dmax']**2. - expt['Dmin']**2.) )
n_u[np.where(u < u_min)] = 1. / INF_NOISE
n_u[np.where(u > u_max)] = 1. / INF_NOISE
# FOV cut-off (disabled for cylinders)
if 'cyl' not in expt['mode']:
l = 3e8 / (nu * 1e6) # Wavelength (m)
u_fov = 1. / np.sqrt(fov)
n_u[np.where(u < u_fov)] = 1. / INF_NOISE
# Gaussian in parallel direction. Perp. direction already accounted for by
# n(u) factor in multiplicity (I)
sigma_kpar = np.sqrt(16.*np.log(2)) * expt['nu_line'] / (expt['dnu'] * c['rnu'])
B_par = (y/(c['rnu']*sigma_kpar))**2.
B_par[np.where(B_par > EXP_OVERFLOW_VAL)] = EXP_OVERFLOW_VAL
invbeam2 = np.exp(B_par)
return invbeam2 / n_u
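# Illustrative aside (not part of the original module): the uniform n(u) used
# above is normalised so that integrating it over the UV annulus
# u_min <= u <= u_max recovers the total number of baselines, Ndish*(Ndish-1)/2.
# A minimal sketch of that check, with stand-alone arguments instead of the
# expt dict:
def _check_uniform_baseline_density(Ndish, Dmin, Dmax, l):
    """Integrate the uniform n(u) over the annulus; expect Ndish*(Ndish-1)/2."""
    n_u = Ndish * (Ndish - 1.) * l**2. / (2. * np.pi * (Dmax**2. - Dmin**2.))
    annulus_area = np.pi * ((Dmax / l)**2. - (Dmin / l)**2.)
    return n_u * annulus_area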
def dish_response(q, y, cosmo, expt):
"""
Dish multiplicity and beam factors (I * B_perp * B_par) for the noise
covariance of a single-dish mode instrument.
"""
c = cosmo
kperp = q / (c['aperp']*c['r'])
kpar = y / (c['apar']*c['rnu'])
# Define parallel/perp. beam scales
l = 3e8 * (1. + c['z']) / (1e6 * expt['nu_line'])
theta_fwhm = l / expt['Ddish']
sigma_kpar = np.sqrt(16.*np.log(2)) * expt['nu_line'] / (expt['dnu'] * c['rnu'])
sigma_kperp = np.sqrt(16.*np.log(2)) / (c['r'] * theta_fwhm)
# Sanity check: Require that Sarea > Nbeam * (beam)^2
if (expt['Sarea'] < expt['Nbeam'] / (sigma_kperp * c['r'])**2.):
raise ValueError("Sarea is less than (Nbeam * beam^2)")
# Single-dish experiment has Gaussian beams in perp. and par. directions
# (N.B. Check for overflow values and trim them.)
B_tot = (q/(c['r']*sigma_kperp))**2. + (y/(c['rnu']*sigma_kpar))**2.
B_tot[np.where(B_tot > EXP_OVERFLOW_VAL)] = EXP_OVERFLOW_VAL
invbeam2 = np.exp(B_tot)
return invbeam2
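# Usage sketch (illustrative only; 'cosmo' and 'expt' here stand for dicts with
# just the keys this function touches -- placeholders, not a real survey spec):
#
#     q = np.linspace(0., 500., 200)   # perpendicular Fourier coordinate
#     y = np.zeros_like(q)             # parallel coordinate held at zero
#     invB2 = dish_response(q, y, cosmo, expt)
#     # invB2 ~ 1 for modes well inside the beam and grows exponentially beyond
#     # r*sigma_kperp (and rnu*sigma_kpar in the parallel direction), so
#     # multiplying the noise by it in Cnoise() suppresses modes smaller than
#     # the beam / channel width.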
def Cnoise(q, y, cosmo, expt, cv=False):
"""
Noise covariance matrix, from last equality in Eq. 25 of Pedro's notes.
A Fourier-space beam has been applied to act as a filter over the survey
volume. Units: mK^2.
"""
c = cosmo
kperp = q / (c['aperp']*c['r'])
kpar = y / (c['apar']*c['rnu'])
nu = expt['nu_line'] / (1. + c['z'])
l = 3e8 / (nu * 1e6) # Wavelength (m)
# Number of receiver polarisation channels (default is two) and dish effic.
npol = expt['npol'] if 'npol' in list(expt.keys()) else 2.
effic = expt['effic'] if 'effic' in list(expt.keys()) else 0.7
effic2 = expt['effic2'] if 'effic2' in list(expt.keys()) else 0.7
# Calculate base noise properties
Vsurvey = expt['Sarea'] * expt['dnutot'] / expt['nu_line']
Tsky = 60e3 * (300.*(1.+c['z']) / expt['nu_line'])**2.55 # Temp. of sky (mK)
Tsys = expt['Tinst'] + Tsky
if 'Tsky_factor' in list(expt.keys()): Tsys += expt['Tsky_factor'] * Tsky
noise = Tsys**2. * Vsurvey / (npol * expt['ttot'] * expt['dnutot'])
if cv: noise = 1. # Cosmic variance-limited calc.
# Multiply noise by mode-specific factors
if expt['mode'][0] == 'i':
# Interferometer mode
print("\tInterferometer mode", end=' ')
# Default effective area / beam FWHM
Aeff = effic * 0.25 * np.pi * expt['Ddish']**2. \
if 'Aeff' not in list(expt.keys()) else expt['Aeff']
theta_b = l / expt['Ddish']
# Evaluate at critical freq.
if 'nu_crit' in list(expt.keys()):
nu_crit = expt['nu_crit']
l_crit = 3e8 / (nu_crit * 1e6)
theta_b_crit = l_crit / expt['Ddish']
# Choose specific mode
if 'cyl' in expt['mode']:
# Cylinder interferometer
print("(cylinder)")
Aeff = effic * expt['Ncyl'] * expt['cyl_area'] / expt['Ndish'] # area per receiver
theta_b = np.sqrt( 0.5 * np.pi * l / expt['Ddish'] ) # FOV ~ 90 deg * l/D
elif 'paf' in expt['mode']:
# PAF interferometer
print("(PAF)")
theta_b = theta_b_crit * (nu_crit / nu) if nu > nu_crit else 1.
elif 'aa' in expt['mode']:
# Aperture array interferometer
print("(aperture array)")
Aeff *= (expt['nu_crit'] / nu)**2. if nu > nu_crit else 1.
theta_b = theta_b_crit * (nu_crit / nu)
else:
# Standard dish interferometer
print("(dish)")
noise *= interferometer_response(q, y, cosmo, expt)
noise *= l**4. / (expt['Nbeam'] * (Aeff * theta_b)**2.)
else:
# Autocorrelation mode
print("\tAutocorrelation mode", end=' ')
Aeff = effic * 0.25 * np.pi * expt['Ddish']**2. \
if 'Aeff' not in list(expt.keys()) else expt['Aeff']
theta_b = l / expt['Ddish']
if 'paf' in expt['mode']:
# PAF autocorrelation mode
print("(PAF)")
noise *= l**4. / (Aeff**2. * theta_b**4.)
noise *= 1. if nu > expt['nu_crit'] else (expt['nu_crit'] / nu)**2.
elif 'hybrid' in expt['mode']:
print("(hybrid array)")
# Standard autocorrelation mode, but with overlapping sub-arrays
# Calculate properties of sub-array 2
Aeff2 = effic2 * 0.25 * np.pi * expt['Ddish2']**2. \
if 'Aeff2' not in list(expt.keys()) else expt['Aeff2']
Tsys2 = expt['Tinst2'] + Tsky
theta_b2 = l / expt['Ddish2']
Nd1 = expt['Ndish']; Nd2 = expt['Ndish2']
# Band boundaries
numax1 = expt['array_numax1']
numax2 = expt['array_numax2']
numin1 = numax1 - expt['array_dnutot1']
numin2 = numax2 - expt['array_dnutot2']
# Decide if overlapping and calculate combined values
if (nu >= numin1 and nu <= numax1):
if (nu >= numin2 and nu <= numax2):
# Full overlap (just average theta_b here)
Ndish_comb = Nd1 + Nd2
theta_b_comb = (Nd1*theta_b + Nd2*theta_b2) / float(Ndish_comb)
avg_AoverT = ((Nd1 * Aeff / Tsys) + (Nd2 * Aeff2 / Tsys2)) \
/ float(Ndish_comb)
else:
# Only array 1
Ndish_comb = Nd1
theta_b_comb = theta_b
avg_AoverT = Aeff / Tsys
elif (nu >= numin2 and nu <= numax2):
# Only array 2
Ndish_comb = Nd2
theta_b_comb = theta_b2
avg_AoverT = Aeff2 / Tsys2
else:
# No overlap; error
raise ValueError("Cnoise(): hybrid array: neither array covers this frequency")
# Noise expression
noise *= l**4. / (avg_AoverT**2. * theta_b_comb**4.)
# Replace previous 1-array values with new 2-array values
noise *= (expt['Ndish'] / float(Ndish_comb)) / Tsys**2.
else:
# Standard dish autocorrelation mode
print("(dish)")
noise *= l**4. / (Aeff**2. * theta_b**4.)
noise *= 1. / (expt['Ndish'] * expt['Nbeam'])
noise *= dish_response(q, y, cosmo, expt)
"""
elif 'comb' in expt['mode']:
print "\tCombined interferometer + single-dish mode"
# Combined dish + interferom. mode
# N.B. For each voxel, this takes the response which has the lowest
# noise (*either* interferom. of single dish), rather than adding the
# inverse noise terms together. This is correct in the CV-limited
# regime, since it prevents double counting of photons, but is
# pessimistic in the noise-dominated regime, since it throws information away
r_int = interferometer_response(q, y, cosmo, expt)
r_dish = dish_response(q, y, cosmo, expt)
#noise *= np.minimum(r_int, r_dish) # Taking the elementwise minimum
noise *= 1./(1./r_int + 1./r_dish) # Adding in quadrature
"""
# FIXME
if 'comb' in expt['mode']: raise NotImplementedError("Combined mode not implemented!")
# Cut-off in parallel direction due to (freq.-dep.) foreground subtraction
kfg = 2.*np.pi * expt['nu_line'] / (expt['survey_dnutot'] * c['rnu'])
kfg *= expt['kfg_fac'] if 'kfg_fac' in list(expt.keys()) else 1.
noise[np.where(np.abs(kpar) < kfg)] = INF_NOISE
# Cut out foreground wedge for interferometers (wedge definition taken
# from Eqs. 5/6 of arXiv:1502.07596). Wedge region is where c*tau <= |b|.
if 'wedge' in list(expt.keys()):
if | |
80,
(83, '-'): 80,
(83, '.'): 80,
(83, '/'): 1,
(83, '0'): 80,
(83, '1'): 80,
(83, '2'): 80,
(83, '3'): 80,
(83, '4'): 80,
(83, '5'): 80,
(83, '6'): 80,
(83, '7'): 80,
(83, '8'): 80,
(83, '9'): 80,
(83, ':'): 80,
(83, ';'): 80,
(83, '<'): 80,
(83, '='): 80,
(83, '>'): 80,
(83, '?'): 80,
(83, '@'): 80,
(83, 'A'): 80,
(83, 'B'): 80,
(83, 'C'): 80,
(83, 'D'): 80,
(83, 'E'): 80,
(83, 'F'): 80,
(83, 'G'): 80,
(83, 'H'): 80,
(83, 'I'): 80,
(83, 'J'): 80,
(83, 'K'): 80,
(83, 'L'): 80,
(83, 'M'): 80,
(83, 'N'): 80,
(83, 'O'): 80,
(83, 'P'): 80,
(83, 'Q'): 80,
(83, 'R'): 80,
(83, 'S'): 80,
(83, 'T'): 80,
(83, 'U'): 80,
(83, 'V'): 80,
(83, 'W'): 80,
(83, 'X'): 80,
(83, 'Y'): 80,
(83, 'Z'): 80,
(83, '['): 80,
(83, '\\'): 80,
(83, ']'): 80,
(83, '^'): 80,
(83, '_'): 80,
(83, '`'): 80,
(83, 'a'): 80,
(83, 'b'): 80,
(83, 'c'): 80,
(83, 'd'): 80,
(83, 'e'): 80,
(83, 'f'): 80,
(83, 'g'): 80,
(83, 'h'): 80,
(83, 'i'): 80,
(83, 'j'): 80,
(83, 'k'): 80,
(83, 'l'): 80,
(83, 'm'): 80,
(83, 'n'): 80,
(83, 'o'): 80,
(83, 'p'): 80,
(83, 'q'): 80,
(83, 'r'): 80,
(83, 's'): 80,
(83, 't'): 80,
(83, 'u'): 80,
(83, 'v'): 80,
(83, 'w'): 80,
(83, 'x'): 80,
(83, 'y'): 80,
(83, 'z'): 80,
(83, '{'): 80,
(83, '|'): 80,
(83, '}'): 80,
(83, '~'): 80,
(83, '\x7f'): 80,
(83, '\x80'): 80,
(83, '\x81'): 80,
(83, '\x82'): 80,
(83, '\x83'): 80,
(83, '\x84'): 80,
(83, '\x85'): 80,
(83, '\x86'): 80,
(83, '\x87'): 80,
(83, '\x88'): 80,
(83, '\x89'): 80,
(83, '\x8a'): 80,
(83, '\x8b'): 80,
(83, '\x8c'): 80,
(83, '\x8d'): 80,
(83, '\x8e'): 80,
(83, '\x8f'): 80,
(83, '\x90'): 80,
(83, '\x91'): 80,
(83, '\x92'): 80,
(83, '\x93'): 80,
(83, '\x94'): 80,
(83, '\x95'): 80,
(83, '\x96'): 80,
(83, '\x97'): 80,
(83, '\x98'): 80,
(83, '\x99'): 80,
(83, '\x9a'): 80,
(83, '\x9b'): 80,
(83, '\x9c'): 80,
(83, '\x9d'): 80,
(83, '\x9e'): 80,
(83, '\x9f'): 80,
(83, '\xa0'): 80,
(83, '\xa1'): 80,
(83, '\xa2'): 80,
(83, '\xa3'): 80,
(83, '\xa4'): 80,
(83, '\xa5'): 80,
(83, '\xa6'): 80,
(83, '\xa7'): 80,
(83, '\xa8'): 80,
(83, '\xa9'): 80,
(83, '\xaa'): 80,
(83, '\xab'): 80,
(83, '\xac'): 80,
(83, '\xad'): 80,
(83, '\xae'): 80,
(83, '\xaf'): 80,
(83, '\xb0'): 80,
(83, '\xb1'): 80,
(83, '\xb2'): 80,
(83, '\xb3'): 80,
(83, '\xb4'): 80,
(83, '\xb5'): 80,
(83, '\xb6'): 80,
(83, '\xb7'): 80,
(83, '\xb8'): 80,
(83, '\xb9'): 80,
(83, '\xba'): 80,
(83, '\xbb'): 80,
(83, '\xbc'): 80,
(83, '\xbd'): 80,
(83, '\xbe'): 80,
(83, '\xbf'): 80,
(83, '\xc0'): 80,
(83, '\xc1'): 80,
(83, '\xc2'): 80,
(83, '\xc3'): 80,
(83, '\xc4'): 80,
(83, '\xc5'): 80,
(83, '\xc6'): 80,
(83, '\xc7'): 80,
(83, '\xc8'): 80,
(83, '\xc9'): 80,
(83, '\xca'): 80,
(83, '\xcb'): 80,
(83, '\xcc'): 80,
(83, '\xcd'): 80,
(83, '\xce'): 80,
(83, '\xcf'): 80,
(83, '\xd0'): 80,
(83, '\xd1'): 80,
(83, '\xd2'): 80,
(83, '\xd3'): 80,
(83, '\xd4'): 80,
(83, '\xd5'): 80,
(83, '\xd6'): 80,
(83, '\xd7'): 80,
(83, '\xd8'): 80,
(83, '\xd9'): 80,
(83, '\xda'): 80,
(83, '\xdb'): 80,
(83, '\xdc'): 80,
(83, '\xdd'): 80,
(83, '\xde'): 80,
(83, '\xdf'): 80,
(83, '\xe0'): 80,
(83, '\xe1'): 80,
(83, '\xe2'): 80,
(83, '\xe3'): 80,
(83, '\xe4'): 80,
(83, '\xe5'): 80,
(83, '\xe6'): 80,
(83, '\xe7'): 80,
(83, '\xe8'): 80,
(83, '\xe9'): 80,
(83, '\xea'): 80,
(83, '\xeb'): 80,
(83, '\xec'): 80,
(83, '\xed'): 80,
(83, '\xee'): 80,
(83, '\xef'): 80,
(83, '\xf0'): 80,
(83, '\xf1'): 80,
(83, '\xf2'): 80,
(83, '\xf3'): 80,
(83, '\xf4'): 80,
(83, '\xf5'): 80,
(83, '\xf6'): 80,
(83, '\xf7'): 80,
(83, '\xf8'): 80,
(83, '\xf9'): 80,
(83, '\xfa'): 80,
(83, '\xfb'): 80,
(83, '\xfc'): 80,
(83, '\xfd'): 80,
(83, '\xfe'): 80,
(83, '\xff'): 80,
(84, '\x00'): 80,
(84, '\x01'): 80,
(84, '\x02'): 80,
(84, '\x03'): 80,
(84, '\x04'): 80,
(84, '\x05'): 80,
(84, '\x06'): 80,
(84, '\x07'): 80,
(84, '\x08'): 80,
(84, '\t'): 80,
(84, '\n'): 80,
(84, '\x0b'): 80,
(84, '\x0c'): 80,
(84, '\r'): 80,
(84, '\x0e'): 80,
(84, '\x0f'): 80,
(84, '\x10'): 80,
(84, '\x11'): 80,
(84, '\x12'): 80,
(84, '\x13'): 80,
(84, '\x14'): 80,
(84, '\x15'): 80,
(84, '\x16'): 80,
(84, '\x17'): 80,
(84, '\x18'): 80,
(84, '\x19'): 80,
(84, '\x1a'): 80,
(84, '\x1b'): 80,
(84, '\x1c'): 80,
(84, '\x1d'): 80,
(84, '\x1e'): 80,
(84, '\x1f'): 80,
(84, ' '): 80,
(84, '!'): 80,
(84, '"'): 80,
(84, '#'): 80,
(84, '$'): 80,
(84, '%'): 80,
(84, '&'): 80,
(84, "'"): 80,
(84, '('): 80,
(84, ')'): 80,
(84, '*'): 83,
(84, '+'): 80,
(84, ','): 80,
(84, '-'): 80,
(84, '.'): 80,
(84, '/'): 85,
(84, '0'): 80,
(84, '1'): 80,
(84, '2'): 80,
(84, '3'): 80,
(84, '4'): 80,
(84, '5'): 80,
(84, '6'): 80,
(84, '7'): 80,
(84, '8'): 80,
(84, '9'): 80,
(84, ':'): 80,
(84, ';'): 80,
(84, '<'): 80,
(84, '='): 80,
(84, '>'): 80,
(84, '?'): 80,
(84, '@'): 80,
(84, 'A'): 80,
(84, 'B'): 80,
(84, 'C'): 80,
(84, 'D'): 80,
(84, 'E'): 80,
(84, 'F'): 80,
(84, 'G'): 80,
(84, 'H'): 80,
(84, 'I'): 80,
(84, 'J'): 80,
(84, 'K'): 80,
(84, 'L'): 80,
(84, 'M'): 80,
(84, 'N'): 80,
(84, 'O'): 80,
(84, 'P'): 80,
(84, 'Q'): 80,
(84, 'R'): 80,
(84, 'S'): 80,
(84, 'T'): 80,
(84, 'U'): 80,
(84, 'V'): 80,
(84, 'W'): 80,
(84, 'X'): 80,
(84, 'Y'): 80,
(84, 'Z'): 80,
(84, '['): 80,
(84, '\\'): 80,
(84, ']'): 80,
(84, '^'): 80,
(84, '_'): 80,
(84, '`'): 80,
(84, 'a'): 80,
(84, 'b'): 80,
(84, 'c'): 80,
(84, 'd'): 80,
(84, 'e'): 80,
(84, 'f'): 80,
(84, 'g'): 80,
(84, 'h'): 80,
(84, 'i'): 80,
(84, 'j'): 80,
(84, 'k'): 80,
(84, 'l'): 80,
(84, 'm'): 80,
(84, 'n'): 80,
(84, 'o'): 80,
(84, 'p'): 80,
(84, 'q'): 80,
(84, 'r'): 80,
(84, 's'): 80,
(84, 't'): 80,
(84, 'u'): 80,
(84, 'v'): 80,
(84, 'w'): 80,
(84, 'x'): 80,
(84, 'y'): 80,
(84, 'z'): 80,
(84, '{'): 80,
(84, '|'): 80,
(84, '}'): 80,
(84, '~'): 80,
(84, '\x7f'): 80,
(84, '\x80'): 80,
(84, '\x81'): 80,
(84, '\x82'): 80,
(84, '\x83'): 80,
(84, '\x84'): 80,
(84, '\x85'): 80,
(84, '\x86'): 80,
(84, '\x87'): 80,
(84, '\x88'): 80,
(84, '\x89'): 80,
(84, '\x8a'): 80,
(84, '\x8b'): 80,
(84, '\x8c'): 80,
(84, '\x8d'): 80,
(84, '\x8e'): 80,
(84, '\x8f'): 80,
(84, '\x90'): 80,
(84, '\x91'): 80,
(84, '\x92'): 80,
(84, '\x93'): 80,
(84, '\x94'): 80,
(84, '\x95'): 80,
(84, '\x96'): 80,
(84, '\x97'): 80,
(84, '\x98'): 80,
(84, '\x99'): 80,
(84, '\x9a'): 80,
(84, '\x9b'): 80,
(84, '\x9c'): 80,
(84, '\x9d'): 80,
(84, '\x9e'): 80,
(84, '\x9f'): 80,
(84, '\xa0'): 80,
(84, '\xa1'): 80,
(84, '\xa2'): 80,
(84, '\xa3'): 80,
(84, '\xa4'): 80,
(84, '\xa5'): 80,
(84, '\xa6'): 80,
(84, '\xa7'): 80,
(84, '\xa8'): 80,
(84, '\xa9'): 80,
(84, '\xaa'): 80,
(84, '\xab'): 80,
(84, '\xac'): 80,
(84, '\xad'): 80,
(84, '\xae'): 80,
(84, '\xaf'): 80,
(84, '\xb0'): 80,
(84, '\xb1'): 80,
(84, '\xb2'): 80,
(84, '\xb3'): 80,
(84, '\xb4'): 80,
(84, '\xb5'): 80,
(84, '\xb6'): 80,
(84, '\xb7'): 80,
(84, '\xb8'): 80,
(84, '\xb9'): 80,
(84, '\xba'): 80,
(84, '\xbb'): 80,
(84, '\xbc'): 80,
(84, '\xbd'): 80,
(84, '\xbe'): 80,
(84, '\xbf'): 80,
(84, '\xc0'): 80,
(84, '\xc1'): 80,
(84, '\xc2'): 80,
(84, '\xc3'): 80,
(84, '\xc4'): 80,
(84, '\xc5'): 80,
(84, '\xc6'): 80,
(84, '\xc7'): 80,
(84, '\xc8'): 80,
(84, '\xc9'): 80,
(84, '\xca'): 80,
(84, '\xcb'): 80,
(84, '\xcc'): 80,
(84, '\xcd'): 80,
(84, '\xce'): 80,
(84, '\xcf'): 80,
(84, '\xd0'): 80,
(84, '\xd1'): 80,
(84, '\xd2'): 80,
(84, '\xd3'): 80,
(84, '\xd4'): 80,
(84, '\xd5'): 80,
(84, '\xd6'): 80,
| |
0.5*m.x1649 - 0.5*m.x1650 - 0.5*m.x1843 - 0.5*m.x1844 + m.x3575
== 0)
m.c3530 = Constraint(expr= - 0.5*m.x1262 - 0.5*m.x1263 - 0.5*m.x1650 - 0.5*m.x1651 - 0.5*m.x1844 - 0.5*m.x1845 + m.x3576
== 0)
m.c3531 = Constraint(expr= - 0.5*m.x1263 - 0.5*m.x1264 - 0.5*m.x1651 - 0.5*m.x1652 - 0.5*m.x1845 - 0.5*m.x1846 + m.x3577
== 0)
m.c3532 = Constraint(expr= - 0.5*m.x1264 - 0.5*m.x1265 - 0.5*m.x1652 - 0.5*m.x1653 - 0.5*m.x1846 - 0.5*m.x1847 + m.x3578
== 0)
m.c3533 = Constraint(expr= - 0.5*m.x1265 - 0.5*m.x1266 - 0.5*m.x1653 - 0.5*m.x1654 - 0.5*m.x1847 - 0.5*m.x1848 + m.x3579
== 0)
m.c3534 = Constraint(expr= - 0.5*m.x1266 - 0.5*m.x1267 - 0.5*m.x1654 - 0.5*m.x1655 - 0.5*m.x1848 - 0.5*m.x1849 + m.x3580
== 0)
m.c3535 = Constraint(expr= - 0.5*m.x1267 - 0.5*m.x1268 - 0.5*m.x1655 - 0.5*m.x1656 - 0.5*m.x1849 - 0.5*m.x1850 + m.x3581
== 0)
m.c3536 = Constraint(expr= - 0.5*m.x1268 - 0.5*m.x1269 - 0.5*m.x1656 - 0.5*m.x1657 - 0.5*m.x1850 - 0.5*m.x1851 + m.x3582
== 0)
m.c3537 = Constraint(expr= - 0.5*m.x1269 - 0.5*m.x1270 - 0.5*m.x1657 - 0.5*m.x1658 - 0.5*m.x1851 - 0.5*m.x1852 + m.x3583
== 0)
m.c3538 = Constraint(expr= - 0.5*m.x1270 - 0.5*m.x1271 - 0.5*m.x1658 - 0.5*m.x1659 - 0.5*m.x1852 - 0.5*m.x1853 + m.x3584
== 0)
m.c3539 = Constraint(expr= - 0.5*m.x1271 - 0.5*m.x1272 - 0.5*m.x1659 - 0.5*m.x1660 - 0.5*m.x1853 - 0.5*m.x1854 + m.x3585
== 0)
m.c3540 = Constraint(expr= - 0.5*m.x1272 - 0.5*m.x1273 - 0.5*m.x1660 - 0.5*m.x1661 - 0.5*m.x1854 - 0.5*m.x1855 + m.x3586
== 0)
m.c3541 = Constraint(expr= - 0.5*m.x1273 - 0.5*m.x1274 - 0.5*m.x1661 - 0.5*m.x1662 - 0.5*m.x1855 - 0.5*m.x1856 + m.x3587
== 0)
m.c3542 = Constraint(expr= - 0.5*m.x1274 - 0.5*m.x1275 - 0.5*m.x1662 - 0.5*m.x1663 - 0.5*m.x1856 - 0.5*m.x1857 + m.x3588
== 0)
m.c3543 = Constraint(expr= - 0.5*m.x1275 - 0.5*m.x1276 - 0.5*m.x1663 - 0.5*m.x1664 - 0.5*m.x1857 - 0.5*m.x1858 + m.x3589
== 0)
m.c3544 = Constraint(expr= - 0.5*m.x1276 - 0.5*m.x1277 - 0.5*m.x1664 - 0.5*m.x1665 - 0.5*m.x1858 - 0.5*m.x1859 + m.x3590
== 0)
m.c3545 = Constraint(expr= - 0.5*m.x1277 - 0.5*m.x1278 - 0.5*m.x1665 - 0.5*m.x1666 - 0.5*m.x1859 - 0.5*m.x1860 + m.x3591
== 0)
m.c3546 = Constraint(expr= - 0.5*m.x1278 - 0.5*m.x1279 - 0.5*m.x1666 - 0.5*m.x1667 - 0.5*m.x1860 - 0.5*m.x1861 + m.x3592
== 0)
m.c3547 = Constraint(expr= - 0.5*m.x1279 - 0.5*m.x1280 - 0.5*m.x1667 - 0.5*m.x1668 - 0.5*m.x1861 - 0.5*m.x1862 + m.x3593
== 0)
m.c3548 = Constraint(expr= - 0.5*m.x1280 - 0.5*m.x1281 - 0.5*m.x1668 - 0.5*m.x1669 - 0.5*m.x1862 - 0.5*m.x1863 + m.x3594
== 0)
m.c3549 = Constraint(expr= - 0.5*m.x1281 - 0.5*m.x1282 - 0.5*m.x1669 - 0.5*m.x1670 - 0.5*m.x1863 - 0.5*m.x1864 + m.x3595
== 0)
m.c3550 = Constraint(expr= - 0.5*m.x1282 - 0.5*m.x1283 - 0.5*m.x1670 - 0.5*m.x1671 - 0.5*m.x1864 - 0.5*m.x1865 + m.x3596
== 0)
m.c3551 = Constraint(expr= - 0.5*m.x1283 - 0.5*m.x1284 - 0.5*m.x1671 - 0.5*m.x1672 - 0.5*m.x1865 - 0.5*m.x1866 + m.x3597
== 0)
m.c3552 = Constraint(expr= - 0.5*m.x1284 - 0.5*m.x1285 - 0.5*m.x1672 - 0.5*m.x1673 - 0.5*m.x1866 - 0.5*m.x1867 + m.x3598
== 0)
m.c3553 = Constraint(expr= - 0.5*m.x1285 - 0.5*m.x1286 - 0.5*m.x1673 - 0.5*m.x1674 - 0.5*m.x1867 - 0.5*m.x1868 + m.x3599
== 0)
m.c3554 = Constraint(expr= - 0.5*m.x1286 - 0.5*m.x1287 - 0.5*m.x1674 - 0.5*m.x1675 - 0.5*m.x1868 - 0.5*m.x1869 + m.x3600
== 0)
m.c3555 = Constraint(expr= - 0.5*m.x1287 - 0.5*m.x1288 - 0.5*m.x1675 - 0.5*m.x1676 - 0.5*m.x1869 - 0.5*m.x1870 + m.x3601
== 0)
m.c3556 = Constraint(expr= - 0.5*m.x1288 - 0.5*m.x1289 - 0.5*m.x1676 - 0.5*m.x1677 - 0.5*m.x1870 - 0.5*m.x1871 + m.x3602
== 0)
m.c3557 = Constraint(expr= - 0.5*m.x1289 - 0.5*m.x1290 - 0.5*m.x1677 - 0.5*m.x1678 - 0.5*m.x1871 - 0.5*m.x1872 + m.x3603
== 0)
m.c3558 = Constraint(expr= - 0.5*m.x1290 - 0.5*m.x1291 - 0.5*m.x1678 - 0.5*m.x1679 - 0.5*m.x1872 - 0.5*m.x1873 + m.x3604
== 0)
m.c3559 = Constraint(expr= - 0.5*m.x1291 - 0.5*m.x1292 - 0.5*m.x1679 - 0.5*m.x1680 - 0.5*m.x1873 - 0.5*m.x1874 + m.x3605
== 0)
m.c3560 = Constraint(expr= - 0.5*m.x1292 - 0.5*m.x1293 - 0.5*m.x1680 - 0.5*m.x1681 - 0.5*m.x1874 - 0.5*m.x1875 + m.x3606
== 0)
m.c3561 = Constraint(expr= - 0.5*m.x1293 - 0.5*m.x1294 - 0.5*m.x1681 - 0.5*m.x1682 - 0.5*m.x1875 - 0.5*m.x1876 + m.x3607
== 0)
m.c3562 = Constraint(expr= - 0.5*m.x1294 - 0.5*m.x1295 - 0.5*m.x1682 - 0.5*m.x1683 - 0.5*m.x1876 - 0.5*m.x1877 + m.x3608
== 0)
m.c3563 = Constraint(expr= - 0.5*m.x1295 - 0.5*m.x1296 - 0.5*m.x1683 - 0.5*m.x1684 - 0.5*m.x1877 - 0.5*m.x1878 + m.x3609
== 0)
m.c3564 = Constraint(expr= - 0.5*m.x1296 - 0.5*m.x1297 - 0.5*m.x1684 - 0.5*m.x1685 - 0.5*m.x1878 - 0.5*m.x1879 + m.x3610
== 0)
m.c3565 = Constraint(expr= - 0.5*m.x1297 - 0.5*m.x1298 - 0.5*m.x1685 - 0.5*m.x1686 - 0.5*m.x1879 - 0.5*m.x1880 + m.x3611
== 0)
m.c3566 = Constraint(expr= - 0.5*m.x1298 - 0.5*m.x1299 - 0.5*m.x1686 - 0.5*m.x1687 - 0.5*m.x1880 - 0.5*m.x1881 + m.x3612
== 0)
m.c3567 = Constraint(expr= - 0.5*m.x1299 - 0.5*m.x1300 - 0.5*m.x1687 - 0.5*m.x1688 - 0.5*m.x1881 - 0.5*m.x1882 + m.x3613
== 0)
m.c3568 = Constraint(expr= - 0.5*m.x1300 - 0.5*m.x1301 - 0.5*m.x1688 - 0.5*m.x1689 - 0.5*m.x1882 - 0.5*m.x1883 + m.x3614
== 0)
m.c3569 = Constraint(expr= - 0.5*m.x1301 - 0.5*m.x1302 - 0.5*m.x1689 - 0.5*m.x1690 - 0.5*m.x1883 - 0.5*m.x1884 + m.x3615
== 0)
m.c3570 = Constraint(expr= - 0.5*m.x1302 - 0.5*m.x1303 - 0.5*m.x1690 - 0.5*m.x1691 - 0.5*m.x1884 - 0.5*m.x1885 + m.x3616
== 0)
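# Note (illustrative only, not part of the generated model): constraints c3529
# through c3570 above share a single stencil -- each auxiliary variable
# m.x3575 .. m.x3616 equals half the sum of three consecutive variable pairs
# drawn from the x1261+, x1649+ and x1843+ blocks. If the x variables had been
# declared as one indexed Var (here they are scalar Vars), the same family
# could be generated with a rule; the offsets below are inferred from the
# listing, not taken from the original model generator:
#
#     def _pair_avg_rule(m, k):
#         return (m.x[3575 + k] == 0.5*(m.x[1261 + k] + m.x[1262 + k])
#                                + 0.5*(m.x[1649 + k] + m.x[1650 + k])
#                                + 0.5*(m.x[1843 + k] + m.x[1844 + k]))
#     # m.pair_avg = Constraint(range(42), rule=_pair_avg_rule)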
m.c3571 = Constraint(expr= - 0.5*m.x1304 - 0.5*m.x1305 - 0.5*m.x1692 - 0.5*m.x1693 - 0.5*m.x1886 - 0.5*m.x1887 + m.x3617
== 0)
m.c3572 = Constraint(expr= - 0.5*m.x1305 - 0.5*m.x1306 - 0.5*m.x1693 - 0.5*m.x1694 - 0.5*m.x1887 - 0.5*m.x1888 + m.x3618
== 0)
m.c3573 = Constraint(expr= - 0.5*m.x1306 - 0.5*m.x1307 - 0.5*m.x1694 - 0.5*m.x1695 - 0.5*m.x1888 - 0.5*m.x1889 + m.x3619
== 0)
m.c3574 = Constraint(expr= - 0.5*m.x1307 - 0.5*m.x1308 - 0.5*m.x1695 - 0.5*m.x1696 - 0.5*m.x1889 - 0.5*m.x1890 + m.x3620
== 0)
m.c3575 = Constraint(expr= - 0.5*m.x1308 - 0.5*m.x1309 - 0.5*m.x1696 - 0.5*m.x1697 - 0.5*m.x1890 - 0.5*m.x1891 + m.x3621
== 0)
m.c3576 = Constraint(expr= - 0.5*m.x1309 - 0.5*m.x1310 - 0.5*m.x1697 - 0.5*m.x1698 - 0.5*m.x1891 - 0.5*m.x1892 + m.x3622
== 0)
m.c3577 = Constraint(expr= - 0.5*m.x1310 - 0.5*m.x1311 - 0.5*m.x1698 - 0.5*m.x1699 - 0.5*m.x1892 - 0.5*m.x1893 + m.x3623
== 0)
m.c3578 = Constraint(expr= - 0.5*m.x1311 - 0.5*m.x1312 - 0.5*m.x1699 - 0.5*m.x1700 - 0.5*m.x1893 - 0.5*m.x1894 + m.x3624
== 0)
m.c3579 = Constraint(expr= - 0.5*m.x1312 - 0.5*m.x1313 - 0.5*m.x1700 - 0.5*m.x1701 - 0.5*m.x1894 - 0.5*m.x1895 + m.x3625
== 0)
m.c3580 = Constraint(expr= - 0.5*m.x1313 - 0.5*m.x1314 - 0.5*m.x1701 - 0.5*m.x1702 - 0.5*m.x1895 - 0.5*m.x1896 + m.x3626
== 0)
m.c3581 = Constraint(expr= - 0.5*m.x1314 - 0.5*m.x1315 - 0.5*m.x1702 - 0.5*m.x1703 - 0.5*m.x1896 - 0.5*m.x1897 + m.x3627
== 0)
m.c3582 = Constraint(expr= - 0.5*m.x1315 - 0.5*m.x1316 - 0.5*m.x1703 - 0.5*m.x1704 - 0.5*m.x1897 - 0.5*m.x1898 + m.x3628
== 0)
m.c3583 = Constraint(expr= - 0.5*m.x1316 - 0.5*m.x1317 - 0.5*m.x1704 - 0.5*m.x1705 - 0.5*m.x1898 - 0.5*m.x1899 + m.x3629
== 0)
m.c3584 = Constraint(expr= - 0.5*m.x1317 - 0.5*m.x1318 - 0.5*m.x1705 - 0.5*m.x1706 - 0.5*m.x1899 - 0.5*m.x1900 + m.x3630
== 0)
m.c3585 = Constraint(expr= - 0.5*m.x1318 - 0.5*m.x1319 - 0.5*m.x1706 - 0.5*m.x1707 - 0.5*m.x1900 - 0.5*m.x1901 + m.x3631
== 0)
m.c3586 = Constraint(expr= - 0.5*m.x1319 - 0.5*m.x1320 - 0.5*m.x1707 - 0.5*m.x1708 - 0.5*m.x1901 - 0.5*m.x1902 + m.x3632
== 0)
m.c3587 = Constraint(expr= - 0.5*m.x1320 - 0.5*m.x1321 - 0.5*m.x1708 - 0.5*m.x1709 - 0.5*m.x1902 - 0.5*m.x1903 + m.x3633
== 0)
m.c3588 = Constraint(expr= - 0.5*m.x1321 - 0.5*m.x1322 - 0.5*m.x1709 - 0.5*m.x1710 - 0.5*m.x1903 - 0.5*m.x1904 + m.x3634
== 0)
m.c3589 = Constraint(expr= - 0.5*m.x1322 - 0.5*m.x1323 - 0.5*m.x1710 - 0.5*m.x1711 - 0.5*m.x1904 - 0.5*m.x1905 + m.x3635
== 0)
m.c3590 = Constraint(expr= - 0.5*m.x1323 - 0.5*m.x1324 - 0.5*m.x1711 - 0.5*m.x1712 - 0.5*m.x1905 - 0.5*m.x1906 + m.x3636
== 0)
m.c3591 = Constraint(expr= - 0.5*m.x1324 - 0.5*m.x1325 - 0.5*m.x1712 - 0.5*m.x1713 - 0.5*m.x1906 - 0.5*m.x1907 + m.x3637
== 0)
m.c3592 = Constraint(expr= - 0.5*m.x1325 - 0.5*m.x1326 - 0.5*m.x1713 - 0.5*m.x1714 - 0.5*m.x1907 - 0.5*m.x1908 + m.x3638
== 0)
m.c3593 = Constraint(expr= - 0.5*m.x1326 - 0.5*m.x1327 - 0.5*m.x1714 - 0.5*m.x1715 - 0.5*m.x1908 - 0.5*m.x1909 + m.x3639
== 0)
m.c3594 = Constraint(expr= - 0.5*m.x1327 - 0.5*m.x1328 - 0.5*m.x1715 - 0.5*m.x1716 - 0.5*m.x1909 - 0.5*m.x1910 + m.x3640
== 0)
m.c3595 = Constraint(expr= - 0.5*m.x1328 - 0.5*m.x1329 - 0.5*m.x1716 - 0.5*m.x1717 - 0.5*m.x1910 - 0.5*m.x1911 + m.x3641
== 0)
m.c3596 = Constraint(expr= - 0.5*m.x1329 - 0.5*m.x1330 - 0.5*m.x1717 - 0.5*m.x1718 - 0.5*m.x1911 - 0.5*m.x1912 + m.x3642
== 0)
m.c3597 = Constraint(expr= - 0.5*m.x1330 - 0.5*m.x1331 - 0.5*m.x1718 - 0.5*m.x1719 - 0.5*m.x1912 - 0.5*m.x1913 + m.x3643
== 0)
m.c3598 = Constraint(expr= - 0.5*m.x1331 - 0.5*m.x1332 - 0.5*m.x1719 - 0.5*m.x1720 - 0.5*m.x1913 - 0.5*m.x1914 + m.x3644
== 0)
m.c3599 = Constraint(expr= - 0.5*m.x1332 - 0.5*m.x1333 - 0.5*m.x1720 - 0.5*m.x1721 - 0.5*m.x1914 - 0.5*m.x1915 + m.x3645
== 0)
m.c3600 = Constraint(expr= - 0.5*m.x1333 - 0.5*m.x1334 - 0.5*m.x1721 - | |
QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
sizePolicyFF = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicyFF.setHorizontalStretch(0)
sizePolicyFF.setVerticalStretch(0)
sizePolicyFF.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicyFF)
# self.pushButton.setGeometry(QtCore.QRect(690, 160, 101, 23))
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 6, 6, 1, 2)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
# self.pushButton_2.setGeometry(QtCore.QRect(690, 130, 101, 23))
sizePolicyMF = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicyMF.setHorizontalStretch(0)
sizePolicyMF.setVerticalStretch(0)
sizePolicyMF.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
self.pushButton_2.setSizePolicy(sizePolicyMF)
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 5, 6, 1, 2)
# self.incr = 0
self.max_prog = 0
self.pushButton_2.clicked.connect(self.scan_mods)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
# self.pushButton_3.setGeometry(QtCore.QRect(690, 220, 101, 23))
sizePolicyMF.setHeightForWidth(self.pushButton_3.sizePolicy().hasHeightForWidth())
self.pushButton_3.setSizePolicy(sizePolicyMF)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout.addWidget(self.pushButton_3, 8, 6, 1, 2)
# self.pushButton_3.clicked.connect(self.profile_name)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
sizePolicyMF.setHeightForWidth(self.pushButton_4.sizePolicy().hasHeightForWidth())
self.pushButton_4.setSizePolicy(sizePolicyMF)
# self.pushButton_4.setGeometry(QtCore.QRect(690, 190, 101, 23))
self.pushButton_4.setObjectName("pushButton_4")
self.gridLayout.addWidget(self.pushButton_4, 7, 6, 1, 2)
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
sizePolicyFF.setHeightForWidth(self.pushButton_5.sizePolicy().hasHeightForWidth())
self.pushButton_5.setSizePolicy(sizePolicyFF)
self.pushButton_5.setMaximumSize(QtCore.QSize(25, 22))
# self.pushButton_5.setGeometry(QtCore.QRect(660, 20, 25, 23))
self.gridLayout.addWidget(self.pushButton_5, 1, 5, 1, 1)
self.pushButton_5.setObjectName("toolButton")
self.pushButton_5.clicked.connect(self.get_dir)
self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)
# self.pushButton_6.setGeometry(QtCore.QRect(690, 100, 101, 23))
sizePolicyMF.setHeightForWidth(self.pushButton_6.sizePolicy().hasHeightForWidth())
self.pushButton_6.setSizePolicy(sizePolicyMF)
self.pushButton_6.setObjectName("pushButton_6")
self.gridLayout.addWidget(self.pushButton_6, 4, 6, 1, 2)
self.pushButton_6.clicked.connect(self.pick_profile)
self.pushButton_7 = QtWidgets.QPushButton(self.centralwidget)
# self.pushButton_7.setGeometry(QtCore.QRect(690, 70, 101, 23))
sizePolicyMF.setHeightForWidth(self.pushButton_7.sizePolicy().hasHeightForWidth())
self.pushButton_7.setSizePolicy(sizePolicyMF)
self.pushButton_7.setObjectName("pushButton_7")
self.gridLayout.addWidget(self.pushButton_7, 3, 6, 1, 2)
self.pushButton_7.clicked.connect(self.clear_table)
self.pushButton_8 = QtWidgets.QPushButton(self.centralwidget)
# self.pushButton_8.setGeometry(QtCore.QRect(5, 47, 65, 19))
sizePolicyFF.setHeightForWidth(self.pushButton_8.sizePolicy().hasHeightForWidth())
self.pushButton_8.setSizePolicy(sizePolicyFF)
self.pushButton_8.setObjectName("pushButton_8")
self.pushButton_8.setMaximumSize(QtCore.QSize(65, 19))
self.gridLayout.addWidget(self.pushButton_8, 2, 0, 1, 1)
self.btn_st = None
self.pushButton_8.clicked.connect(self.sel_all)
self.pushButton_9 = QtWidgets.QPushButton(self.centralwidget)
# self.pushButton_9.setGeometry(QtCore.QRect(234, 47, 25, 19))
sizePolicyFF.setHeightForWidth(self.pushButton_9.sizePolicy().hasHeightForWidth())
self.pushButton_9.setSizePolicy(sizePolicyFF)
self.pushButton_9.setObjectName("pushButton_9")
self.pushButton_9.setMaximumSize(QtCore.QSize(25, 19))
self.gridLayout.addWidget(self.pushButton_9, 2, 3, 1, 1)
self.pushButton_9.clicked.connect(self.get_checked)
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
sizePolicyFF.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
self.comboBox.setSizePolicy(sizePolicyFF)
# self.comboBox.setGeometry(QtCore.QRect(125, 47, 105, 19))
self.comboBox.addItem("")
self.comboBox.addItem("")
self.comboBox.setObjectName("comboBox")
self.comboBox.setMaximumSize(QtCore.QSize(105, 19))
self.gridLayout.addWidget(self.comboBox, 2, 2, 1, 1)
# self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
# self.progressBar.setGeometry(QtCore.QRect(5, 508, 681, 20))
# self.progressBar.setProperty("value", 0)
# self.progressBar.setObjectName("progressBar")
# self.gridLayout.addWidget(self.progressBar, 13, 0, 1, 6)
# self.progressBar.hide()
self.progressBar_1 = QtWidgets.QProgressBar(self.centralwidget)
# self.progressBar_1.setGeometry(QtCore.QRect(5, 505, 716, 12))
self.progressBar_1.setProperty("value", 0)
self.progressBar_1.setObjectName("progressBar_1")
self.gridLayout.addWidget(self.progressBar_1, 13, 0, 1, 6)
self.progressBar_1.hide()
# self.progressBar_2 = QtWidgets.QProgressBar(self.centralwidget)
# self.progressBar_2.setGeometry(QtCore.QRect(5, 532, 716, 12))
# self.progressBar_2.setProperty("value", 0)
# self.progressBar_2.setObjectName("progressBar_2")
# self.progressBar_2.hide()
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(5, 530, 671, 16))
self.label.setObjectName("label")
self.label.hide()
self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
# self.tableWidget.setGeometry(QtCore.QRect(5, 70, 681, 431))
sizePolicyEE = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicyEE.setHorizontalStretch(0)
sizePolicyEE.setVerticalStretch(0)
sizePolicyEE.setHeightForWidth(self.tableWidget.sizePolicy().hasHeightForWidth())
self.tableWidget.setSizePolicy(sizePolicyEE)
self.tableWidget.setMinimumSize(QtCore.QSize(681, 431))
self.tableWidget.setObjectName("tableWidget")
self.gridLayout.addWidget(self.tableWidget, 3, 0, 10, 6)
self.tableWidget.setColumnCount(4)
self.tableWidget.setRowCount(row_count)
self.tableWidget.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
#item = QtWidgets.QTableWidgetItem()
#self.tableWidget.setVerticalHeaderItem(0, item)
#item = QtWidgets.QTableWidgetItem()
#self.tableWidget.setVerticalHeaderItem(1, item)
#item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderLabels(['Mod Name', 'Current', 'Update', 'Changelog'])
#item = QtWidgets.QTableWidgetItem()
#self.tableWidget.setHorizontalHeaderItem(1, item)
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)
header.setSectionResizeMode(3, QtWidgets.QHeaderView.Stretch)
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
# self.lineEdit.setGeometry(QtCore.QRect(5, 20, 651, 23))
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 1, 0, 1, 5)
self.lineEdit.setReadOnly(False)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
# self.label_2.setGeometry(QtCore.QRect(6, 3, 71, 16))
sizePolicyFF.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicyFF)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
# self.label_3.setGeometry(QtCore.QRect(690, 263, 31, 16))
sizePolicyFF.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicyFF)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 10, 6, 1, 1)
# self.label_3.hide()
self.label_4 = QtWidgets.QLabel(self.centralwidget)
# self.label_4.setGeometry(QtCore.QRect(750, 263, 21, 16))
sizePolicyFF.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicyFF)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 10, 7, 1, 1)
# self.label_4.hide()
self.label_5 = QtWidgets.QLabel(self.centralwidget)
# self.label_5.setGeometry(QtCore.QRect(690, 280, 47, 13))
sizePolicyFF.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicyFF)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 11, 6, 1, 1)
self.label_5.hide()
self.label_6 = QtWidgets.QLabel(self.centralwidget)
# self.label_6.setGeometry(QtCore.QRect(750, 279, 41, 16))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.label_6.setPalette(palette)
sizePolicyFF.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicyFF)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 11, 7, 1, 1)
self.label_6.hide()
self.filler_label = QtWidgets.QLabel(self.centralwidget)
# self.label_5.setGeometry(QtCore.QRect(690, 280, 47, 13))
sizePolicyEE.setHeightForWidth(self.filler_label.sizePolicy().hasHeightForWidth())
self.filler_label.setSizePolicy(sizePolicyEE)
self.filler_label.setObjectName("emptyWidget")
self.gridLayout.addWidget(self.filler_label, 12, 6, 1, 2)
self.label_7 = QtWidgets.QLabel(self.centralwidget)
# self.label_7.setGeometry(QtCore.QRect(80, 50, 47, 13))
sizePolicyFF.setHeightForWidth(self.label_7.sizePolicy().hasHeightForWidth())
self.label_7.setSizePolicy(sizePolicyFF)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 2, 1, 1, 1)
self.label_8 = QtWidgets.QLabel(self.centralwidget)
# self.label_8.setGeometry(QtCore.QRect(690, 250, 47, 13))
sizePolicyFF.setHeightForWidth(self.label_8.sizePolicy().hasHeightForWidth())
self.label_8.setSizePolicy(sizePolicyFF)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 9, 6, 1, 1)
# self.label_8.hide()
self.label_9 = QtWidgets.QLabel(self.centralwidget)
# self.label_9.setGeometry(QtCore.QRect(750, 250, 47, 13))
sizePolicyFF.setHeightForWidth(self.label_9.sizePolicy().hasHeightForWidth())
self.label_9.setSizePolicy(sizePolicyFF)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 9, 7, 1, 1)
# self.label_9.hide()
# self.label_10 = QtWidgets.QLabel(self.centralwidget)
# self.label_10.setGeometry(QtCore.QRect(5, 542, 681, 16))
# self.label_10.setObjectName("label_10")
# self.label_10.hide()
self.label_11 = QtWidgets.QLabel(self.centralwidget)
# self.label_11.setGeometry(QtCore.QRect(5, 515, 681, 16))
sizePolicyFF.setHeightForWidth(self.label_11.sizePolicy().hasHeightForWidth())
self.label_11.setSizePolicy(sizePolicyFF)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 14, 0, 1, 6)
self.label_11.hide()
# self.label_12 = QtWidgets.QLabel(self.centralwidget)
# self.label_12.setGeometry(QtCore.QRect(5, 532, 681, 16))
# self.label_12.setObjectName("label_12")
# self.label_12.hide()
self.label_6.mousePressEvent = self.no_ID_msg
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 795, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
MainWindow.setMenuBar(self.menubar)
self.actionLoad_Profile = QtWidgets.QAction(MainWindow)
# self.actionLoad_Profile.setObjectName("actionLoad_Profile")
self.actionClear_Cache = QtWidgets.QAction(MainWindow)
self.actionClear_Cache.setObjectName("actionClear_Cache")
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
# self.menuFile.addAction(self.actionLoad_Profile)
self.menuFile.addAction(self.actionClear_Cache)
self.menuFile.addAction(self.actionExit)
self.menubar.addAction(self.menuFile.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Minecraft Mod Updater"))
self.pushButton_6.setText(_translate("MainWindow", "Load Profile"))
self.pushButton_2.setText(_translate("MainWindow", "Scan Mods"))
self.pushButton.setText(_translate("MainWindow", "Check for Update"))
self.pushButton_3.setText(_translate("MainWindow", "Restore"))
self.pushButton_4.setText(_translate("MainWindow", "Update"))
self.pushButton_5.setText(_translate("MainWindow", "..."))
self.pushButton_7.setText(_translate("MainWindow", "Clear"))
self.pushButton_8.setText(_translate("MainWindow", "Select All"))
self.pushButton_9.setText(_translate("MainWindow", "Go"))
self.comboBox.setItemText(0, _translate("MainWindow", "All"))
self.comboBox.setItemText(1, _translate("MainWindow", "Update Available"))
self.label_7.setText(_translate("MainWindow", "Filter by:"))
self.label.setText(_translate("MainWindow", "TextLabel"))
#item = self.tableWidget.verticalHeaderItem(0)
#item.setText(_translate("MainWindow", "New Row"))
#item = self.tableWidget.verticalHeaderItem(1)
#item.setText(_translate("MainWindow", "New Row"))
#item = self.tableWidget.horizontalHeaderItem(0)
#item.setText(_translate("MainWindow", "New Column"))
#item = self.tableWidget.horizontalHeaderItem(1)
#item.setText(_translate("MainWindow", "New Column"))
self.label_2.setText(_translate("MainWindow", "Mod Directory:"))
self.label_3.setText(_translate("MainWindow", "Mods:"))
self.label_4.setText(_translate("MainWindow", ""))
self.label_5.setText(_translate("MainWindow", "Not ID\'d:"))
self.label_6.setText(_translate("MainWindow", ""))
self.label_8.setText(_translate("MainWindow", "MC Vers:"))
self.label_9.setText(_translate("MainWindow", ""))
self.label_11.setText(_translate("MainWindow", "Prog1"))
self.filler_label.setText(_translate("MainWindow", ""))
# self.label_10.setText(_translate("MainWindow", "Prog2"))
# self.label_12.setText(_translate("MainWindow", "Prog1_2"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
# self.actionLoad_Profile.setText(_translate("MainWindow", "Load Profile"))
self.actionClear_Cache.setText(_translate("MainWindow", "Clear Cache"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
def sel_all(self):
if self.btn_st:
self.pushButton_8.setText("Select All")
self.btn_st = None
for i in range(self.tableWidget.rowCount()):
self.tableWidget.item(i, 0).setCheckState(QtCore.Qt.Unchecked)
else:
self.pushButton_8.setText("Deselect All")
self.btn_st = True
for i in range(self.tableWidget.rowCount()):
self.tableWidget.item(i, 0).setCheckState(QtCore.Qt.Checked)
def pick_profile(self):
cur_dir = os.path.dirname(os.path.realpath(__file__))
cur_dir = cur_dir.replace('C:', '').replace('\\', '/')
profiles = []
no_profiles = True
for file in os.listdir(cur_dir):
if file.endswith(".json"):
no_profiles = False
profile = file.replace('.json', '')
profiles.append(profile)
if no_profiles:
self.no_profiles()
return
else:
dia_show = Dialog(profiles, cur_dir)
dia_show.dia_sig.connect(self.add_rows)
dia_show.dia_sig2.connect(self.update_mod_dir)
dia_show.dia_sig3.connect(self.no_ID)
dia_show.dia_sig4.connect(self.get_mc_vers)
dia_show.exec_()
@staticmethod
def no_profiles():
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setWindowTitle('Whoops')
msg.setText("There are currently no profiles available.")
msg.exec()
def update_mod_dir(self, mod_dir):
if system() == 'Windows':
mod_dir = 'C:' + mod_dir
mod_dir = mod_dir.replace('/', '\\')
self.lineEdit.setText(mod_dir)
def add_rows(self, mod_name_var, installed_file):
table = self.tableWidget
rows = table.rowCount()
table.insertRow(rows)
mod_name = QtWidgets.QTableWidgetItem(mod_name_var)
mod_name.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
mod_name.setCheckState(QtCore.Qt.Unchecked)
# mine_vers = QtWidgets.QTableWidgetItem(mine_vers)
installed_file = QtWidgets.QTableWidgetItem(installed_file)
# new_file = QtWidgets.QTableWidgetItem(new_file)
# changelog = QtWidgets.QTableWidgetItem(changelog)
table.setItem(rows, 0, mod_name)
# table.setItem(rows, 1, mine_vers)
table.setItem(rows, 1, installed_file)
table.scrollToBottom()
# lastIndex = table.currentIndex()
# item = table.item(lastIndex, 0)
# table.scrollToItem(item, QtWidgets.QAbstractItemView.PositionAtTop)
# table.selectRow(lastIndex)
# table.setItem(rows, 3, new_file)
# table.setItem(rows, 4, changelog)
rows = table.rowCount()
if not self.label_4.isVisible():
self.label_3.show()
self.label_4.show()
self.label_4.setText(str(rows))
# table.resizeColumnsToContents()
header = self.tableWidget.horizontalHeader()
header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)
def scan_mods(self):
test_prot = False # used to test mod finding; stops at the 3rd mod
mod_dir = self.lineEdit.text()
if not mod_dir:
self.dir_empty()
return
if not os.path.isdir(mod_dir):
self.bad_dir()
return
if not os.listdir(mod_dir):
self.no_mods()
return
mods = False
for fname in os.listdir(mod_dir):
if fname.endswith('.jar'):
mods = True
if not mods:
self.no_mods()
return
if 'C:' in mod_dir:
mod_dir = mod_dir.replace('\\', '/').replace('C:', '')
self.incr = 0
self.update_prog_1(False, None, None)
self.scanmods = Scan_Mods(mod_dir, test_prot)
self.scanmods.sig1.connect(self.add_rows)
self.scanmods.sig2.connect(self.conn_err)
self.scanmods.sig3.connect(self.no_ID)
self.scanmods.sig4.connect(self.profile_name)
self.scanmods.sig5.connect(self.update_prog_1)
self.scanmods.sig6.connect(self.done_scanning)
self.scanmods.sig7.connect(self.get_mc_vers)
self.scanmods.start()
@staticmethod
def conn_err():
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setWindowTitle('Error')
msg.setText("Error contacting server.\n\nMake sure you have an internet connection.")
msg.exec()
@staticmethod
def bad_dir():
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setWindowTitle('Error')
msg.setText("Hmm, you sure about that path? It looks suspect to me.")
msg.exec()
@staticmethod
def dir_empty():
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setWindowTitle('Error')
msg.setText("The Mod Directory field is empty. For now, there is nothing to scan.")
msg.exec()
@staticmethod
def no_mods():
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Warning)
msg.setWindowTitle('Error')
msg.setText("There don't appear to be any mods in this folder.")
msg.exec()
def profile_name(self, json_list):
p_name, okPressed = QtWidgets.QInputDialog.getText(None, "New Profile", "Profile name:",
QtWidgets.QLineEdit.Normal, "")
if not okPressed or not p_name:
while True:
qm = QtWidgets.QMessageBox()
ret = qm.question(None, 'Aww man...', "Without a profile name, you will have to rescan\nyour mod "
"folder everytime.\n\nAre you sure you want to continue without "
"one?", qm.Yes | qm.No)
if ret == qm.Yes:
return
else:
p_name, okPressed = QtWidgets.QInputDialog.getText(None, "New Profile", "Profile name:",
QtWidgets.QLineEdit.Normal, "")
if okPressed and p_name:
break
        if p_name:  # only write a profile file when a name was actually provided
self.create_json(p_name, json_list)
@staticmethod
def create_json(p_name, json_list):
# print(json_list)
filename = p_name + ".json"
json_obj = json.dumps(json_list, indent=4)
with open(filename, "w") as outfile:
outfile.write(json_obj)
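    # Illustrative note: create_json serialises the scan results emitted by the
    # worker thread into "<profile name>.json" in the current working directory;
    # pick_profile() later globs for *.json files in the script's own directory.
    # A hypothetical call (the structure of json_list comes from Scan_Mods and
    # is not shown here):
    #
    #     self.create_json("vanilla_plus", json_list)
    #     # -> writes vanilla_plus.json, pretty-printed with indent=4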
def no_ID(self, nf_cnt, not_found):
self.label_6.setText(str(nf_cnt))
self.label_5.show()
self.label_6.show()
self.not_found = not_found
def no_ID_msg(self):
nf_str = ""
for item in self.not_found:
if nf_str:
nf_str = nf_str + "\n" + item
else:
nf_str = item
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Information)
msg.setWindowTitle('Whoops')
msg.setText("These mods could not be identified:\n\n" + nf_str + "\n\nYou can add them to the JSON or update "
"them manually.")
msg.exec()
def get_dir(self):
f_d = QtWidgets.QFileDialog
mod_dir = str(f_d.getExistingDirectory(None, "Select Directory", mine_dir))
if system() == 'Windows':
mod_dir = mod_dir.replace('/', '\\')
self.lineEdit.setText(mod_dir)
    def update_prog_1(self, prog_i, max_prog=0, mod_name=""):
if max_prog:
# self.max_prog = max_prog
self.progressBar_1.setMaximum(max_prog)
self.progressBar_1.show()
self.label_11.show()
if mod_name:
self.label_11.setText("Identifying - " + mod_name)
# mod_name = None
if prog_i:
self.incr = self.incr + 1
self.progressBar_1.setValue(self.incr)
pass
def done_scanning(self):
| |
ABOVE
0746 SYRIAC THREE DOTS BELOW
0747 SYRIAC OBLIQUE LINE ABOVE
0748 SYRIAC OBLIQUE LINE BELOW
0749 SYRIAC MUSIC
074A SYRIAC BARREKH
074D SYRIAC LETTER SOGDIAN ZHAIN
074E SYRIAC LETTER SOGDIAN KHAPH
074F SYRIAC LETTER SOGDIAN FE
0750 ARABIC LETTER BEH WITH THREE DOTS HORIZONTALLY BELOW
0751 ARABIC LETTER BEH WITH DOT BELOW AND THREE DOTS ABOVE
0752 ARABIC LETTER BEH WITH THREE DOTS POINTING UPWARDS BELOW
0753 ARABIC LETTER BEH WITH THREE DOTS POINTING UPWARDS BELOW AND TWO DOTS ABOVE
0754 ARABIC LETTER BEH WITH TWO DOTS BELOW AND DOT ABOVE
0755 ARABIC LETTER BEH WITH INVERTED SMALL V BELOW
0756 ARABIC LETTER BEH WITH SMALL V
0757 ARABIC LETTER HAH WITH TWO DOTS ABOVE
0758 ARABIC LETTER HAH WITH THREE DOTS POINTING UPWARDS BELOW
0759 ARABIC LETTER DAL WITH TWO DOTS VERTICALLY BELOW AND SMALL TAH
075A ARABIC LETTER DAL WITH INVERTED SMALL V BELOW
075B ARABIC LETTER REH WITH STROKE
075C ARABIC LETTER SEEN WITH FOUR DOTS ABOVE
075D ARABIC LETTER AIN WITH TWO DOTS ABOVE
075E ARABIC LETTER AIN WITH THREE DOTS POINTING DOWNWARDS ABOVE
075F ARABIC LETTER AIN WITH TWO DOTS VERTICALLY ABOVE
0760 ARABIC LETTER FEH WITH TWO DOTS BELOW
0761 ARABIC LETTER FEH WITH THREE DOTS POINTING UPWARDS BELOW
0762 ARABIC LETTER KEHEH WITH DOT ABOVE
0763 ARABIC LETTER KEHEH WITH THREE DOTS ABOVE
0764 ARABIC LETTER KEHEH WITH THREE DOTS POINTING UPWARDS BELOW
0765 ARABIC LETTER MEEM WITH DOT ABOVE
0766 ARABIC LETTER MEEM WITH DOT BELOW
0767 ARABIC LETTER NOON WITH TWO DOTS BELOW
0768 ARABIC LETTER NOON WITH SMALL TAH
0769 ARABIC LETTER NOON WITH SMALL V
076A ARABIC LETTER LAM WITH BAR
076B ARABIC LETTER REH WITH TWO DOTS VERTICALLY ABOVE
076C ARABIC LETTER REH WITH HAMZA ABOVE
076D ARABIC LETTER SEEN WITH TWO DOTS VERTICALLY ABOVE
076E ARABIC LETTER HAH WITH SMALL ARABIC LETTER TAH BELOW
076F ARABIC LETTER HAH WITH SMALL ARABIC LETTER TAH AND TWO DOTS
0770 ARABIC LETTER SEEN WITH SMALL ARABIC LETTER TAH AND TWO DOTS
0771 ARABIC LETTER REH WITH SMALL ARABIC LETTER TAH AND TWO DOTS
0772 ARABIC LETTER HAH WITH SMALL ARABIC LETTER TAH ABOVE
0773 ARABIC LETTER ALEF WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE
0774 ARABIC LETTER ALEF WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE
0775 ARABIC LETTER FARSI YEH WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE
0776 ARABIC LETTER FARSI YEH WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE
0777 ARABIC LETTER FARSI YEH WITH EXTENDED ARABIC-INDIC DIGIT FOUR BELOW
0778 ARABIC LETTER WAW WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE
0779 ARABIC LETTER WAW WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE
077A ARABIC LETTER YEH BARREE WITH EXTENDED ARABIC-INDIC DIGIT TWO ABOVE
077B ARABIC LETTER YEH BARREE WITH EXTENDED ARABIC-INDIC DIGIT THREE ABOVE
077C ARABIC LETTER HAH WITH EXTENDED ARABIC-INDIC DIGIT FOUR BELOW
077D ARABIC LETTER SEEN WITH EXTENDED ARABIC-INDIC DIGIT FOUR ABOVE
077E ARABIC LETTER SEEN WITH INVERTED V
077F ARABIC LETTER KAF WITH TWO DOTS ABOVE
0780 THAANA LETTER HAA
0781 THAANA LETTER SHAVIYANI
0782 THAANA LETTER NOONU
0783 THAANA LETTER RAA
0784 THAANA LETTER BAA
0785 THAANA LETTER LHAVIYANI
0786 THAANA LETTER KAAFU
0787 THAANA LETTER ALIFU
0788 THAANA LETTER VAAVU
0789 THAANA LETTER MEEMU
078A THAANA LETTER FAAFU
078B THAANA LETTER DHAALU
078C THAANA LETTER THAA
078D THAANA LETTER LAAMU
078E THAANA LETTER GAAFU
078F THAANA LETTER GNAVIYANI
0790 THAANA LETTER SEENU
0791 THAANA LETTER DAVIYANI
0792 THAANA LETTER ZAVIYANI
0793 THAANA LETTER TAVIYANI
0794 THAANA LETTER YAA
0795 THAANA LETTER PAVIYANI
0796 THAANA LETTER JAVIYANI
0797 THAANA LETTER CHAVIYANI
0798 THAANA LETTER TTAA
0799 THAANA LETTER HHAA
079A THAANA LETTER KHAA
079B THAANA LETTER THAALU
079C THAANA LETTER ZAA
079D THAANA LETTER SHEENU
079E THAANA LETTER SAADHU
079F THAANA LETTER DAADHU
07A0 THAANA LETTER TO
07A1 THAANA LETTER ZO
07A2 THAANA LETTER AINU
07A3 THAANA LETTER GHAINU
07A4 THAANA LETTER QAAFU
07A5 THAANA LETTER WAAVU
07A6 THAANA ABAFILI
07A7 THAANA AABAAFILI
07A8 THAANA IBIFILI
07A9 THAANA EEBEEFILI
07AA THAANA UBUFILI
07AB THAANA OOBOOFILI
07AC THAANA EBEFILI
07AD THAANA EYBEYFILI
07AE THAANA OBOFILI
07AF THAANA OABOAFILI
07B0 THAANA SUKUN
07B1 THAANA LETTER NAA
07C0 NKO DIGIT ZERO
07C1 NKO DIGIT ONE
07C2 NKO DIGIT TWO
07C3 NKO DIGIT THREE
07C4 NKO DIGIT FOUR
07C5 NKO DIGIT FIVE
07C6 NKO DIGIT SIX
07C7 NKO DIGIT SEVEN
07C8 NKO DIGIT EIGHT
07C9 NKO DIGIT NINE
07CA NKO LETTER A
07CB NKO LETTER EE
07CC NKO LETTER I
07CD NKO LETTER E
07CE NKO LETTER U
07CF NKO LETTER OO
07D0 NKO LETTER O
07D1 NKO LETTER DAGBASINNA
07D2 NKO LETTER N
07D3 NKO LETTER BA
07D4 NKO LETTER PA
07D5 NKO LETTER TA
07D6 NKO LETTER JA
07D7 NKO LETTER CHA
07D8 NKO LETTER DA
07D9 NKO LETTER RA
07DA NKO LETTER RRA
07DB NKO LETTER SA
07DC NKO LETTER GBA
07DD NKO LETTER FA
07DE NKO LETTER KA
07DF NKO LETTER LA
07E0 NKO LETTER NA WOLOSO
07E1 NKO LETTER MA
07E2 NKO LETTER NYA
07E3 NKO LETTER NA
07E4 NKO LETTER HA
07E5 NKO LETTER WA
07E6 NKO LETTER YA
07E7 NKO LETTER NYA WOLOSO
07E8 NKO LETTER JONA JA
07E9 NKO LETTER JONA CHA
07EA NKO LETTER JONA RA
07EB NKO COMBINING SHORT HIGH TONE
07EC NKO COMBINING SHORT LOW TONE
07ED NKO COMBINING SHORT RISING TONE
07EE NKO COMBINING LONG DESCENDING TONE
07EF NKO COMBINING LONG HIGH TONE
07F0 NKO COMBINING LONG LOW TONE
07F1 NKO COMBINING LONG RISING TONE
07F2 NKO COMBINING NASALIZATION MARK
07F3 NKO COMBINING DOUBLE DOT ABOVE
07F4 NKO HIGH TONE APOSTROPHE
07F5 NKO LOW TONE APOSTROPHE
07F6 NKO SYMBOL OO DENNEN
07F7 NKO SYMBOL GBAKURUNEN
07F8 NKO COMMA
07F9 NKO EXCLAMATION MARK
07FA NKO LAJANYALAN
0800 SAMARITAN LETTER ALAF
0801 SAMARITAN LETTER BIT
0802 SAMARITAN LETTER GAMAN
0803 SAMARITAN LETTER DALAT
0804 SAMARITAN LETTER IY
0805 SAMARITAN LETTER BAA
0806 SAMARITAN LETTER ZEN
0807 SAMARITAN LETTER IT
0808 SAMARITAN LETTER TIT
0809 SAMARITAN LETTER YUT
080A SAMARITAN LETTER KAAF
080B SAMARITAN LETTER LABAT
080C SAMARITAN LETTER MIM
080D SAMARITAN LETTER NUN
080E SAMARITAN LETTER SINGAAT
080F SAMARITAN LETTER IN
0810 SAMARITAN LETTER FI
0811 SAMARITAN LETTER TSAADIY
0812 SAMARITAN LETTER QUF
0813 SAMARITAN LETTER RISH
0814 SAMARITAN LETTER SHAN
0815 SAMARITAN LETTER TAAF
0816 SAMARITAN MARK IN
0817 SAMARITAN MARK IN-ALAF
0818 SAMARITAN MARK OCCLUSION
0819 SAMARITAN MARK DAGESH
081A SAMARITAN MODIFIER LETTER EPENTHETIC YUT
081B SAMARITAN MARK EPENTHETIC YUT
081C SAMARITAN VOWEL SIGN LONG E
081D SAMARITAN VOWEL SIGN E
081E SAMARITAN VOWEL SIGN OVERLONG AA
081F SAMARITAN VOWEL SIGN LONG AA
0820 SAMARITAN VOWEL SIGN AA
0821 SAMARITAN VOWEL SIGN OVERLONG A
0822 SAMARITAN VOWEL SIGN LONG A
0823 SAMARITAN VOWEL SIGN A
0824 SAMARITAN MODIFIER LETTER SHORT A
0825 SAMARITAN VOWEL SIGN SHORT A
0826 SAMARITAN VOWEL SIGN LONG U
0827 SAMARITAN VOWEL SIGN U
0828 SAMARITAN MODIFIER LETTER I
0829 SAMARITAN VOWEL SIGN LONG I
082A SAMARITAN VOWEL SIGN I
082B SAMARITAN VOWEL SIGN O
082C SAMARITAN VOWEL SIGN SUKUN
082D SAMARITAN MARK NEQUDAA
0830 SAMARITAN PUNCTUATION NEQUDAA
0831 SAMARITAN PUNCTUATION AFSAAQ
0832 SAMARITAN PUNCTUATION ANGED
0833 SAMARITAN PUNCTUATION BAU
0834 SAMARITAN PUNCTUATION ATMAAU
0835 SAMARITAN PUNCTUATION SHIYYAALAA
0836 SAMARITAN ABBREVIATION MARK
0837 SAMARITAN PUNCTUATION MELODIC QITSA
0838 SAMARITAN PUNCTUATION ZIQAA
0839 SAMARITAN PUNCTUATION QITSA
083A SAMARITAN PUNCTUATION ZAEF
083B SAMARITAN PUNCTUATION TURU
083C SAMARITAN PUNCTUATION ARKAANU
083D SAMARITAN PUNCTUATION SOF MASHFAAT
083E SAMARITAN PUNCTUATION ANNAAU
0900 DEVANAGARI SIGN INVERTED CANDRABINDU
0901 DEVANAGARI SIGN CANDRABINDU
0902 DEVANAGARI SIGN ANUSVARA
0903 DEVANAGARI SIGN VISARGA
0904 DEVANAGARI LETTER SHORT A
0905 DEVANAGARI LETTER A
0906 DEVANAGARI LETTER AA
0907 DEVANAGARI LETTER I
0908 DEVANAGARI LETTER II
0909 DEVANAGARI LETTER U
090A DEVANAGARI LETTER UU
090B DEVANAGARI LETTER VOCALIC R
090C DEVANAGARI LETTER VOCALIC L
090D DEVANAGARI LETTER CANDRA E
090E DEVANAGARI LETTER SHORT E
090F DEVANAGARI LETTER E
0910 DEVANAGARI LETTER AI
0911 DEVANAGARI LETTER CANDRA O
0912 DEVANAGARI LETTER SHORT O
0913 DEVANAGARI LETTER O
0914 DEVANAGARI LETTER AU
0915 DEVANAGARI LETTER KA
0916 DEVANAGARI LETTER KHA
0917 DEVANAGARI LETTER GA
0918 DEVANAGARI LETTER GHA
0919 DEVANAGARI LETTER NGA
091A DEVANAGARI LETTER CA
091B DEVANAGARI LETTER CHA
091C DEVANAGARI LETTER JA
091D DEVANAGARI LETTER JHA
091E DEVANAGARI LETTER NYA
091F DEVANAGARI LETTER TTA
0920 DEVANAGARI LETTER TTHA
0921 DEVANAGARI LETTER DDA
0922 DEVANAGARI LETTER DDHA
0923 DEVANAGARI LETTER NNA
0924 DEVANAGARI LETTER TA
0925 DEVANAGARI LETTER THA
0926 DEVANAGARI LETTER DA
0927 DEVANAGARI LETTER DHA
0928 DEVANAGARI LETTER NA
0929 DEVANAGARI LETTER NNNA
092A DEVANAGARI LETTER PA
092B DEVANAGARI LETTER PHA
092C DEVANAGARI LETTER BA
092D DEVANAGARI LETTER BHA
092E DEVANAGARI LETTER MA
092F DEVANAGARI LETTER YA
0930 DEVANAGARI LETTER RA
0931 DEVANAGARI LETTER RRA
0932 DEVANAGARI LETTER LA
0933 DEVANAGARI LETTER LLA
0934 DEVANAGARI LETTER LLLA
0935 DEVANAGARI LETTER VA
0936 DEVANAGARI LETTER SHA
0937 DEVANAGARI LETTER SSA
0938 DEVANAGARI LETTER SA
0939 DEVANAGARI LETTER HA
093C DEVANAGARI SIGN NUKTA
093D DEVANAGARI SIGN AVAGRAHA
093E DEVANAGARI VOWEL SIGN AA
093F DEVANAGARI VOWEL SIGN I
0940 DEVANAGARI VOWEL SIGN II
0941 DEVANAGARI VOWEL SIGN U
0942 DEVANAGARI VOWEL SIGN UU
0943 DEVANAGARI VOWEL SIGN VOCALIC R
0944 DEVANAGARI VOWEL SIGN VOCALIC RR
0945 DEVANAGARI VOWEL SIGN CANDRA E
0946 DEVANAGARI VOWEL SIGN SHORT E
0947 DEVANAGARI VOWEL SIGN E
0948 DEVANAGARI VOWEL SIGN AI
0949 DEVANAGARI VOWEL SIGN CANDRA O
094A DEVANAGARI VOWEL SIGN SHORT O
094B DEVANAGARI VOWEL SIGN O
094C DEVANAGARI VOWEL SIGN AU
094D DEVANAGARI SIGN VIRAMA
094E DEVANAGARI VOWEL SIGN PRISHTHAMATRA E
0950 DEVANAGARI OM
0951 DEVANAGARI STRESS SIGN UDATTA
0952 DEVANAGARI STRESS SIGN ANUDATTA
0953 DEVANAGARI GRAVE ACCENT
0954 DEVANAGARI ACUTE ACCENT
0955 DEVANAGARI VOWEL SIGN CANDRA LONG E
0958 DEVANAGARI LETTER QA
0959 DEVANAGARI LETTER KHHA
095A DEVANAGARI LETTER GHHA
095B DEVANAGARI LETTER ZA
095C DEVANAGARI LETTER DDDHA
095D DEVANAGARI LETTER RHA
095E DEVANAGARI LETTER FA
095F DEVANAGARI LETTER YYA
0960 DEVANAGARI LETTER VOCALIC RR
0961 DEVANAGARI LETTER VOCALIC LL
0962 DEVANAGARI VOWEL SIGN VOCALIC L
0963 DEVANAGARI VOWEL SIGN VOCALIC LL
0964 DEVANAGARI DANDA
0965 DEVANAGARI DOUBLE DANDA
0966 DEVANAGARI DIGIT ZERO
0967 DEVANAGARI DIGIT ONE
0968 DEVANAGARI DIGIT TWO
0969 DEVANAGARI DIGIT THREE
096A DEVANAGARI DIGIT FOUR
096B DEVANAGARI DIGIT FIVE
096C DEVANAGARI DIGIT SIX
096D DEVANAGARI DIGIT SEVEN
096E DEVANAGARI DIGIT EIGHT
096F DEVANAGARI DIGIT NINE
0970 DEVANAGARI ABBREVIATION SIGN
0971 DEVANAGARI SIGN HIGH SPACING DOT
0972 DEVANAGARI LETTER CANDRA A
0979 DEVANAGARI LETTER ZHA
097A DEVANAGARI LETTER HEAVY YA
097B DEVANAGARI LETTER GGA
097C DEVANAGARI LETTER JJA
097D DEVANAGARI LETTER GLOTTAL STOP
097E DEVANAGARI LETTER DDDA
097F DEVANAGARI LETTER BBA
0981 BENGALI SIGN CANDRABINDU
0982 BENGALI SIGN ANUSVARA
0983 BENGALI SIGN VISARGA
0985 BENGALI LETTER A
0986 BENGALI LETTER AA
0987 BENGALI LETTER I
0988 BENGALI LETTER II
0989 BENGALI LETTER U
098A BENGALI LETTER UU
098B BENGALI LETTER VOCALIC R
098C BENGALI LETTER VOCALIC L
098F BENGALI LETTER E
0990 BENGALI LETTER AI
0993 BENGALI LETTER O
0994 BENGALI LETTER AU
0995 BENGALI LETTER KA
0996 BENGALI LETTER KHA
0997 BENGALI LETTER GA
0998 BENGALI LETTER GHA
0999 BENGALI LETTER NGA
099A BENGALI LETTER CA
099B BENGALI LETTER CHA
099C BENGALI LETTER JA
099D BENGALI LETTER JHA
099E BENGALI LETTER NYA
099F BENGALI LETTER TTA
09A0 BENGALI LETTER TTHA
09A1 BENGALI LETTER DDA
09A2 BENGALI LETTER DDHA
09A3 BENGALI LETTER NNA
09A4 BENGALI LETTER TA
09A5 BENGALI LETTER THA
09A6 BENGALI LETTER DA
09A7 BENGALI LETTER DHA
09A8 BENGALI LETTER NA
09AA BENGALI LETTER PA
09AB BENGALI LETTER PHA
09AC BENGALI LETTER BA
09AD BENGALI LETTER BHA
09AE BENGALI LETTER MA
09AF BENGALI LETTER YA
09B0 BENGALI LETTER RA
09B2 BENGALI LETTER LA
09B6 BENGALI LETTER SHA
09B7 BENGALI LETTER SSA
09B8 BENGALI LETTER SA
09B9 BENGALI LETTER HA
09BC BENGALI SIGN NUKTA
09BD BENGALI SIGN AVAGRAHA
09BE BENGALI VOWEL SIGN AA
09BF BENGALI VOWEL SIGN I
09C0 BENGALI VOWEL SIGN II
09C1 BENGALI VOWEL SIGN U
09C2 BENGALI VOWEL SIGN UU
09C3 BENGALI VOWEL SIGN VOCALIC R
09C4 BENGALI VOWEL SIGN VOCALIC RR
09C7 BENGALI VOWEL SIGN E
09C8 BENGALI VOWEL SIGN AI
09CB BENGALI VOWEL SIGN O
09CC BENGALI VOWEL SIGN AU
09CD BENGALI SIGN VIRAMA
09CE BENGALI LETTER KHANDA TA
09D7 BENGALI AU LENGTH MARK
09DC BENGALI LETTER RRA
09DD BENGALI LETTER RHA
09DF BENGALI LETTER YYA
09E0 BENGALI LETTER VOCALIC RR
09E1 BENGALI LETTER VOCALIC LL
09E2 BENGALI VOWEL SIGN VOCALIC L
09E3 BENGALI VOWEL SIGN VOCALIC LL
09E6 BENGALI DIGIT ZERO
09E7 BENGALI DIGIT ONE
09E8 BENGALI DIGIT TWO
09E9 BENGALI DIGIT THREE
09EA BENGALI DIGIT FOUR
09EB BENGALI DIGIT FIVE
09EC BENGALI DIGIT SIX
09ED BENGALI DIGIT SEVEN
09EE BENGALI DIGIT EIGHT
09EF BENGALI DIGIT NINE
09F0 BENGALI LETTER RA WITH MIDDLE DIAGONAL
09F1 BENGALI LETTER RA WITH LOWER DIAGONAL
09F2 BENGALI RUPEE MARK
09F3 BENGALI RUPEE SIGN
09F4 BENGALI CURRENCY NUMERATOR ONE
09F5 BENGALI CURRENCY NUMERATOR TWO
09F6 BENGALI CURRENCY NUMERATOR THREE
09F7 BENGALI CURRENCY NUMERATOR FOUR
09F8 BENGALI CURRENCY NUMERATOR ONE LESS THAN THE DENOMINATOR
09F9 BENGALI CURRENCY DENOMINATOR SIXTEEN
09FA BENGALI ISSHAR
09FB BENGALI GANDA MARK
0A01 GURMUKHI SIGN ADAK BINDI
0A02 GURMUKHI SIGN BINDI
0A03 GURMUKHI SIGN VISARGA
0A05 GURMUKHI LETTER A
0A06 GURMUKHI LETTER AA
0A07 GURMUKHI LETTER I
0A08 GURMUKHI LETTER II
0A09 GURMUKHI LETTER U
0A0A GURMUKHI LETTER UU
0A0F GURMUKHI LETTER EE
0A10 GURMUKHI LETTER AI
0A13 GURMUKHI LETTER OO
0A14 GURMUKHI LETTER AU
# -*- coding: utf-8 -*-
"""Windows Registry plugin to parse the AMCache.hve Root key."""
import re
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import posix_time as dfdatetime_posix_time
from dfdatetime import time_elements as dfdatetime_time_elements
from dfwinreg import errors as dfwinreg_errors
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class AMCacheFileEventData(events.EventData):
"""AMCache file event data.
Attributes:
company_name (str): company name that created product file belongs to.
file_description (str): description of file.
file_reference (str): file system file reference, for example 9-1 (MFT
entry - sequence number).
file_size (int): size of file in bytes.
file_version (str): version of file.
full_path (str): full path of file.
language_code (int): language code of file.
product_name (str): product name file belongs to.
program_identifier (str): GUID of entry under Root/Program key file belongs
to.
sha1 (str): SHA-1 of file.
"""
DATA_TYPE = 'windows:registry:amcache'
def __init__(self):
"""Initializes event data."""
super(AMCacheFileEventData, self).__init__(data_type=self.DATA_TYPE)
self.company_name = None
self.file_description = None
self.file_reference = None
self.file_size = None
self.file_version = None
self.full_path = None
self.language_code = None
self.product_name = None
self.program_identifier = None
self.sha1 = None
class AMCacheProgramEventData(events.EventData):
"""AMCache programs event data.
Attributes:
entry_type (str): type of entry (usually AddRemoveProgram).
file_paths (str): file paths of installed program.
files (str): list of files belonging to program.
language_code (int): language_code of program.
msi_package_code (str): MSI package code of program.
msi_product_code (str): MSI product code of program.
name (str): name of installed program.
package_code (str): package code of program.
product_code (str): product code of program.
publisher (str): publisher of program.
uninstall_key (str): unicode string of uninstall registry key for program.
version (str): version of program.
"""
DATA_TYPE = 'windows:registry:amcache:programs'
def __init__(self):
"""Initializes event data."""
super(AMCacheProgramEventData, self).__init__(data_type=self.DATA_TYPE)
self.entry_type = None
self.file_paths = None
self.files = None
self.language_code = None
self.msi_package_code = None
self.msi_product_code = None
self.name = None
self.package_code = None
self.product_code = None
self.publisher = None
self.uninstall_key = None
self.version = None
class AMCachePlugin(interface.WindowsRegistryPlugin):
"""AMCache.hve Windows Registry plugin."""
NAME = 'amcache'
DATA_FORMAT = 'AMCache (AMCache.hve)'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter('\\Root')])
# Contains: {value name: attribute name}
_APPLICATION_SUB_KEY_VALUES = {
'LowerCaseLongPath': 'full_path',
'ProductName': 'product_name',
'ProductVersion': 'file_version',
'ProgramId': 'program_identifier',
'Publisher': 'company_name',
'Size': 'file_size'}
_FILE_REFERENCE_KEY_VALUES = {
'0': 'product_name',
'1': 'company_name',
'3': 'language_code',
'5': 'file_version',
'6': 'file_size',
'c': 'file_description',
'15': 'full_path',
'100': 'program_identifier',
'101': 'sha1'}
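# These value names appear to be hexadecimal strings used by AMCache file entries,
# e.g. '15' (0x15) holds the full path and '101' (0x101) the SHA-1 digest.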
_AMCACHE_LINK_TIME = 'f'
_AMCACHE_FILE_MODIFICATION_TIME = '11'
_AMCACHE_FILE_CREATION_TIME = '12'
_AMCACHE_ENTRY_WRITE_TIME = '17'
_AMCACHE_P_INSTALLATION_TIME = 'a'
_AMCACHE_P_FILES = 'Files'
# Date and time string formatted as: "MM/DD/YYYY hh:mm:ss"
# for example "04/07/2014 15:18:49"
# TODO: determine if this is true for other locales.
_LINK_DATE_TIME_RE = re.compile(
r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '
r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9])')
_PRODUCT_KEY_VALUES = {
'0': 'name',
'1': 'version',
'2': 'publisher',
'3': 'language_code',
'6': 'entry_type',
'7': 'uninstall_key',
'd': 'file_paths',
'f': 'product_code',
'10': 'package_code',
'11': 'msi_product_code',
'12': 'msi_package_code'}
def _GetValueDataAsObject(
self, parser_mediator, key_path, value_name, registry_value):
"""Retrieves the value data as an object from a Windows Registry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key_path (str): key path.
value_name (str): name of the value.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
object: value data or None when the value data cannot be determined.
"""
if registry_value.data is None:
return '(empty)'
try:
value_object = registry_value.GetDataAsObject()
if registry_value.DataIsMultiString():
value_object = list(value_object)
elif (not registry_value.DataIsInteger() and
not registry_value.DataIsString()):
# Represent remaining types like REG_BINARY and
# REG_RESOURCE_REQUIREMENT_LIST.
value_object = registry_value.data
except dfwinreg_errors.WinRegistryValueError as exception:
parser_mediator.ProduceRecoveryWarning((
'Unable to retrieve value data of type: {0:s} as object from '
'value: {1:s} in key: {2:s} with error: {3!s}').format(
registry_value.data_type_string, value_name, key_path, exception))
value_object = None
return value_object
def _ParseApplicationSubKey(self, parser_mediator, application_sub_key):
"""Parses a Root\\InventoryApplicationFile\\%NAME%|%IDENTIFIER% key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
application_sub_key (dfwinreg.WinRegistryKey): application sub key of the
InventoryApplicationFile Windows Registry key.
"""
event_data = AMCacheFileEventData()
for value_name, attribute_name in self._APPLICATION_SUB_KEY_VALUES.items():
value = application_sub_key.GetValueByName(value_name)
if value:
value_data = self._GetValueDataAsObject(
parser_mediator, application_sub_key.path, value_name, value)
setattr(event_data, attribute_name, value_data)
install_date_value = application_sub_key.GetValueByName('InstallDate')
if install_date_value:
date_time = self._ParseDateStringValue(
parser_mediator, application_sub_key.path, install_date_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LINK_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
install_date_msi_value = application_sub_key.GetValueByName(
'InstallDateMsi')
if install_date_msi_value:
date_time = self._ParseDateStringValue(
parser_mediator, application_sub_key.path, install_date_msi_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LINK_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
link_date_value = application_sub_key.GetValueByName('LinkDate')
if link_date_value:
date_time = self._ParseDateStringValue(
parser_mediator, application_sub_key.path, link_date_value)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LINK_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseDateStringValue(self, parser_mediator, key_path, registry_value):
"""Parses a date and time string value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key_path (str): key path.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
dfdatetime_time_elements.TimeElements: date and time value or None
if not available.
"""
if not registry_value.DataIsString():
parser_mediator.ProduceExtractionWarning((
'unsupported {0:s} with value data type: {1:s} in key: '
'{2:s}').format(
registry_value.name, registry_value.data_type_string, key_path))
return None
date_time_string = registry_value.GetDataAsObject()
if not date_time_string:
parser_mediator.ProduceExtractionWarning(
'missing {0:s} value data in key: {1:s}'.format(
registry_value.name, key_path))
return None
re_match = self._LINK_DATE_TIME_RE.match(date_time_string)
if not re_match:
parser_mediator.ProduceExtractionWarning(
'unsupported {0:s} value data: {1!s} in key: {2:s}'.format(
registry_value.name, date_time_string, key_path))
return None
month, day_of_month, year, hours, minutes, seconds = re_match.groups()
try:
year = int(year, 10)
month = int(month, 10)
day_of_month = int(day_of_month, 10)
hours = int(hours, 10)
minutes = int(minutes, 10)
seconds = int(seconds, 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid {0:s} date time value: {1!s} in key: {2:s}'.format(
registry_value.name, date_time_string, key_path))
return None
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid {0:s} date time value: {1!s} in key: {2:s}'.format(
registry_value.name, time_elements_tuple, key_path))
return None
return date_time
def _ParseFileKey(self, parser_mediator, file_key):
"""Parses a Root\\File key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
file_key (dfwinreg.WinRegistryKey): the File Windows Registry key.
"""
for volume_key in file_key.GetSubkeys():
for file_reference_key in volume_key.GetSubkeys():
self._ParseFileReferenceKey(parser_mediator, file_reference_key)
def _ParseFileReferenceKey(self, parser_mediator, file_reference_key):
"""Parses a file reference key (sub key of Root\\File\\%VOLUME%) for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
file_reference_key (dfwinreg.WinRegistryKey): file reference Windows
Registry key.
"""
event_data = AMCacheFileEventData()
try:
if '0000' in file_reference_key.name:
# An NTFS file reference is a combination of MFT entry and sequence number.
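# For example, a (hypothetical) key named '70000316ad' splits on '0000' into
# sequence number 0x7 and MFT entry 0x316ad, giving file_reference '202413-7'.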
sequence_number, mft_entry = file_reference_key.name.split('0000')
mft_entry = int(mft_entry, 16)
sequence_number = int(sequence_number, 16)
event_data.file_reference = '{0:d}-{1:d}'.format(
mft_entry, sequence_number)
else:
# A FAT file is a single number.
file_reference = int(file_reference_key.name, 16)
event_data.file_reference = '{0:d}'.format(file_reference)
except (ValueError, TypeError):
pass
for value_name, attribute_name in self._FILE_REFERENCE_KEY_VALUES.items():
value = file_reference_key.GetValueByName(value_name)
if not value:
continue
value_data = self._GetValueDataAsObject(
parser_mediator, file_reference_key.path, value_name, value)
if attribute_name == 'sha1' and value_data.startswith('0000'):
# Strip off the 4 leading zero's from the sha1 hash.
value_data = value_data[4:]
setattr(event_data, attribute_name, value_data)
write_time_value = file_reference_key.GetValueByName(
self._AMCACHE_ENTRY_WRITE_TIME)
if write_time_value:
timestamp = write_time_value.GetDataAsObject()
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
creation_time_value = file_reference_key.GetValueByName(
self._AMCACHE_FILE_CREATION_TIME)
if creation_time_value:
timestamp = creation_time_value.GetDataAsObject()
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
modification_time_value = file_reference_key.GetValueByName(
self._AMCACHE_FILE_MODIFICATION_TIME)
if modification_time_value:
timestamp = modification_time_value.GetDataAsObject()
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
link_time_value = file_reference_key.GetValueByName(self._AMCACHE_LINK_TIME)
if link_time_value:
timestamp = link_time_value.GetDataAsObject()
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LINK_TIME)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseInventoryApplicationFileKey(
self, parser_mediator, inventory_application_file_key):
"""Parses a Root\\InventoryApplicationFile key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
inventory_application_file_key (dfwinreg.WinRegistryKey): the
InventoryApplicationFile Windows Registry key.
"""
for application_sub_key in inventory_application_file_key.GetSubkeys():
self._ParseApplicationSubKey(parser_mediator, application_sub_key)
def _ParseProgramKey(self, parser_mediator, program_key):
"""Parses a program key (a sub key of Root\\Programs) for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
program_key (dfwinreg.WinRegistryKey): program Windows Registry key.
"""
event_data = AMCacheProgramEventData()
for value_name, attribute_name in self._PRODUCT_KEY_VALUES.items():
value = program_key.GetValueByName(value_name)
if value:
value_data = self._GetValueDataAsObject(
parser_mediator, program_key.path, value_name, value)
setattr(event_data, attribute_name, value_data)
installation_time_value = program_key.GetValueByName(
self._AMCACHE_P_INSTALLATION_TIME)
if installation_time_value:
timestamp = installation_time_value.GetDataAsObject()
installation_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
installation_time, definitions.TIME_DESCRIPTION_INSTALLATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _ParseProgramsKey(self, parser_mediator, programs_key):
"""Parses a Root\\Programs key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
programs_key (dfwinreg.WinRegistryKey): the Programs Windows Registry key.
"""
for program_key in programs_key.GetSubkeys():
self._ParseProgramKey(parser_mediator, program_key)
def _ParseRootKey(self, parser_mediator, root_key):
"""Parses a | |
# the label idx_outside_polytope is treated as the positive labels
y_polytope = np.copy(y)
# if label is inside of the polytope, the distance is negative and the label is not divided into
y_polytope[y_polytope != idx_outside_polytope] = -1
# if label is outside of the polytope, the distance is positive and the label is clustered
y_polytope[y_polytope == idx_outside_polytope] = 1
index_positives = np.where(y_polytope == 1)[0] # index for Positive labels (outside polytope)
index_negatives = np.where(y_polytope == -1)[0] # index for Negative labels (inside polytope)
n_consensus = self.n_consensus
# define the clustering assignment matrix (each column correspond to one consensus run)
self.clustering_assignments = np.zeros((len(index_positives), n_consensus))
for consensus in range(n_consensus):
# first we initialize the clustering matrix S, with the initialization strategy set in self.initialization
S, cluster_index = self.initialize_clustering(X, y_polytope, index_positives)
if self.negative_weighting in ['uniform']:
S[index_negatives] = 1 / self.n_clusters
elif self.negative_weighting in ['hard']:
S[index_negatives] = np.rint(S[index_negatives])
if self.positive_weighting in ['hard']:
S[index_positives] = np.rint(S[index_positives])
cluster_index = self.run_EM(X, y_polytope, S, cluster_index, index_positives, index_negatives, consensus)
# update the cluster index for the consensus clustering
self.clustering_assignments[:, consensus] = cluster_index
if n_consensus > 1:
self.clustering_ensembling(X, y_polytope, index_positives, index_negatives)
def initialize_clustering(self, X, y_polytope, index_positives):
"""Perform a bagging of the previously obtained clusterings and compute new hyperplanes.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
y_polytope : array-like, shape (n_samples,)
Target values.
index_positives : array-like, shape (n_positives_samples,)
indexes of the positive labels being clustered
Returns
-------
S : array-like, shape (n_samples, n_samples)
Cluster prediction matrix.
"""
S = np.ones((len(y_polytope), self.n_clusters)) / self.n_clusters
if self.clustering in ["k_means"]:
KM = KMeans(n_clusters=self.n_clusters, init="random", n_init=1).fit(X[index_positives])
S = one_hot_encode(KM.predict(X))
if self.clustering in ["gaussian_mixture"]:
GMM = GaussianMixture(n_components=self.n_clusters, init_params="random", n_init=1,
covariance_type="spherical").fit(X[index_positives])
S = GMM.predict_proba(X)
else:
custom_clustering_method_ = copy.deepcopy(self.clustering)
S_positives = custom_clustering_method_.fit_predict(X[index_positives])
S_distances = np.zeros((len(X), np.max(S_positives) + 1))
for cluster in range(np.max(S_positives) + 1):
S_distances[:, cluster] = np.sum(
np.abs(X - np.mean(X[index_positives][S_positives == cluster], 0)[None, :]), 1)
S_distances /= np.sum(S_distances, 1)[:, None]
S = 1 - S_distances
cluster_index = np.argmax(S[index_positives], axis=1)
return S, cluster_index
def maximization_step(self, X, y_polytope, S):
if self.maximization == "svc":
for cluster in range(self.n_clusters):
cluster_assignment = np.ascontiguousarray(S[:, cluster])
SVM_coefficient, SVM_intercept = launch_svc(X, y_polytope, cluster_assignment)
self.coefficients[cluster] = SVM_coefficient
self.intercepts[cluster] = SVM_intercept
elif self.maximization == "lr":
for cluster in range(self.n_clusters):
cluster_assignment = np.ascontiguousarray(S[:, cluster])
logistic_coefficient, logistic_intercept = launch_logistic(X, y_polytope, cluster_assignment)
self.coefficients[cluster] = logistic_coefficient
self.intercepts[cluster] = logistic_intercept
else:
for cluster in range(self.n_clusters):
cluster_assignment = np.ascontiguousarray(S[:, cluster])
self.maximization.fit(X, y_polytope, sample_weight=cluster_assignment)
self.coefficients[cluster] = self.maximization.coef_
self.intercepts[cluster] = self.maximization.intercept_
def expectation_step(self, X, S, index_positives, consensus):
"""Update clustering method (update clustering distribution matrix S).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
S : array-like, shape (n_samples, n_samples)
Cluster prediction matrix.
index_positives : array-like, shape (n_positives_samples,)
indexes of the positive labels being clustered
consensus : int
which consensus is being run ?
Returns
-------
S : array-like, shape (n_samples, n_samples)
Cluster prediction matrix.
cluster_index : array-like, shape (n_positives_samples, )
clusters predictions argmax for positive samples.
"""
# get directions basis
directions_basis = []
for cluster in range(self.n_clusters):
directions_basis.extend(self.coefficients[cluster])
norm_directions = [np.linalg.norm(direction) for direction in directions_basis]
directions_basis = np.array(directions_basis) / np.array(norm_directions)[:, None]
# apply the Gram-Schmidt algorithm
orthonormalized_basis = self.graam_schmidt(directions_basis)
self.orthonormal_basis[consensus] = orthonormalized_basis
self.orthonormal_basis[-1] = np.array(orthonormalized_basis).copy()
X_proj = X @ self.orthonormal_basis[consensus].T
# get centroids or barycenters
centroids = [np.mean(S[index_positives, cluster][:, None] * X_proj[index_positives, :], 0) for cluster in
range(self.n_clusters)]
if self.clustering == 'k_means':
self.clustering_method[consensus] = KMeans(n_clusters=self.n_clusters, init=np.array(centroids),
n_init=1).fit(X_proj[index_positives])
Q_positives = self.clustering_method[consensus].fit_predict(X_proj[index_positives])
Q_distances = np.zeros((len(X_proj), np.max(Q_positives) + 1))
for cluster in range(np.max(Q_positives) + 1):
Q_distances[:, cluster] = np.sum(
np.abs(X_proj - self.clustering_method[consensus].cluster_centers_[cluster]), 1)
Q_distances = Q_distances / np.sum(Q_distances, 1)[:, None]
Q = 1 - Q_distances
elif self.clustering == 'gaussian_mixture':
self.clustering_method[consensus] = GaussianMixture(n_components=self.n_clusters,
covariance_type="spherical",
means_init=np.array(centroids)).fit(
X_proj[index_positives])
Q = self.clustering_method[consensus].predict_proba(X_proj)
self.clustering_method[-1] = copy.deepcopy(self.clustering_method[consensus])
else:
self.clustering_method[consensus] = copy.deepcopy(self.clustering)
Q_positives = self.clustering_method[consensus].fit_predict(X_proj[index_positives])
Q_distances = np.zeros((len(X_proj), np.max(Q_positives) + 1))
for cluster in range(np.max(Q_positives) + 1):
Q_distances[:, cluster] = np.sum(
np.abs(X_proj - np.mean(X_proj[index_positives][Q_positives == cluster], 0)[None, :]), 1)
Q_distances = Q_distances / np.sum(Q_distances, 1)[:, None]
Q = 1 - Q_distances
# define matrix clustering
S = Q.copy()
cluster_index = np.argmax(Q[index_positives], axis=1)
return S, cluster_index
def graam_schmidt(self, directions_basis):
# order the directions by importance first, because Gram-Schmidt is not invariant under permutation when the matrix is not square
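# e.g. if several directions are nearly colinear, their mean residual norm against the
# other directions is small and they are processed last; the most mutually independent
# directions (largest mean residual) get orthonormalized first.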
scores = []
for i, direction_i in enumerate(directions_basis):
scores_i = []
for j, direction_j in enumerate(directions_basis):
if i != j:
scores_i.append(np.linalg.norm(direction_i - (np.dot(direction_i, direction_j) * direction_j)))
scores.append(np.mean(scores_i))
directions = directions_basis[np.array(scores).argsort()[::-1], :]
# orthonormalize coefficient/direction basis
basis = []
for v in directions:
w = v - sum(np.dot(v, b) * b for b in basis)
if len(basis) >= 2:
if np.linalg.norm(w) * self.noise_tolerance_threshold > 1:
basis.append(w / np.linalg.norm(w))
elif np.linalg.norm(w) > 1e-2:
basis.append(w / np.linalg.norm(w))
return np.array(basis)
def run_EM(self, X, y_polytope, S, cluster_index, index_positives, index_negatives, consensus):
"""Perform a bagging of the previously obtained clustering and compute new hyperplanes.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
y_polytope : array-like, shape (n_samples,)
Target values.
S : array-like, shape (n_samples, n_samples)
Cluster prediction matrix.
cluster_index : array-like, shape (n_positives_samples, )
clusters predictions argmax for positive samples.
index_positives : array-like, shape (n_positives_samples,)
indexes of the positive labels being clustered
index_negatives : array-like, shape (n_positives_samples,)
indexes of the positive labels being clustered
consensus : int
index of consensus
Returns
-------
S : array-like, shape (n_samples, n_samples)
Cluster prediction matrix.
"""
best_cluster_consistency = 1
if consensus == -1:
save_stabler_coefficients = True
consensus = self.n_consensus + 1
best_cluster_consistency = 0
for iteration in range(self.n_iterations):
# check for degenerate clustering for positive labels (warning) and negatives (might be normal)
for cluster in range(self.n_clusters):
if np.count_nonzero(S[index_positives, cluster]) == 0:
logging.debug("Cluster dropped, one cluster have no positive points anymore, in iteration : %d" % (
iteration - 1))
logging.debug("Re-initialization of the clustering...")
S, cluster_index = self.initialize_clustering(X, y_polytope, index_positives)
if np.max(S[index_negatives, cluster]) < 0.5:
logging.debug(
"Cluster too far, one cluster have no negative points anymore, in consensus : %d" % (
iteration - 1))
logging.debug("Re-distribution of this cluster negative weight to 'all'...")
S[index_negatives, cluster] = 1 / self.n_clusters
# re-init directions for each clusters
self.coefficients = {cluster_i: [] for cluster_i in range(self.n_clusters)}
self.intercepts = {cluster_i: [] for cluster_i in range(self.n_clusters)}
# run maximization step
self.maximization_step(X, y_polytope, S)
# decide the convergence based on the clustering stability
S_hold = S.copy()
S, cluster_index = self.expectation_step(X, S, index_positives, consensus)
# applying the negative weighting set as input
if self.negative_weighting in ['uniform']:
S[index_negatives] = 1 / self.n_clusters
elif self.negative_weighting in ['hard']:
S[index_negatives] = np.rint(S[index_negatives])
if self.positive_weighting in ['hard']:
S[index_positives] = np.rint(S[index_positives])
# check the Clustering Stability \w Adjusted Rand Index for stopping criteria
cluster_consistency = ARI(np.argmax(S[index_positives], 1), np.argmax(S_hold[index_positives], 1))
if cluster_consistency > best_cluster_consistency:
best_cluster_consistency = cluster_consistency
self.coefficients[-1] = copy.deepcopy(self.coefficients)
self.intercepts[-1] = copy.deepcopy(self.intercepts)
self.orthonormal_basis[-1] = copy.deepcopy(self.orthonormal_basis[consensus])
self.clustering_method[-1] = copy.deepcopy(self.clustering_method[consensus])
if cluster_consistency > self.stability_threshold:
break
return cluster_index
def predict_clusters_proba_from_cluster_labels(self, X):
"""Predict positive and negative points clustering probabilities.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
Returns
-------
S : array-like, shape (n_samples, n_samples)
Cluster prediction matrix.
"""
X_clustering_assignments = np.zeros((len(X), self.n_consensus))
for consensus in range(self.n_consensus):
X_proj = X @ self.orthonormal_basis[consensus].T
if self.clustering in ['k_means', 'gaussian_mixture']:
X_clustering_assignments[:, consensus] = self.clustering_method[consensus].predict(X_proj)
else:
X_clustering_assignments[:, consensus] = self.clustering_method[consensus].fit_predict(X_proj)
similarity_matrix = compute_similarity_matrix(self.clustering_assignments,
clustering_assignments_to_pred=X_clustering_assignments)
Q = np.zeros((len(X), self.n_clusters))
y_clusters_train_ = self.cluster_labels_
for cluster in range(self.n_clusters):
Q[:, cluster] = np.mean(similarity_matrix[y_clusters_train_ == cluster], 0)
Q /= np.sum(Q, 1)[:, None]
return Q
def clustering_ensembling(self, X, y_polytope, index_positives, index_negatives):
"""Perform a bagging of the previously obtained clustering and compute new hyperplanes.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
y_polytope : array-like, shape (n_samples,)
Modified target values.
index_positives : array-like, shape (n_positives_samples,)
indexes of the positive labels being clustered
index_negatives : array-like, shape (n_positives_samples,)
indexes of the positive labels being clustered
Returns
-------
None
"""
# perform consensus clustering
consensus_cluster_index = compute_spectral_clustering_consensus(self.clustering_assignments, self.n_clusters)
# save clustering predictions computed by bagging step
self.cluster_labels_ = consensus_cluster_index
# update clustering matrix S
S = self.predict_clusters_proba_from_cluster_labels(X)
if self.negative_weighting in ['uniform']:
S[index_negatives] = 1 / self.n_clusters
elif self.negative_weighting in ['hard']:
S[index_negatives] = np.rint(S[index_negatives])
if self.positive_weighting in ['hard']:
S[index_positives] = np.rint(S[index_positives])
cluster_index = self.run_EM(X, y_polytope, S, consensus_cluster_index, index_positives, index_negatives, -1)
'''
Do some forced-photometry simulations to look at how errors in
astrometry affect the results. Can we do anything with forced
photometry to measure astrometric offsets? (photometer PSF + its
derivatives?)
'''
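# Rough idea behind the derivative fitting below: for a small positional offset
# (dra, ddec), data ~ flux * [ PSF + dra * dPSF/dRA + ddec * dPSF/dDec ], so forced
# photometry that also fits the two unit-flux derivative "sources" recovers amplitudes
# of roughly flux * dra and flux * ddec; dividing by the fitted flux estimates the
# per-image astrometric offset.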
from __future__ import print_function
import sys
import os
import numpy as np
import pylab as plt
import fitsio
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.plotutils import PlotSequence, plothist, loghist
from astrometry.util.ttime import Time
from astrometry.util.util import Tan
from legacypipe.survey import LegacySurveyData, imsave_jpeg, get_rgb
from scipy.ndimage.filters import gaussian_filter
from tractor import *
from tractor.pointsource import BasicSource
pixscale = 0.262 / 3600.
class TrackingTractor(Tractor):
def __init__(self, *args, **kwargs):
super(TrackingTractor, self).__init__(*args, **kwargs)
self.reset_tracking()
def reset_tracking(self):
self.tracked_params = []
self.tracked_lnprob = []
def setParams(self, p):
self.tracked_params.append(np.array(p).copy())
super(TrackingTractor, self).setParams(p)
self.tracked_lnprob.append(self.getLogProb())
class SourceDerivatives(MultiParams, BasicSource):
def __init__(self, real, freeze, thaw, brights):
'''
*real*: The real source whose derivatives are my profiles.
*freeze*: List of parameter names to freeze before taking derivs
*thaw*: List of parameter names to thaw before taking derivs
'''
# This a subclass of MultiParams and we pass the brightnesses
# as our params.
super(SourceDerivatives,self).__init__(*brights)
self.real = real
self.freeze = freeze
self.thaw = thaw
self.brights = brights
self.umods = None
# forced photom calls getUnitFluxModelPatches
def getUnitFluxModelPatches(self, img, minval=0., modelMask=None):
self.real.freezeParamsRecursive(*self.freeze)
self.real.thawParamsRecursive(*self.thaw)
#print('SourceDerivatives: source has params:')
#self.real.printThawedParams()
# The derivatives will be scaled by the source brightness;
# undo that scaling.
#print('Brightness:', self.real.brightness)
counts = img.getPhotoCal().brightnessToCounts(self.real.brightness)
derivs = self.real.getParamDerivatives(img, modelMask=modelMask)
#print('SourceDerivs: derivs', derivs)
for d in derivs:
if d is not None:
d /= counts
print('Deriv: abs max', np.abs(d.patch).max(), 'range', d.patch.min(), d.patch.max(), 'sum', d.patch.sum())
# and revert...
self.real.freezeParamsRecursive(*self.thaw)
self.real.thawParamsRecursive(*self.freeze)
self.umods = derivs
return derivs
def getModelPatch(self, img, minsb=0., modelMask=None):
if self.umods is None:
return None
#print('getModelPatch()')
#print('modelMask', modelMask)
pc = img.getPhotoCal()
#counts = [pc.brightnessToCounts(b) for b in self.brights]
#print('umods', self.umods)
return (self.umods[0] * pc.brightnessToCounts(self.brights[0]) +
self.umods[1] * pc.brightnessToCounts(self.brights[1]))
def sim(nims, nsrcs, H,W, ps, dpix, nsamples, forced=True, ceres=False,
alphas=None, derivs=False):
truewcs = Tan(0., 0., W/2., H/2., -pixscale, 0., 0., pixscale,
float(W), float(H))
#ngrid = int(np.ceil(np.sqrt(nsrcs)))
#xx,yy = np.meshgrid(
assert(nsrcs == 1)
sig1 = 0.25
flux = 100.
# sig1 = 0.0025
# flux = 1.
#psf_sigma = 1.5
psf_sigma = 2.0
psfnorm = 1./(2. * np.sqrt(np.pi) * psf_sigma)
nsigma = flux * psfnorm / sig1
print('S/N:', nsigma)
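# For a circular Gaussian PSF, sum(psf**2) ~ 1 / (4 * pi * sigma**2), so the matched-filter
# point-source S/N is roughly flux * sqrt(sum(psf**2)) / sig1 = flux / (2 * sqrt(pi) * sigma * sig1),
# which is what psfnorm encodes above.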
realsrcs = []
derivsrcs = []
for i in range(nsrcs):
src = PointSource(RaDecPos(0., 0.), Flux(flux))
realsrcs.append(src)
if forced:
src.freezeAllBut('brightness')
if derivs:
realsrc = src
dsrc = SourceDerivatives(realsrc, ['brightness'], ['pos'],
[Flux(0.),Flux(0.)])
derivsrcs.append(dsrc)
tims = []
for i in range(nims):
v = psf_sigma**2
xx,yy = np.meshgrid(np.arange(-12,13), np.arange(-12,13))
pp = np.exp(-0.5 * (xx**2 + yy**2) / psf_sigma**2)
pp /= np.sum(pp)
psf = PixelizedPSF(pp)
#psf=GaussianMixturePSF(1., 0., 0., v, v, 0.)))
tims.append(Image(data=np.zeros((H,W), np.float32),
inverr=np.ones((H,W), np.float32) * 1./sig1,
wcs=ConstantFitsWcs(truewcs),
photocal=LinearPhotoCal(1.),
psf=psf))
opt = None
if ceres:
from tractor.ceres_optimizer import *
opt = CeresOptimizer()
# Render "true" models, add noise
tr = TrackingTractor(tims, realsrcs, optimizer=opt)
mods = []
for i,tim in enumerate(tims):
mod = tr.getModelImage(i)
mod += np.random.normal(size=mod.shape) * sig1
tim.data = mod
mods.append(mod)
if ps is not None:
plt.clf()
plt.imshow(mods[0], interpolation='nearest', origin='lower')
ps.savefig()
tr.freezeParam('images')
if derivs:
tr.catalog = Catalog(*(realsrcs + derivsrcs))
print('Params:')
tr.printThawedParams()
p0 = tr.getParams()
results = []
for isamp in range(nsamples):
#print('Sample', isamp)
if isamp % 100 == 0:
print('Sample', isamp)
tr.reset_tracking()
# Scatter the tim WCS CRPIX values
dx = np.zeros(len(tims))
dy = np.zeros(len(tims))
for i,tim in enumerate(tims):
# dx[i] = dpix * np.random.uniform(low=-1., high=1.)
# dy[i] = dpix * np.random.uniform(low=-1., high=1.)
dx[i] = dpix * np.random.normal()
dy[i] = dpix * np.random.normal()
wcs = Tan(0., 0.,
W/2. + dx[i], H/2. + dy[i],
-pixscale, 0., 0., pixscale, float(W), float(H))
tim.wcs = ConstantFitsWcs(wcs)
if ps is not None and isamp == 0:
plt.clf()
cols = int(np.ceil(np.sqrt(len(tims))))
rows = int(np.ceil(len(tims) / float(cols)))
for i,tim in enumerate(tims):
# from astrometry.util.resample import resample_with_wcs
# Yo,Xo,Yi,Xi,rims = resample_with_wcs(truewcs, tim.wcs.wcs,
# [tim.data])
# rimg = np.zeros(truewcs.shape)
# rimg[Yo,Xo] = rims[0]
# plt.subplot(rows, cols, i+1)
# plt.imshow(rimg, interpolation='nearest', origin='lower')
plt.subplot(rows, cols, i+1)
plt.imshow(tim.data, interpolation='nearest', origin='lower',
cmap='gray')
x,y = tim.wcs.positionToPixel(realsrcs[0].pos)
plt.axhline(y, color='r', alpha=0.5, lw=2)
plt.axvline(x, color='r', alpha=0.5, lw=2)
x,y = W/2, H/2
plt.axhline(y, color='b', alpha=0.5, lw=2)
plt.axvline(x, color='b', alpha=0.5, lw=2)
plt.suptitle('Astrometric scatter: +- %g pix' % dpix)
ps.savefig()
tr.setParams(p0)
track = []
if forced:
tr.optimize_forced_photometry()
else:
optargs = dict(priors=False, shared_params=False)
if alphas is not None:
optargs.update(alphas=alphas)
#tr.optimize_loop()
track.append(((None,None,None),tr.getParams(),tr.getLogProb()))
if not ceres:
for step in range(50):
dlnp,X,alpha = tr.optimizer.optimize(tr, **optargs)
track.append(((dlnp,X,alpha),tr.getParams(),tr.getLogProb()))
#print('dlnp,X,alpha', dlnp,X,alpha)
if dlnp == 0:
break
else:
tr.optimize_loop()
if forced:
results.append((dx, dy, tr.getParams()))
else:
results.append((dx, dy, tr.getParams(), track, tr.tracked_params,
tr.tracked_lnprob,
tr.getLogProb()))
if ps is not None and isamp == 0:
if derivs:
plt.clf()
tim = tims[0]
mod1 = tr.getModelImage(tim, srcs=realsrcs)
print('mod1 max value', mod1.max()/np.sum(mod1))
# save derivative params
pd = [d.getParams() for d in derivsrcs]
# zero out the dDec coefficient
for d,dp0 in zip(derivsrcs,pd):
p = dp0[:]
p[1] = 0.
d.setParams(p)
modr = tr.getModelImage(tim, srcs=derivsrcs)
# zero out the dRA coefficient, restore the dDec coeff
for d,dp0 in zip(derivsrcs,pd):
p = dp0[:]
p[0] = 0.
d.setParams(p)
modd = tr.getModelImage(tim, srcs=derivsrcs)
# restore the dRA coeff
for d,dp0 in zip(derivsrcs,pd):
d.setParams(dp0)
mod = tr.getModelImage(tim)
mx = mod.max()
ima = dict(interpolation='nearest', origin='lower',
vmin=-mx, vmax=mx, cmap='gray')
plt.subplot(2,3,1)
plt.imshow(tim.getImage(), **ima)
plt.title('data')
plt.subplot(2,3,2)
plt.imshow(mod1, **ima)
plt.title('source')
dscale = 5
plt.subplot(2,3,3)
plt.imshow(dscale * (tim.getImage() - mod1), **ima)
plt.title('(data - source) x %g' % dscale)
plt.subplot(2,3,4)
plt.imshow(modr*dscale, **ima)
plt.title('dRA x %g' % dscale)
plt.subplot(2,3,5)
plt.imshow(modd*dscale, **ima)
plt.title('dDec x %g' % dscale)
plt.subplot(2,3,6)
plt.imshow(mod, **ima)
plt.title('total')
x1,y1 = tim.wcs.positionToPixel(realsrcs[0].pos)
x2,y2 = W/2, H/2
for i in [1,2,4,5,6]:
plt.subplot(2,3,i)
plt.axhline(y1, color='r', alpha=0.5, lw=2)
plt.axvline(x1, color='r', alpha=0.5, lw=2)
plt.axhline(y2, color='b', alpha=0.5, lw=2)
plt.axvline(x2, color='b', alpha=0.5, lw=2)
ps.savefig()
return results
def compare_optimizers():
allfluxes = []
allra = []
alldec = []
alldx = []
alldy = []
alltracks = []
alllnprobtracks = []
names = []
bestlogprobs = None
#for i in range(3):
for i in range(3):
np.random.seed(seed)
name = ''
nsamples = 200
if i in [0,1]:
print()
print('LSQR Opt')
print()
alphas = None
if i == 1:
alphas = [0.1, 0.3, 1.0]
name = 'LSQR, alphas'
else:
name = 'LSQR'
results = sim(nims, nsrcs, H,W, None, 1.0, nsamples, forced=False,
alphas=alphas)
else:
print()
print('Ceres Opt')
print()
name = 'Ceres'
results = sim(nims, nsrcs, H,W, None, 1.0, nsamples, forced=False, ceres=True)
#results = sim(nims, nsrcs, H,W, None, 1.0, 10, forced=False)
names.append(name)
dx = np.array([r[0] for r in results])
dy = np.array([r[1] for r in results])
pp = np.array([r[2] for r in results])
#print('Params:', pp.shape)
tracks = [r[3] for r in results]
tracks2 = [r[4] for r in results]
flux = pp[:,2]
logprobs = np.array([r[6] for r in results])
if bestlogprobs is None:
bestlogprobs = logprobs
else:
bestlogprobs = np.maximum(bestlogprobs, logprobs)
alltracks.append(tracks)
allfluxes.append(flux)
allra.append(pp[:,0])
alldec.append(pp[:,1])
alldx.append(dx)
alldy.append(dy)
alllnprobtracks.append([r[5] for r in results])
ras = pp[:,0] - dx * pixscale
decs = pp[:,1] + dy * pixscale
meanra = np.mean(ras)
meandec = np.mean(decs)
plt.clf()
plt.scatter(dx, dy, c=flux)
plt.colorbar()
plt.xlabel('WCS Scatter x (pix)')
plt.ylabel('WCS Scatter y (pix)')
plt.axis('equal')
ax = plt.axis()
mx = max(np.abs(ax))
plt.axis([-mx,mx,-mx,mx])
plt.axhline(0., color='k', alpha=0.2)
plt.axvline(0., color='k', alpha=0.2)
plt.axis([-2,2,-2,2])
plt.title(name)
ps.savefig()
# plt.clf()
# for dxi,dyi,track in zip(dx, dy, tracks):
# tp = np.array([t[1] for t in track])
# rapix = tp[:,0] / pixscale - dxi
# decpix = tp[:,1] / pixscale + dyi
# flux = tp[:,2]
# plt.scatter(rapix, decpix, c=flux, zorder=20)
# plt.plot(rapix, decpix, 'k-', alpha=0.1, lw=2, zorder=10)
# plt.colorbar()
# plt.xlabel('RA (pix)')
# plt.ylabel('Dec (pix)')
# #plt.axis('equal')
# #plt.axis('scaled')
# ax = plt.axis()
# mx = max(np.abs(ax))
# plt.axis([-mx,mx,-mx,mx])
# plt.axhline(0., color='k', alpha=0.2)
# plt.axvline(0., color='k', alpha=0.2)
# plt.axis([-2,2,-2,2])
# plt.title(name)
# ps.savefig()
plt.clf()
for dxi,dyi,track,track2 in zip(dx, dy, tracks, tracks2):
#tp = np.array([t[1] for t in track])
#print('track2', track2)
tp = np.vstack(track2)
rapix = (tp[:,0] - dxi*pixscale - meanra ) / pixscale
decpix = (tp[:,1] + dyi*pixscale - meandec) / pixscale
#rapix = tp[:,0] / pixscale - dxi
#decpix = tp[:,1] / pixscale + dyi
#flux = tp[:,2]
#plt.scatter(rapix, decpix, c=flux, zorder=20)
plt.scatter(rapix, decpix,
c=np.arange(len(rapix))/float(len(rapix)),zorder=20)
plt.plot(rapix, decpix, 'k-', alpha=0.1, lw=2, zorder=10)
# manimlib/mobject/svg/mtex_mobject.py
from __future__ import annotations
import re
import colour
import itertools as it
from types import MethodType
from typing import Iterable, Union, Sequence
from manimlib.constants import WHITE
from manimlib.mobject.svg.svg_mobject import SVGMobject
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.utils.color import color_to_int_rgb
from manimlib.utils.config_ops import digest_config
from manimlib.utils.iterables import adjacent_pairs
from manimlib.utils.iterables import remove_list_redundancies
from manimlib.utils.tex_file_writing import tex_to_svg_file
from manimlib.utils.tex_file_writing import get_tex_config
from manimlib.utils.tex_file_writing import display_during_execution
from manimlib.logger import log
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from manimlib.mobject.types.vectorized_mobject import VMobject
ManimColor = Union[str, colour.Color, Sequence[float]]
SCALE_FACTOR_PER_FONT_POINT = 0.001
def _get_neighbouring_pairs(iterable: Iterable) -> list:
return list(adjacent_pairs(iterable))[:-1]
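# adjacent_pairs wraps around to the first element, so dropping the last pair leaves the
# plain consecutive pairs, e.g. [1, 2, 3] -> [(1, 2), (2, 3)] (assuming the current
# manimlib behaviour of adjacent_pairs).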
class _TexParser(object):
def __init__(self, tex_string: str, additional_substrings: list[str]):
self.tex_string = tex_string
self.whitespace_indices = self.get_whitespace_indices()
self.backslash_indices = self.get_backslash_indices()
self.script_indices = self.get_script_indices()
self.brace_indices_dict = self.get_brace_indices_dict()
self.tex_span_list: list[tuple[int, int]] = []
self.script_span_to_char_dict: dict[tuple[int, int], str] = {}
self.script_span_to_tex_span_dict: dict[
tuple[int, int], tuple[int, int]
] = {}
self.neighbouring_script_span_pairs: list[tuple[int, int]] = []
self.specified_substrings: list[str] = []
self.add_tex_span((0, len(tex_string)))
self.break_up_by_scripts()
self.break_up_by_double_braces()
self.break_up_by_additional_substrings(additional_substrings)
self.tex_span_list.sort(key=lambda t: (t[0], -t[1]))
self.specified_substrings = remove_list_redundancies(
self.specified_substrings
)
self.containing_labels_dict = self.get_containing_labels_dict()
def add_tex_span(self, tex_span: tuple[int, int]) -> None:
if tex_span not in self.tex_span_list:
self.tex_span_list.append(tex_span)
def get_whitespace_indices(self) -> list[int]:
return [
match_obj.start()
for match_obj in re.finditer(r"\s", self.tex_string)
]
def get_backslash_indices(self) -> list[int]:
# Newlines (`\\`) don't count.
return [
match_obj.end() - 1
for match_obj in re.finditer(r"\\+", self.tex_string)
if len(match_obj.group()) % 2 == 1
]
def filter_out_escaped_characters(self, indices) -> list[int]:
return list(filter(
lambda index: index - 1 not in self.backslash_indices,
indices
))
def get_script_indices(self) -> list[int]:
return self.filter_out_escaped_characters([
match_obj.start()
for match_obj in re.finditer(r"[_^]", self.tex_string)
])
def get_brace_indices_dict(self) -> dict[int, int]:
tex_string = self.tex_string
indices = self.filter_out_escaped_characters([
match_obj.start()
for match_obj in re.finditer(r"[{}]", tex_string)
])
result = {}
left_brace_indices_stack = []
for index in indices:
if tex_string[index] == "{":
left_brace_indices_stack.append(index)
else:
left_brace_index = left_brace_indices_stack.pop()
result[left_brace_index] = index
return result
def break_up_by_scripts(self) -> None:
# Match subscripts & superscripts.
tex_string = self.tex_string
whitespace_indices = self.whitespace_indices
brace_indices_dict = self.brace_indices_dict
script_spans = []
for script_index in self.script_indices:
script_char = tex_string[script_index]
extended_begin = script_index
while extended_begin - 1 in whitespace_indices:
extended_begin -= 1
script_begin = script_index + 1
while script_begin in whitespace_indices:
script_begin += 1
if script_begin in brace_indices_dict.keys():
script_end = brace_indices_dict[script_begin] + 1
else:
pattern = re.compile(r"[a-zA-Z0-9]|\\[a-zA-Z]+")
match_obj = pattern.match(tex_string, pos=script_begin)
if not match_obj:
script_name = {
"_": "subscript",
"^": "superscript"
}[script_char]
log.warning(
f"Unclear {script_name} detected while parsing. "
"Please use braces to clarify"
)
continue
script_end = match_obj.end()
tex_span = (script_begin, script_end)
script_span = (extended_begin, script_end)
script_spans.append(script_span)
self.add_tex_span(tex_span)
self.script_span_to_char_dict[script_span] = script_char
self.script_span_to_tex_span_dict[script_span] = tex_span
if not script_spans:
return
_, sorted_script_spans = zip(*sorted([
(index, script_span)
for script_span in script_spans
for index in script_span
]))
for span_0, span_1 in _get_neighbouring_pairs(sorted_script_spans):
if span_0[1] == span_1[0]:
self.neighbouring_script_span_pairs.append((span_0, span_1))
def break_up_by_double_braces(self) -> None:
# Match paired double braces (`{{...}}`).
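# e.g. in r"{{a}} + b", the span of "{{a}}" is recorded as a specified substring so it
# can later be isolated (and coloured) on its own.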
tex_string = self.tex_string
reversed_indices_dict = dict(
item[::-1] for item in self.brace_indices_dict.items()
)
skip = False
for prev_right_index, right_index in _get_neighbouring_pairs(
list(reversed_indices_dict.keys())
):
if skip:
skip = False
continue
if right_index != prev_right_index + 1:
continue
left_index = reversed_indices_dict[right_index]
prev_left_index = reversed_indices_dict[prev_right_index]
if left_index != prev_left_index - 1:
continue
tex_span = (left_index, right_index + 1)
self.add_tex_span(tex_span)
self.specified_substrings.append(tex_string[slice(*tex_span)])
skip = True
def break_up_by_additional_substrings(
self,
additional_substrings: list[str]
) -> None:
stripped_substrings = sorted(remove_list_redundancies([
string.strip()
for string in additional_substrings
]))
if "" in stripped_substrings:
stripped_substrings.remove("")
tex_string = self.tex_string
all_tex_spans = []
for string in stripped_substrings:
match_objs = list(re.finditer(re.escape(string), tex_string))
if not match_objs:
continue
self.specified_substrings.append(string)
for match_obj in match_objs:
all_tex_spans.append(match_obj.span())
former_script_spans_dict = dict([
script_span_pair[0][::-1]
for script_span_pair in self.neighbouring_script_span_pairs
])
for span_begin, span_end in all_tex_spans:
# Deconstruct spans containing one out of two scripts.
if span_end in former_script_spans_dict.keys():
span_end = former_script_spans_dict[span_end]
if span_begin >= span_end:
continue
self.add_tex_span((span_begin, span_end))
def get_containing_labels_dict(self) -> dict[tuple[int, int], list[int]]:
tex_span_list = self.tex_span_list
result = {
tex_span: []
for tex_span in tex_span_list
}
overlapping_tex_span_pairs = []
for index_0, span_0 in enumerate(tex_span_list):
for index_1, span_1 in enumerate(tex_span_list[index_0:]):
if span_0[1] <= span_1[0]:
continue
if span_0[1] < span_1[1]:
overlapping_tex_span_pairs.append((span_0, span_1))
result[span_0].append(index_0 + index_1)
if overlapping_tex_span_pairs:
tex_string = self.tex_string
log.error("Partially overlapping substrings detected:")
for tex_span_pair in overlapping_tex_span_pairs:
log.error(", ".join(
f"\"{tex_string[slice(*tex_span)]}\""
for tex_span in tex_span_pair
))
raise ValueError
return result
def get_labelled_tex_string(self) -> str:
indices, _, flags, labels = zip(*sorted([
(*tex_span[::(1, -1)[flag]], flag, label)
for label, tex_span in enumerate(self.tex_span_list)
for flag in range(2)
], key=lambda t: (t[0], -t[2], -t[1])))
command_pieces = [
("{{" + self.get_color_command(label), "}}")[flag]
for flag, label in zip(flags, labels)
][1:-1]
command_pieces.insert(0, "")
string_pieces = [
self.tex_string[slice(*tex_span)]
for tex_span in _get_neighbouring_pairs(indices)
]
return "".join(it.chain(*zip(command_pieces, string_pieces)))
@staticmethod
def get_color_command(label: int) -> str:
rg, b = divmod(label, 256)
r, g = divmod(rg, 256)
return "".join([
"\\color[RGB]",
"{",
",".join(map(str, (r, g, b))),
"}"
])
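    # Illustrative example: get_color_command(300) evaluates divmod(300, 256) == (1, 44)
    # and divmod(1, 256) == (0, 1), producing "\color[RGB]{0,1,44}". The fill color of
    # each rendered glyph therefore encodes the label of the tex span it came from,
    # and color_to_label below recovers that label from the glyph color.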
def get_sorted_submob_indices(self, submob_labels: list[int]) -> list[int]:
def script_span_to_submob_range(script_span):
tex_span = self.script_span_to_tex_span_dict[script_span]
submob_indices = [
index for index, label in enumerate(submob_labels)
if label in self.containing_labels_dict[tex_span]
]
return range(submob_indices[0], submob_indices[-1] + 1)
filtered_script_span_pairs = filter(
lambda script_span_pair: all([
self.script_span_to_char_dict[script_span] == character
for script_span, character in zip(script_span_pair, "_^")
]),
self.neighbouring_script_span_pairs
)
switch_range_pairs = sorted([
tuple([
script_span_to_submob_range(script_span)
for script_span in script_span_pair
])
for script_span_pair in filtered_script_span_pairs
], key=lambda t: (t[0].stop, -t[0].start))
result = list(range(len(submob_labels)))
for range_0, range_1 in switch_range_pairs:
result = [
*result[:range_1.start],
*result[range_0.start:range_0.stop],
*result[range_1.stop:range_0.start],
*result[range_1.start:range_1.stop],
*result[range_0.stop:]
]
return result
def get_submob_tex_strings(self, submob_labels: list[int]) -> list[str]:
ordered_tex_spans = [
self.tex_span_list[label] for label in submob_labels
]
ordered_containing_labels = [
self.containing_labels_dict[tex_span]
for tex_span in ordered_tex_spans
]
ordered_span_begins, ordered_span_ends = zip(*ordered_tex_spans)
string_span_begins = [
prev_end if prev_label in containing_labels else curr_begin
for prev_end, prev_label, containing_labels, curr_begin in zip(
ordered_span_ends[:-1], submob_labels[:-1],
ordered_containing_labels[1:], ordered_span_begins[1:]
)
]
string_span_begins.insert(0, ordered_span_begins[0])
string_span_ends = [
next_begin if next_label in containing_labels else curr_end
for next_begin, next_label, containing_labels, curr_end in zip(
ordered_span_begins[1:], submob_labels[1:],
ordered_containing_labels[:-1], ordered_span_ends[:-1]
)
]
string_span_ends.append(ordered_span_ends[-1])
tex_string = self.tex_string
left_brace_indices = sorted(self.brace_indices_dict.keys())
right_brace_indices = sorted(self.brace_indices_dict.values())
ignored_indices = sorted(it.chain(
self.whitespace_indices,
left_brace_indices,
right_brace_indices,
self.script_indices
))
result = []
for span_begin, span_end in zip(string_span_begins, string_span_ends):
while span_begin in ignored_indices:
span_begin += 1
if span_begin >= span_end:
result.append("")
continue
while span_end - 1 in ignored_indices:
span_end -= 1
unclosed_left_brace = 0
unclosed_right_brace = 0
for index in range(span_begin, span_end):
if index in left_brace_indices:
unclosed_left_brace += 1
elif index in right_brace_indices:
if unclosed_left_brace == 0:
unclosed_right_brace += 1
else:
unclosed_left_brace -= 1
result.append("".join([
unclosed_right_brace * "{",
tex_string[span_begin:span_end],
unclosed_left_brace * "}"
]))
return result
def find_span_components_of_custom_span(
self,
custom_span: tuple[int, int]
) -> list[tuple[int, int]] | None:
skipped_indices = sorted(it.chain(
self.whitespace_indices,
self.script_indices
))
tex_span_choices = sorted(filter(
lambda tex_span: all([
tex_span[0] >= custom_span[0],
tex_span[1] <= custom_span[1]
]),
self.tex_span_list
))
# Choose spans that reach the farthest.
tex_span_choices_dict = dict(tex_span_choices)
span_begin, span_end = custom_span
result = []
while span_begin != span_end:
if span_begin not in tex_span_choices_dict.keys():
if span_begin in skipped_indices:
span_begin += 1
continue
return None
next_begin = tex_span_choices_dict[span_begin]
result.append((span_begin, next_begin))
span_begin = next_begin
return result
def get_containing_labels_by_tex_spans(
self,
tex_spans: list[tuple[int, int]]
) -> list[int]:
return remove_list_redundancies(list(it.chain(*[
self.containing_labels_dict[tex_span]
for tex_span in tex_spans
])))
def get_specified_substrings(self) -> list[str]:
return self.specified_substrings
def get_isolated_substrings(self) -> list[str]:
return remove_list_redundancies([
self.tex_string[slice(*tex_span)]
for tex_span in self.tex_span_list
])
class _TexSVG(SVGMobject):
CONFIG = {
"height": None,
"fill_opacity": 1.0,
"stroke_width": 0,
"path_string_config": {
"should_subdivide_sharp_curves": True,
"should_remove_null_curves": True,
},
}
class MTex(_TexSVG):
CONFIG = {
"color": WHITE,
"font_size": 48,
"alignment": "\\centering",
"tex_environment": "align*",
"isolate": [],
"tex_to_color_map": {},
"use_plain_tex": False,
}
def __init__(self, tex_string: str, **kwargs):
digest_config(self, kwargs)
tex_string = tex_string.strip()
# Prevent from passing an empty string.
if not tex_string:
tex_string = "\\quad"
self.tex_string = tex_string
self.parser = _TexParser(
self.tex_string,
[*self.tex_to_color_map.keys(), *self.isolate]
)
super().__init__(**kwargs)
self.set_color_by_tex_to_color_map(self.tex_to_color_map)
self.scale(SCALE_FACTOR_PER_FONT_POINT * self.font_size)
@property
def hash_seed(self) -> tuple:
return (
self.__class__.__name__,
self.svg_default,
self.path_string_config,
self.tex_string,
self.parser.specified_substrings,
self.alignment,
self.tex_environment,
self.use_plain_tex
)
def get_file_path(self) -> str:
return self.get_file_path_(use_plain_tex=self.use_plain_tex)
def get_file_path_(self, use_plain_tex: bool) -> str:
if use_plain_tex:
tex_string = self.tex_string
else:
tex_string = self.parser.get_labelled_tex_string()
full_tex = self.get_tex_file_body(tex_string)
with display_during_execution(f"Writing \"{self.tex_string}\""):
file_path = self.tex_to_svg_file_path(full_tex)
return file_path
def get_tex_file_body(self, tex_string: str) -> str:
if self.tex_environment:
tex_string = "\n".join([
f"\\begin{{{self.tex_environment}}}",
tex_string,
f"\\end{{{self.tex_environment}}}"
])
if self.alignment:
tex_string = "\n".join([self.alignment, tex_string])
tex_config = get_tex_config()
return tex_config["tex_body"].replace(
tex_config["text_to_replace"],
tex_string
)
@staticmethod
def tex_to_svg_file_path(tex_file_content: str) -> str:
return tex_to_svg_file(tex_file_content)
def generate_mobject(self) -> None:
super().generate_mobject()
if not self.use_plain_tex:
labelled_svg_glyphs = self
else:
file_path = self.get_file_path_(use_plain_tex=False)
labelled_svg_glyphs = _TexSVG(file_path)
glyph_labels = [
self.color_to_label(labelled_glyph.get_fill_color())
for labelled_glyph in labelled_svg_glyphs
]
rearranged_submobs = self.rearrange_submobjects(
self.submobjects, glyph_labels
)
self.set_submobjects(rearranged_submobs)
@staticmethod
def color_to_label(color: ManimColor) | |
(len(states.shape) == 2)
return self.embedder(states, stop_gradient=stop_gradient)
def fit(self, states, actions,
rewards, discounts,
next_states):
"""Updates critic parameters.
Args:
states: Batch of sequences of states.
actions: Batch of sequences of actions.
      rewards: Batch of sequences of rewards.
      discounts: Batch of sequences of discounts.
      next_states: Batch of sequences of next states.
Returns:
Dictionary with information to track.
"""
batch_size = tf.shape(states)[0]
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.all_variables)
embeddings = self.embedder(states[:, 0, :], stop_gradient=False)
all_pred_values = []
all_pred_rewards = []
all_pred_discounts = []
for idx in range(self.sequence_length):
pred_value = self.f_value(embeddings)[Ellipsis, 0]
pred_reward, pred_discount = tf.unstack(
self.f_out(tf.concat([embeddings, actions[:, idx, :]], -1)),
axis=-1)
pred_embeddings = embeddings + self.f_trans(
tf.concat([embeddings, actions[:, idx, :]], -1))
all_pred_values.append(pred_value)
all_pred_rewards.append(pred_reward)
all_pred_discounts.append(pred_discount)
embeddings = pred_embeddings
last_value = tf.stop_gradient(self.f_value_target(embeddings)[Ellipsis, 0]) / (1 - self.discount)
all_true_values = []
for idx in range(self.sequence_length - 1, -1, -1):
value = self.discount * discounts[:, idx] * last_value + rewards[:, idx]
all_true_values.append(value)
last_value = value
all_true_values = all_true_values[::-1]
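      # Note: the backward loop above builds bootstrapped value targets from the
      # stop-gradient target network; e.g. with sequence_length = 2,
      #   V_1 = r_1 + discount * d_1 * V_boot and V_0 = r_0 + discount * d_0 * V_1,
      # and the reversal restores time order so targets align with the predictions.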
reward_error = tf.stack(all_pred_rewards, -1) - rewards
value_error = tf.stack(all_pred_values, -1) - (1 - self.discount) * tf.stack(all_true_values, -1)
reward_loss = tf.reduce_sum(tf.math.square(reward_error), -1)
value_loss = tf.reduce_sum(tf.math.square(value_error), -1)
loss = tf.reduce_mean(reward_loss + value_loss)
grads = tape.gradient(loss, self.all_variables)
self.optimizer.apply_gradients(
zip(grads, self.all_variables))
if self.optimizer.iterations % self.target_update_period == 0:
soft_update(self.f_value, self.f_value_target, tau=self.tau)
return {
'embed_loss': loss,
'reward_loss': tf.reduce_mean(reward_loss),
'value_loss': tf.reduce_mean(value_loss),
}
@tf.function
def update_step(self, replay_buffer_iter):
states, actions, rewards, discounts, next_states = next(replay_buffer_iter)
return self.fit(states, actions, rewards, discounts, next_states)
def get_input_state_dim(self):
return self.embedder.embedding_dim
class DiversePolicyLearner(tf.keras.Model):
"""WORK IN PROGRESS.
A learner for expressing diverse policies.
"""
def __init__(self,
state_dim,
action_spec,
embedding_dim = 256,
num_distributions = None,
latent_dim = 64,
latent_distributions = None,
sequence_blocks = 1,
hidden_dims = (256, 256),
sequence_length = 2,
learning_rate = None,
kl_weight = 0.1,
perturbation_scale = 0.1,
reg_weight = 0.):
"""Creates networks.
Args:
state_dim: State size.
action_spec: Action spec.
embedding_dim: Embedding size.
num_distributions: Number of categorical distributions
for discrete embedding.
latent_dim: Dimension of the latent variable.
latent_distributions: number of categorical distributions
for the latent variable.
sequence_blocks: Number of shifts applied to states and actions.
hidden_dims: List of hidden dimensions.
sequence_length: Expected length of sequences provided as input
learning_rate: Learning rate.
kl_weight: Weight on KL regularizer.
perturbation_scale: Scale of perturbation.
reg_weight: Weight on discrete embedding regularization.
"""
super().__init__()
self.input_dim = state_dim
self.action_dim = action_spec.shape[0]
self.action_spec = action_spec
self.embedding_dim = embedding_dim
self.num_distributions = num_distributions
self.latent_dim = latent_dim
self.latent_distributions = latent_distributions
assert not latent_distributions or latent_dim % latent_distributions == 0
self.sequence_blocks = sequence_blocks
self.sequence_length = sequence_length * self.sequence_blocks
self.kl_weight = kl_weight
self.reg_weight = reg_weight
self.perturbation_scale = perturbation_scale
self.embedder = EmbedNet(
state_dim,
embedding_dim=self.embedding_dim,
num_distributions=self.num_distributions,
hidden_dims=hidden_dims)
policy_encoder_in = tf.keras.Input(
shape=(self.sequence_length, self.input_dim + self.action_dim))
preprocess = tf.keras.layers.Dense(256, activation=tf.nn.relu)
transformer_output_dim = (1 if self.latent_distributions else 2) * self.latent_dim
transformer_out = transformer(preprocess(policy_encoder_in),
num_layers=1,
embedding_dim=256,
num_heads=4,
key_dim=256,
ff_dim=256,
output_dim=transformer_output_dim)
policy_encoder_out = tf.reduce_mean(transformer_out, axis=-2)
self.policy_encoder = tf.keras.Model(
inputs=policy_encoder_in, outputs=policy_encoder_out)
self.policy_decoder = policies.DiagGuassianPolicy(
self.embedding_dim + self.latent_dim,
action_spec, apply_tanh_squash=True)
learning_rate = learning_rate or 1e-3
self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
self.log_alpha = tf.Variable(tf.math.log(1.0), trainable=True)
self.target_entropy = -action_spec.shape[0]
self.all_variables = self.variables
self.average_embedding = tf.Variable(tf.zeros([self.embedding_dim]),
trainable=False)
@tf.function
def call(self,
states,
actions = None,
stop_gradient = True):
"""Returns embedding.
Args:
      states: A batch of states.
      actions: Optional batch of actions; unused by this method.
stop_gradient: Whether to stop_gradient.
Returns:
Embedding.
"""
return self.embedder(states, stop_gradient=stop_gradient)
  def fit(self, states, actions):
    """Updates the policy encoder/decoder and embedder parameters.
Args:
states: Batch of sequences of states.
actions: Batch of sequences of actions.
Returns:
Dictionary with information to track.
"""
batch_size = tf.shape(states)[0]
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.all_variables)
state_blocks = [states]
action_blocks = [actions]
shifted_states = states
shifted_actions = actions
for _ in range(self.sequence_blocks - 1):
shifted_states = tf.concat([shifted_states[1:], shifted_states[:1]], 0)
shifted_actions = tf.concat([shifted_actions[1:], shifted_actions[:1]], 0)
state_blocks.append(shifted_states)
action_blocks.append(shifted_actions)
states = tf.concat(state_blocks, axis=-2)
actions = tf.concat(action_blocks, axis=-2)
noise = (self.perturbation_scale * tf.random.normal(tf.shape(actions)) *
0.5 * (self.action_spec.maximum - self.action_spec.minimum))
noisy_actions = tf.clip_by_value(actions + noise,
self.action_spec.minimum + 1e-3,
self.action_spec.maximum - 1e-3)
policy_encoder_in = tf.concat([states, noisy_actions], -1)
policy_encoder_out = self.policy_encoder(policy_encoder_in, training=True)
if self.latent_distributions:
all_logits = tf.split(policy_encoder_out,
num_or_size_splits=self.latent_distributions, axis=-1)
all_probs = [tf.nn.softmax(logits, -1) for logits in all_logits]
joined_probs = tf.concat(all_probs, -1)
all_samples = [tfp.distributions.Categorical(logits=logits).sample()
for logits in all_logits]
all_onehot_samples = [tf.one_hot(samples, self.latent_dim // self.latent_distributions)
for samples in all_samples]
joined_onehot_samples = tf.concat(all_onehot_samples, -1)
# Straight-through gradients.
latent_sample = joined_onehot_samples + joined_probs - tf.stop_gradient(joined_probs)
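        # Note: this is the straight-through estimator; the forward pass keeps the
        # one-hot samples, while (joined_probs - stop_gradient(joined_probs)) is zero
        # in value but routes gradients into the softmax probabilities.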
kl_loss = tf.reduce_sum(
joined_probs *
(tf.math.log(float(self.latent_dim // self.latent_distributions)) +
tf.math.log(1e-6 + joined_probs)), -1)
else:
latent_mean, latent_logvar = tf.split(policy_encoder_out, 2, axis=-1)
latent_sample = (latent_mean + tf.random.normal(tf.shape(latent_mean)) *
tf.exp(0.5 * latent_logvar))
kl_loss = -0.5 * tf.reduce_sum(1.0 + latent_logvar - tf.pow(latent_mean, 2) -
tf.exp(latent_logvar), -1)
all_states = tf.reshape(states, [batch_size * self.sequence_length, -1])
all_embed = self.embedder(all_states, stop_gradient=False)
all_latents = tf.repeat(latent_sample, self.sequence_length, axis=0)
policy_decoder_in = tf.concat([all_embed, all_latents], -1)
all_noisy_actions = tf.reshape(noisy_actions, [batch_size * self.sequence_length, -1])
action_log_probs = self.policy_decoder.log_probs(policy_decoder_in, all_noisy_actions)
_, policy_log_probs = self.policy_decoder(policy_decoder_in, sample=True, with_log_probs=True)
alpha = tf.exp(self.log_alpha)
alpha_loss = alpha * tf.stop_gradient(-policy_log_probs - self.target_entropy)
reconstruct_loss = -tf.reduce_sum(
tf.reshape(action_log_probs - tf.stop_gradient(alpha) * policy_log_probs,
[batch_size, self.sequence_length]), -1)
self.average_embedding.assign(0.99 * self.average_embedding +
0.01 * tf.reduce_mean(all_embed, 0))
if self.num_distributions:
regularization = tf.reduce_sum(all_embed / (1e-6 + tf.stop_gradient(self.average_embedding)), -1)
regularization = tf.reduce_sum(tf.reshape(regularization, [batch_size, self.sequence_length]), -1)
entropy = -tf.reduce_sum(self.average_embedding * tf.math.log(1e-6 + self.average_embedding))
else:
regularization = 0.0
entropy = 0.0
loss = tf.reduce_mean(reconstruct_loss + self.kl_weight * kl_loss +
self.reg_weight * regularization) + tf.reduce_mean(alpha_loss)
grads = tape.gradient(loss, self.all_variables)
self.optimizer.apply_gradients(
zip(grads, self.all_variables))
return {
'embed_loss': loss,
'alpha': alpha,
'reconstruct_loss': tf.reduce_mean(reconstruct_loss),
'latent_kl_loss': tf.reduce_mean(kl_loss),
'regularization': tf.reduce_mean(regularization),
'entropy': tf.reduce_mean(entropy),
}
@tf.function
def update_step(self, replay_buffer_iter):
states, actions, _, _, _ = next(replay_buffer_iter)
return self.fit(states, actions)
def get_input_state_dim(self):
return self.embedder.embedding_dim
class SuperModelLearner(tf.keras.Model):
"""A learner for model-based representation learning.
Encompasses forward models, inverse models, as well as latent models like
DeepMDP.
"""
def __init__(self,
state_dim,
action_spec,
embedding_dim = 256,
num_distributions = None,
hidden_dims = (256, 256),
sequence_length = 2,
learning_rate = None,
latent_dim = 256,
reward_weight = 1.0,
forward_weight = 1.0, # Predict last state given prev actions/states.
inverse_weight = 1.0, # Predict last action given states.
state_prediction_mode = 'energy'):
"""Creates networks.
Args:
state_dim: State size.
action_spec: Action spec.
embedding_dim: Embedding size.
num_distributions: Number of categorical distributions
for discrete embedding.
hidden_dims: List of hidden dimensions.
sequence_length: Expected length of sequences provided as input
learning_rate: Learning rate.
latent_dim: Dimension of the latent variable.
reward_weight: Weight on the reward loss.
forward_weight: Weight on the forward loss.
inverse_weight: Weight on the inverse loss.
state_prediction_mode: One of ['latent', 'energy'].
"""
super().__init__()
self.input_dim = state_dim
self.action_dim = action_spec.shape[0]
self.action_spec = action_spec
self.embedding_dim = embedding_dim
self.num_distributions = num_distributions
self.sequence_length = sequence_length
self.latent_dim = latent_dim
self.reward_weight = reward_weight
self.forward_weight = forward_weight
self.inverse_weight = inverse_weight
self.state_prediction_mode = state_prediction_mode
self.embedder = EmbedNet(
state_dim,
embedding_dim=self.embedding_dim,
num_distributions=self.num_distributions,
hidden_dims=hidden_dims)
if self.sequence_length > 2:
self.latent_embedder = RNNEmbedNet(
[self.sequence_length - 2, self.embedding_dim + self.action_dim],
embedding_dim=self.latent_dim)
self.reward_decoder = EmbedNet(
self.latent_dim + self.embedding_dim + self.action_dim,
embedding_dim=1,
hidden_dims=hidden_dims)
self.inverse_decoder = policies.DiagGuassianPolicy(
2 * self.embedding_dim + self.latent_dim,
action_spec, apply_tanh_squash=True)
forward_decoder_out = (self.embedding_dim
if (self.state_prediction_mode in ['latent', 'energy']) else
self.input_dim)
forward_decoder_dists = (self.num_distributions
if (self.state_prediction_mode in ['latent', 'energy']) else
None)
self.forward_decoder = StochasticEmbedNet(
self.latent_dim + self.embedding_dim + self.action_dim,
embedding_dim=forward_decoder_out,
num_distributions=forward_decoder_dists,
hidden_dims=hidden_dims)
self.weight = tf.Variable(tf.eye(self.embedding_dim))
learning_rate = learning_rate or 1e-4
self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
self.log_alpha = tf.Variable(tf.math.log(1.0), trainable=True)
self.target_entropy = -action_spec.shape[0]
self.all_variables = self.variables
@tf.function
def call(self,
states,
actions = None,
stop_gradient = True):
"""Returns embedding.
Args:
      states: A batch of states.
      actions: Optional batch of actions; unused by this method.
stop_gradient: Whether to stop_gradient.
Returns:
Embedding.
"""
return self.embedder(states, stop_gradient=stop_gradient)
def compute_energy(self, embeddings,
other_embeddings):
"""Computes matrix of energies between every pair of (embedding, other_embedding)."""
transformed_embeddings = tf.matmul(embeddings, self.weight)
energies = tf.matmul(transformed_embeddings, other_embeddings, transpose_b=True)
return energies
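  # Note: the result is a [num_embeddings, num_other_embeddings] matrix with
  # energies[i, j] = embeddings[i]^T . weight . other_embeddings[j], i.e. a bilinear
  # score for every (embedding, other_embedding) pair.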
  def fit(self, states, actions,
          rewards):
    """Updates the embedder, reward, inverse and forward model parameters.
    Args:
      states: Batch of sequences of states.
      actions: Batch of sequences of actions.
      rewards: Batch of sequences of rewards.
Returns:
Dictionary with information to track.
"""
batch_size = tf.shape(states)[0]
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(self.all_variables)
all_states = tf.reshape(states, [batch_size * self.sequence_length, self.input_dim])
all_embeddings = self.embedder(all_states, stop_gradient=False)
embeddings = tf.reshape(all_embeddings, [batch_size, self.sequence_length, self.embedding_dim])
if self.sequence_length > 2:
latent_embedder_in = tf.concat([embeddings[:, :-2, :], actions[:, :-2, :]], -1)
latent = self.latent_embedder(latent_embedder_in, stop_gradient=False)
else:
latent = tf.zeros([batch_size, self.latent_dim])
reward_decoder_in = tf.concat([latent, embeddings[:, -2, :], actions[:, -2, :]], -1)
reward_pred = self.reward_decoder(reward_decoder_in, stop_gradient=False)
reward_loss = tf.square(rewards[:, -2] - reward_pred[Ellipsis, 0])
inverse_decoder_in = tf.concat([latent, embeddings[:, -2, :], embeddings[:, -1, :]], -1)
action_log_probs = self.inverse_decoder.log_probs(inverse_decoder_in, actions[:, -2, :])
_, policy_log_probs = self.inverse_decoder(inverse_decoder_in, sample=True, with_log_probs=True)
alpha = tf.exp(self.log_alpha)
alpha_loss = alpha * | |
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This uses the i2c-pseudo driver to implement a local Linux I2C adapter of a
Servo/DUT I2C bus.
"""
import collections
import errno
import logging
import os
import select
import threading
_CONTROLLER_DEVICE_PATH = b'/dev/i2c-pseudo-controller'
_CMD_END_CHAR = b'\n'
_HEADER_SEP_CHAR = b' '
_DATA_SEP_CHAR = b':'
# This is a printf()-style format string for the i2c-pseudo I2C_XFER_REPLY
# write command.
#
# The positional fields are, in order:
# xfer_id: int or ctypes.c_ubyte or equivalent
# msg_id: int or ctypes.c_ubyte or equivalent
# addr: int or ctypes.c_ubyte or equivalent
# flags: int or ctypes.c_ubyte or equivalent
# errno: int or ctypes.c_ubyte or equivalent
#
# See Documentation/i2c/pseudo-controller-interface from Linux for more details.
_I2C_XFER_REPLY_FMT_STR = _HEADER_SEP_CHAR.join((
b'I2C_XFER_REPLY', b'%d', b'%d', b'0x%04X', b'0x%04X', b'%d'))
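# Illustrative example: a successful 2-byte read from address 0x50 in transfer 3,
# message 1 (flags = _I2C_M_RD, errno = 0) is eventually written back to the
# controller as b'I2C_XFER_REPLY 3 1 0x0050 0x0001 0 AB:CD\n', where the hex data
# and the terminating newline are appended by _cmd_i2c_commit_xfer_internal().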
_READ_SIZE = 1024
_I2C_ADAPTER_TIMEOUT_MS = 5000
_EPOLL_EVENTMASK_NO_WRITES = select.EPOLLIN | select.EPOLLERR | select.EPOLLHUP
_EPOLL_EVENTMASK_WITH_WRITES = _EPOLL_EVENTMASK_NO_WRITES | select.EPOLLOUT
# This value is guaranteed per include/uapi/linux/i2c.h
_I2C_M_RD = 0x0001
# This value is implicitly subject to change.
_I2C_M_RECV_LEN = 0x0400
def default_controller_path():
"""Get the default i2c-pseudo controller device path.
Returns:
bytes - absolute path
"""
path = _CONTROLLER_DEVICE_PATH
assert os.path.isabs(path)
return path
class I2cPseudoAdapter(object):
"""This class implements a Linux I2C adapter for the servo I2C bus.
This class is a controller for the i2c-pseudo Linux kernel module. See its
documentation for background.
Thread safety:
This class is internally multi-threaded.
It is safe to use the public interface from multiple threads concurrently.
Usage:
adap = I2cPseudoAdapter.make_with_default_path(i2c_bus)
i2c_id = adap.start()
...
adap.shutdown()
"""
@staticmethod
def make_with_default_path(i2c_bus):
"""Make an instance using the default i2c-pseudo controller device path.
Args:
i2c_bus: implementation of i2c_base.BaseI2CBus
Returns:
I2cPseudoAdapter
"""
return I2cPseudoAdapter(default_controller_path(), i2c_bus)
def __init__(self, controller_device_path, i2c_bus):
"""Initializer. Does NOT create the pseudo adapter.
Args:
controller_device_path: bytes or str - path to the i2c-pseudo controller
device file
i2c_bus: implementation of i2c_base.BaseI2CBus
"""
self._logger = logging.getLogger('i2c_pseudo')
self._logger.info(
'attempting to initialize (not start yet!) I2C pseudo adapter '
'controller_device_path=%r i2c_bus=%r' %
(controller_device_path, i2c_bus))
self._i2c_bus = i2c_bus
self._controller_device_path = controller_device_path
self._device_fd = None
self._i2c_pseudo_id = None
self._i2c_adapter_num = None
self._epoll = None
self._device_eventmask_lock = threading.Lock()
self._device_epoll_eventmask = _EPOLL_EVENTMASK_NO_WRITES
self._io_thread = None
self._xfer_reqs = []
self._in_tx = False
self._device_read_buffers = []
self._device_read_post_newline_idx = 0
self._device_write_lock = threading.Lock()
# self._device_write_lock must be held while popping items, processing
# items, or appending to the right side. That lock does NOT need to be held
# when appending items to the left side.
self._device_write_queue = collections.deque()
self._startstop_lock = threading.Lock()
self._started = False
self._logger.info(
'finished initializing I2C pseudo adapter (not started yet!)')
def start(self):
"""Create and start the i2c-pseudo adapter.
This method may be invoked repeatedly, including overlapping invocations
from multiple threads. Redundant invocations are a no-op. When any one
invocation has returned successfully (no exceptions), the I2C pseudo adapter
has been started.
If an invocation fails with an exception, the state of the object is
undefined, and it should be abandoned.
This MUST NOT be called during or after shutdown().
"""
self._logger.info('attempting to start I2C pseudo adapter')
with self._startstop_lock:
assert self._started is not None
if self._started:
self._logger.warn('I2C pseudo adapter already started')
return
self._started = True
self._device_fd = os.open(self._controller_device_path,
os.O_RDWR | os.O_NONBLOCK)
self._epoll = select.epoll(sizehint=2)
self._epoll.register(self._device_fd, self._device_epoll_eventmask)
self._io_thread = threading.Thread(
name='I2C-Pseudo-PyID-0x%X' % (id(self),),
target=self._io_thread_run)
self._io_thread.daemon = True
self._io_thread.start()
self._enqueue_simple_ctrlr_cmd((b'GET_PSEUDO_ID',))
self._enqueue_simple_ctrlr_cmd((
b'SET_ADAPTER_NAME_SUFFIX', b'(servod pid %d)' % (os.getpid(),)))
self._enqueue_simple_ctrlr_cmd((
b'SET_ADAPTER_TIMEOUT_MS', b'%d' % (_I2C_ADAPTER_TIMEOUT_MS,)))
self._enqueue_simple_ctrlr_cmd((b'ADAPTER_START',))
self._enqueue_simple_ctrlr_cmd((b'GET_ADAPTER_NUM',))
self._do_device_writes()
self._logger.info('finished starting I2C pseudo adapter')
@property
def i2c_bus(self):
"""Get the i2c_base.BaseI2CBus implementation this object is using.
Returns:
i2c_base.BaseI2CBus
"""
return self._i2c_bus
@property
def controller_device_path(self):
"""Get the i2c-pseudo controller device file this object is using.
Returns:
bytes or str - path to the i2c-pseudo controller device file
"""
return self._controller_device_path
@property
def i2c_pseudo_id(self):
"""Get the i2c-pseudo controller ID.
Returns:
None or int - The i2c-pseudo controller ID, or None if start() has not
completed yet.
"""
return self._i2c_pseudo_id
@property
def i2c_adapter_num(self):
"""Get the Linux I2C adapter number.
Returns:
None or int - The Linux I2C adapter number, or None if start() has not
completed yet.
"""
return self._i2c_adapter_num
@property
def is_running(self):
"""Check whether the pseudo controller is running.
Returns:
bool - True if the pseudo controller and its I/O thread are running, False
otherwise, e.g. if the controller was either never started, or has
been shutdown.
"""
return self._io_thread is not None and self._io_thread.is_alive()
def _reset_tx(self, in_tx):
"""Delete any queued I2C transfer requests and reset transaction state.
Args:
in_tx: bool - The internal transaction state is set to this value.
"""
del self._xfer_reqs[:]
self._in_tx = in_tx
def _cmd_i2c_begin_xfer(self, line):
"""Allow queueing of I2C transfer requests.
This always resets the internal transaction state to be in a transaction and
have no I2C transfer requests queued.
Args:
line: str - The I2C_BEGIN_XFER line read from the i2c-pseudo device.
"""
try:
assert not self._in_tx
finally:
self._reset_tx(True)
def _cmd_i2c_commit_xfer(self, line):
"""Perform the queued I2C transaction.
This always resets the internal transaction state to not be in a transaction
and have no I2C transfer requests queued.
Args:
line: str - The I2C_COMMIT_XFER line read from the i2c-pseudo device.
"""
try:
self._cmd_i2c_commit_xfer_internal(line)
finally:
self._reset_tx(False)
def _cmd_i2c_commit_xfer_internal(self, line):
"""Perform the queued I2C transaction.
Invocations to this should be wrapped in try:/finally: to always reset the
internal transaction state, regardless of success or failure.
Args:
line: str - The I2C_COMMIT_XFER line read from the i2c-pseudo device.
"""
assert self._in_tx
if not self._xfer_reqs:
return
assert len(self._xfer_reqs) <= 2
assert len(set(xfer_id for xfer_id, _, _, _, _, _ in self._xfer_reqs)) == 1
assert len(set(addr for _, _, addr, _, _, _ in self._xfer_reqs)) == 1
write_idx = None
write_list = None
read_idx = None
read_count = None
read_flags = 0
retval = None
errnum = 0
for xfer_id, idx, addr, flags, length, data in self._xfer_reqs:
# This option is not supported by the self._i2c_bus interface.
assert not flags & _I2C_M_RECV_LEN
if flags & _I2C_M_RD:
read_idx = idx
read_count = length
read_flags = flags
else:
write_idx = idx
# TODO(b/79684405): This is silly, wr_rd() often/always converts back to
# byte array, i.e. Python 2 str / Python 3 bytes, using chr(). Update
# servo I2C bus interface to accept byte array.
write_list = [ord(c) for c in data]
write_flags = flags
try:
retval = self._i2c_bus.wr_rd(addr, write_list, read_count)
except (OSError, IOError) as error:
self._logger.exception('self._i2c_bus.wr_rd() raised %s' % (error,))
errnum = error.errno or 1
writes = []
if write_idx is not None:
writes.append(_I2C_XFER_REPLY_FMT_STR % (
xfer_id, write_idx, addr, write_flags, errnum))
writes.append(_CMD_END_CHAR)
if read_idx is not None:
writes.append(_I2C_XFER_REPLY_FMT_STR % (
xfer_id, read_idx, addr, read_flags, errnum))
if retval:
writes.append(_HEADER_SEP_CHAR)
writes.append(_DATA_SEP_CHAR.join(b'%02X' % (b,) for b in retval))
writes.append(_CMD_END_CHAR)
if writes:
self._device_write_queue.extend(writes)
self._do_device_writes()
def _cmd_i2c_xfer_req(self, line):
"""Queue an I2C transfer request. Must already be in a transaction.
Args:
line: str - The I2C_XFER_REQ line read from the i2c-pseudo device.
"""
assert self._in_tx
xfer_id, idx, addr, flags, length = line.split(_HEADER_SEP_CHAR, 6)[1:6]
xfer_id = int(xfer_id, base=0)
idx = int(idx, base=0)
addr = int(addr, base=0)
flags = int(flags, base=0)
length = int(length, base=0)
if flags & _I2C_M_RD:
data = None
else:
parts = line.split(_HEADER_SEP_CHAR, 6)
if len(parts) < 7:
# The data field is absent, handle it the same as an empty data field.
data = b''
else:
data = b''.join(
chr(int(hex_, 16)) for hex_ in parts[6].split(_DATA_SEP_CHAR))
self._xfer_reqs.append((xfer_id, idx, addr, flags, length, data))
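    # Illustrative example: a write request line such as
    # b'I2C_XFER_REQ 3 0 0x0050 0x0000 2 DE:AD' parses into
    # (xfer_id=3, idx=0, addr=0x50, flags=0, length=2, data=b'\xde\xad').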
def _cmd_i2c_adap_num(self, line):
"""Record the I2C adapter number of this I2C pseudo controller.
Args:
line: str - The I2C_ADAPTER_NUM line read from the i2c-pseudo device.
"""
self._i2c_adapter_num = int(line.split(_HEADER_SEP_CHAR, 2)[1])
self._logger.info('I2C adapter number: %d' % (self._i2c_adapter_num,))
def _cmd_i2c_pseudo_id(self, line):
"""Record the I2C pseudo ID of this I2C pseudo controller.
Args:
line: str - The I2C_PSEUDO_ID line read from the i2c-pseudo device.
"""
self._i2c_pseudo_id = int(line.split(_HEADER_SEP_CHAR, 2)[1])
self._logger.info('I2C pseudo ID: %d' % (self._i2c_pseudo_id,))
def _do_ctrlr_cmd(self, line):
"""Dispatch an I2C pseudo controller command to the appropriate handler.
Args:
line: A full read command line from the i2c-pseudo controller device.
          Must NOT contain the command-terminating character (_CMD_END_CHAR).
"""
if not line:
return
assert _CMD_END_CHAR not in line
cmd_name = line.split(_HEADER_SEP_CHAR, 1)[0]
if | |
<filename>services/Solver/utils/table.py
import config
from utils.wikidata_util import get_wiki_type
import re
import traceback
from tabulate import tabulate
from audit.audit import Audit
from checkpoint.checkpoint import CheckPoint
class ParsedTable():
def __init__(self, raw, targets):
"""
parse the raw input into cell and col objects to later processing
for structure of the objects see parse()
"""
# internal state
self.name = raw['name']
self.rowcount = len(raw['data'][0]['original_cell_vals'])
self._cells = []
self._cols = []
self._targets = targets
# init CEA & CTA
self.parse(raw, targets)
# init CPA
self._colpairs = []
self._cellpairs = []
self.initColPairs(targets)
# collect non-fatal errors
self._errors = []
# init audit manager for this table
self.audit = Audit(self.name)
# init checkPoint manager for this table
self.checkPoint = CheckPoint(self.name)
# ~~~~~~~~~~~~~~~~~~~~~~~ Accessor Functions ~~~~~~~~~~~~~~~~~~~~~~~
def getSubjectCols(self):
"""
get the ID of the subject column
if not deducable from targets, we assume the first column to be the subject
"""
result = list(set(pair['subj_id'] for pair in self._colpairs))
if len(result) > 0:
return result
else:
return [0]
def getCell(self, col_id, row_id):
"""
get an individual cell
"""
col_id = int(col_id)
row_id = int(row_id)
for cell in self._cells:
if (cell['col_id'] == col_id) and (cell['row_id'] == row_id):
return cell
raise Exception('Could not find cell for column {} and row {}'.format(col_id, row_id))
def getCells(self, col_id=None, row_id=None, onlyObj=False, unsolved=False):
"""
get a list of cells according to the given restrictions
"""
# make everything to lists
if col_id is not None and not isinstance(col_id, list) and not isinstance(col_id, range):
col_id = [col_id]
        if row_id is not None and not isinstance(row_id, list) and not isinstance(row_id, range):
row_id = [row_id]
# get the cells
result = self._cells
if col_id:
result = [cell for cell in result if (cell['col_id'] in col_id)]
if row_id:
result = [cell for cell in result if (cell['row_id'] in row_id)]
if onlyObj:
result = [cell for cell in result if (cell['type'] == config.OBJ_COL)]
if unsolved:
result = [cell for cell in result if ('sel_cand' not in cell or not cell['sel_cand'])]
return result
def getCol(self, col_id):
"""
get an individual column
"""
col_id = int(col_id)
for col in self._cols:
if (col['col_id'] == col_id):
return col
raise Exception('Could not find column for {}'.format(col_id))
def getCols(self, onlyObj=False, col_id=None, unsolved=False):
"""
get all columns
"""
# make everything to lists
if col_id is not None and not isinstance(col_id, list):
col_id = [col_id]
# get the columns
result = self._cols
if col_id is not None:
result = [item for item in result if (item['col_id'] in col_id)]
if onlyObj:
result = [col for col in result if col['type'] == config.OBJ_COL]
if unsolved:
result = [col for col in result if ('sel_cand' not in col or not col['sel_cand'])]
return result
def getColPairs(self, onlyObj=False, onlyLit=False, subj_id=None, obj_id=None, unsolved=False):
"""
get pairs of columns (CPA task)
"""
result = self._colpairs
if onlyObj:
result = [pair for pair in self._colpairs if (pair['obj_type'] == config.OBJ_COL)]
if onlyLit:
result = [pair for pair in self._colpairs if (pair['obj_type'] != config.OBJ_COL)]
if subj_id is not None:
if not isinstance(subj_id, list):
subj_id = [subj_id]
result = [item for item in result if (item['subj_id'] in subj_id)]
if obj_id is not None:
if not isinstance(obj_id, list):
obj_id = [obj_id]
result = [item for item in result if (item['obj_id'] in obj_id)]
if unsolved:
result = [item for item in result if ('sel_cand' not in item or not item['sel_cand'])]
return result
def getCellPairs(self, subj_id=None, obj_id=None, row_id=None, subj_cand=None, unsolved=False):
"""
get pairs of cells (CPA task)
"""
result = self._cellpairs
if subj_id is not None:
if not isinstance(subj_id, list):
subj_id = [subj_id]
result = [item for item in result if (item['subj_id'] in subj_id)]
if obj_id is not None:
if not isinstance(obj_id, list):
obj_id = [obj_id]
result = [item for item in result if (item['obj_id'] in obj_id)]
if row_id is not None:
if not isinstance(row_id, list):
row_id = [row_id]
result = [item for item in result if (item['row_id'] in row_id)]
if subj_cand is not None:
if not isinstance(subj_cand, list):
subj_cand = [subj_cand]
result = [item for item in result if (item['subj_cand'] in subj_cand)]
if unsolved:
result = [item for item in result if ('sel_cand' not in item or not item['sel_cand'])]
return result
def isTarget(self, col_id=None, row_id=None, subj_id=None, obj_id=None):
"""
check if the referred to cell/col/pair is requested
"""
        # default (if deactivated)
if not config.DEDUCE_FROM_TARGETS:
return True
# CEA
if col_id is not None and row_id is not None:
return 0 < len(
[cell for cell in self._targets['cea'] if (cell['row_id'] == row_id) and (cell['col_id'] == col_id)])
# CTA
if col_id is not None:
return 0 < len([col for col in self._targets['cta'] if (col['col_id'] == col_id)])
# CPA
if subj_id is not None and obj_id is not None:
return 0 < len(
[pair for pair in self._targets['cpa'] if (pair['subj_id'] == subj_id) and (pair['obj_id'] == obj_id)])
# if none hit so far, the input is invalid
raise Exception('Invalid input')
def getTargets(self, cea=None, cta=None, cpa=None):
"""
retrieve the targeted objects of the respective type
"""
if cea is not None:
targets = set([(cell['col_id'], cell['row_id']) for cell in self._targets['cea']])
result = []
for cell in self._cells:
if (cell['col_id'], cell['row_id']) in targets:
result.append(cell)
return result
if cta is not None:
targets = set([col['col_id'] for col in self._targets['cta']])
result = []
for col in self._cols:
if col['col_id'] in targets:
result.append(col)
return result
if cpa is not None:
return self.getColPairs()
# ~~~~~~~~~~~~~~~~~~~~~~~ Parser Functions ~~~~~~~~~~~~~~~~~~~~~~~
def __get_col_sel_cand(self, col_id, cta_targets_lst):
if not cta_targets_lst:
return None
mapped_lst = [t['mapped'] for t in cta_targets_lst if t['col_id'] == col_id]
if not mapped_lst or \
mapped_lst[0] is None or \
mapped_lst[0] == "":
return None
return {'uri': mapped_lst[0]}
def __get_cell_sel_cand(self, col_id, row_id, cea_targets_lst):
if not cea_targets_lst:
return None
mapped_lst = [t['mapped'] for t in cea_targets_lst if t['col_id'] == col_id and t['row_id'] == row_id]
if not mapped_lst or \
mapped_lst[0] is None or \
mapped_lst[0] == "":
return None
return {'uri': mapped_lst[0], 'labels': []}
def parse(self, raw, targets):
"""
parse the raw table structure as received by the Runner into our local structure
"""
if self._cells or self._cols:
raise Exception('raw table data already parsed')
# get object columns as requested by targets
objectColTargets = set([item['col_id'] for item in targets['cea']])
for rawCol in raw['data']:
# create structure for col object
sel_cand = self.__get_col_sel_cand(int(rawCol['col_id']), targets['cta'])
col = {
'col_id': int(rawCol['col_id']),
'type': rawCol['type'],
'lang': rawCol['lang'] if ('lang' in rawCol) else None,
'cand': [sel_cand] if sel_cand is not None else [],
'sel_cand': sel_cand,
'sel_cands': None
}
self._cols.append(col)
# overwrite type when column is part of CEA targets
if config.DEDUCE_FROM_TARGETS:
if col['col_id'] in objectColTargets:
col['type'] = config.OBJ_COL
elif col['type'] == config.OBJ_COL:
col['type'] = 'string'
# create cell objects
for row_id in range(len(rawCol['original_cell_vals'])):
# skip the generic col_x cells
if config.DEDUCE_FROM_TARGETS and re.match(r"^col_?\d+$", rawCol['original_cell_vals'][row_id]):
continue
sel_cand = self.__get_cell_sel_cand(col['col_id'], row_id, targets['cea'])
self._cells.append({
'col_id': col['col_id'],
'row_id': row_id,
'type': col['type'],
'wikitype': get_wiki_type(col['type']),
'lang': col['lang'],
'value': rawCol['original_cell_vals'][row_id],
'clean_val': rawCol['clean_cell_vals'][row_id],
'autocorrect_val': rawCol['autocorrect_cell_vals'][
row_id] if 'autocorrect_cell_vals' in rawCol else None,
'cand': [sel_cand] if sel_cand else [],
'purged_cand': [],
'types': [],
'sel_cand': sel_cand,
})
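    # Illustrative example (values are hypothetical): a parsed cell is a dict roughly
    # of the form {'col_id': 0, 'row_id': 3, 'type': config.OBJ_COL, 'value': 'Berlin',
    # 'clean_val': 'berlin', 'cand': [], 'purged_cand': [], 'types': [], 'sel_cand': None};
    # the candidate lists are filled in by later lookup and disambiguation steps.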
def initColPairs(self, targets):
"""
based on the targets, create the according column pair objects
"""
for target in targets['cpa']:
objCol = self.getCol(target['obj_id'])
self._colpairs.append({
'subj_id': target['sub_id'],
'obj_id': target['obj_id'],
'obj_type': objCol['type'],
                'cand': [{'prop': target['mapped']}] if target['mapped'] else [],
                'sel_cand': target['mapped'] if target['mapped'] else None,
})
def initCellPairs(self, row_id=None):
"""
based on already generated CEA candidates, create the respective CPA objects for each candidate
(still need to be aggregated on the column level)
"""
if self._cellpairs and row_id is None:
raise Exception('Cell pairs already initialized!')
for colPair in self._colpairs:
# get all cells from the subject column
sub_cells = self.getCells(col_id=colPair['subj_id'], row_id=row_id)
for subj_cell in sub_cells:
# grab the corresponding cell
obj_cell = self.getCell(col_id=colPair['obj_id'], row_id=subj_cell['row_id'])
# object vs literal columns
if colPair['obj_type'] == config.OBJ_COL:
for subj_cand in subj_cell['cand']:
for obj_cand in obj_cell['cand']:
self._cellpairs.append({
'subj_id': subj_cell['col_id'],
'obj_id': obj_cell['col_id'],
'row_id': subj_cell['row_id'],
'subj_cand': subj_cand['uri'],
'obj_cand': obj_cand['uri'],
'type': config.OBJ_COL,
'cand': [{'prop': colPair['sel_cand']}] if colPair['sel_cand'] else [],
'sel_cand': colPair['sel_cand'],
})
else:
for subj_cand in subj_cell['cand']:
self._cellpairs.append({
'subj_id': subj_cell['col_id'],
| |
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
    NONE = _gskernel.GsBuildingFillSymbol_NONE
    r""" Not set"""
    CONSTANTNUM = _gskernel.GsBuildingFillSymbol_CONSTANTNUM
    r""" Constant value"""
    FIELDKEY = _gskernel.GsBuildingFillSymbol_FIELDKEY
    r""" Field value"""
    AUTOCAL = _gskernel.GsBuildingFillSymbol_AUTOCAL
    r""" Computed by script"""
def __init__(self):
_gskernel.GsBuildingFillSymbol_swiginit(self, _gskernel.new_GsBuildingFillSymbol())
def ViewAngle(self, *args) -> "void":
r"""
*Overload 1:*
        View angle
        |
        *Overload 2:*
        View angle
"""
return _gskernel.GsBuildingFillSymbol_ViewAngle(self, *args)
    def ExtrusionBase(self) -> "int":
        r""" Extrusion base; must be less than or equal to the extrusion height"""
        return _gskernel.GsBuildingFillSymbol_ExtrusionBase(self)
    def ExtrusionHeightMode(self) -> "GsBuildingFillSymbol::GsExtrusionHeightMode":
        r""" How the extrusion height is obtained"""
return _gskernel.GsBuildingFillSymbol_ExtrusionHeightMode(self)
def ExtrusionHeight(self, *args) -> "void":
r"""
*Overload 1:*
        Height (geographic distance, in meters)
        |
        *Overload 2:*
        Height (geographic distance, in meters)
"""
return _gskernel.GsBuildingFillSymbol_ExtrusionHeight(self, *args)
def ExtrusionHeightKey(self, *args) -> "void":
r"""
*Overload 1:*
        Height field
        |
        *Overload 2:*
        Height field
"""
return _gskernel.GsBuildingFillSymbol_ExtrusionHeightKey(self, *args)
def Type(self) -> "GsSymbolType":
return _gskernel.GsBuildingFillSymbol_Type(self)
def Style(self, *args) -> "void":
r"""
*Overload 1:*
        Pen style
        |
        *Overload 2:*
        Pen style
"""
return _gskernel.GsBuildingFillSymbol_Style(self, *args)
    def IsValid(self) -> "bool":
        r""" Whether the symbol is valid, i.e. whether it will draw anything on the canvas; if the symbol draws nothing, the drawing pass can be skipped."""
        return _gskernel.GsBuildingFillSymbol_IsValid(self)
    def DrawTop(self, pGeo: 'GsGeometry') -> "void":
        r""" Draw the top face"""
return _gskernel.GsBuildingFillSymbol_DrawTop(self, pGeo)
__swig_destroy__ = _gskernel.delete_GsBuildingFillSymbol
# Register GsBuildingFillSymbol in _gskernel:
_gskernel.GsBuildingFillSymbol_swigregister(GsBuildingFillSymbol)
eTextStyleNormal = _gskernel.eTextStyleNormal
r""" Upright (no slant) ━━ ┃ ┃ ┃ ┃ ━━ """
eTextStyleLeftItalic = _gskernel.eTextStyleLeftItalic
r""" Left italic ━━ ╲ ╲ ╲ ╲ ━━"""
eTextStyleRightItalic = _gskernel.eTextStyleRightItalic
r""" Right italic ━━ ╱ ╱ ╱ ╱ ━━"""
eTextStyleLeftShrug = _gskernel.eTextStyleLeftShrug
r""" Left shear ┃╲ ┃ ╲ ╲ ┃ ╲┃"""
eTextStyleRightShrug = _gskernel.eTextStyleRightShrug
r""" Right shear ╱┃ ╱ ┃ ┃ ╱ ┃╱"""
class GsTextSymbol(GsSymbol):
    r""" Text symbol"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
    def __init__(self):
        r""" Default constructor"""
_gskernel.GsTextSymbol_swiginit(self, _gskernel.new_GsTextSymbol())
__swig_destroy__ = _gskernel.delete_GsTextSymbol
def TextStyle(self, *args) -> "GsTextStyle":
r"""
*Overload 1:*
        Set the slant type
        |
        *Overload 2:*
        Get the slant type
"""
return _gskernel.GsTextSymbol_TextStyle(self, *args)
def Bold(self, *args) -> "bool":
r"""
*Overload 1:*
        Set the bold flag
        |
        *Overload 2:*
        Get the bold flag
"""
return _gskernel.GsTextSymbol_Bold(self, *args)
def Hollow(self, *args) -> "bool":
r"""
*Overload 1:*
        Set the hollow (outline) flag
        |
        *Overload 2:*
        Get the hollow (outline) flag
"""
return _gskernel.GsTextSymbol_Hollow(self, *args)
def UnderLine(self, *args) -> "bool":
r"""
*Overload 1:*
        Set the underline flag
        |
        *Overload 2:*
        Get the underline flag
"""
return _gskernel.GsTextSymbol_UnderLine(self, *args)
def StrikeOut(self, *args) -> "bool":
r"""
*Overload 1:*
        Set the strikethrough flag
        |
        *Overload 2:*
        Get the strikethrough flag
"""
return _gskernel.GsTextSymbol_StrikeOut(self, *args)
def Color(self, *args) -> "void":
r"""
*Overload 1:*
        Return the symbol color
        |
        *Overload 2:*
        Set the symbol color
"""
return _gskernel.GsTextSymbol_Color(self, *args)
def Font(self, *args) -> "void":
r"""
*Overload 1:*
        Return the symbol font
        |
        *Overload 2:*
        Set the symbol font
"""
return _gskernel.GsTextSymbol_Font(self, *args)
def Size(self, *args) -> "void":
r"""
*Overload 1:*
        Return the symbol size
        |
        *Overload 2:*
        Set the symbol size
"""
return _gskernel.GsTextSymbol_Size(self, *args)
def Text(self, *args) -> "void":
r"""
*Overload 1:*
        Return the symbol text
        |
        *Overload 2:*
        Set the symbol text
"""
return _gskernel.GsTextSymbol_Text(self, *args)
def HorizonAlign(self, *args) -> "void":
r"""
*Overload 1:*
        Get the horizontal alignment
        |
        *Overload 2:*
        Set the horizontal alignment
"""
return _gskernel.GsTextSymbol_HorizonAlign(self, *args)
def VerticalAlign(self, *args) -> "void":
r"""
*Overload 1:*
        Get the vertical alignment
        |
        *Overload 2:*
        Set the vertical alignment
"""
return _gskernel.GsTextSymbol_VerticalAlign(self, *args)
def Width(self, *args) -> "void":
r"""
*Overload 1:*
        Get the label width
        |
        *Overload 2:*
        Set the label width
"""
return _gskernel.GsTextSymbol_Width(self, *args)
def Height(self, *args) -> "void":
r"""
*Overload 1:*
        Get the label height
        |
        *Overload 2:*
        Set the label height
"""
return _gskernel.GsTextSymbol_Height(self, *args)
def StyleAngle(self, *args) -> "void":
r"""
*Overload 1:*
        Get the slant angle of the font style
        |
        *Overload 2:*
        Set the slant angle of the font style
"""
return _gskernel.GsTextSymbol_StyleAngle(self, *args)
def Angle(self, *args) -> "void":
r"""
*Overload 1:*
        Get the label rotation angle
        |
        *Overload 2:*
        Set the label rotation angle
"""
return _gskernel.GsTextSymbol_Angle(self, *args)
def OffsetX(self, *args) -> "void":
r"""
*Overload 1:*
        Get the label X offset
        |
        *Overload 2:*
        Set the label X offset
"""
return _gskernel.GsTextSymbol_OffsetX(self, *args)
def OffsetY(self, *args) -> "void":
r"""
*Overload 1:*
        Get the label Y offset
        |
        *Overload 2:*
        Set the label Y offset
"""
return _gskernel.GsTextSymbol_OffsetY(self, *args)
def HorizonExtra(self, *args) -> "void":
r"""
*Overload 1:*
        Get the label horizontal spacing
        |
        *Overload 2:*
        Set the label horizontal spacing
"""
return _gskernel.GsTextSymbol_HorizonExtra(self, *args)
def VerticalExtra(self, *args) -> "void":
r"""
*Overload 1:*
        Get the label vertical spacing
        |
        *Overload 2:*
        Set the label vertical spacing
"""
return _gskernel.GsTextSymbol_VerticalExtra(self, *args)
def BackgroundColor(self, *args) -> "void":
r"""
*Overload 1:*
        Return the label background color
        |
        *Overload 2:*
        Set the label background color
"""
return _gskernel.GsTextSymbol_BackgroundColor(self, *args)
def ShadowColor(self, *args) -> "void":
r"""
*Overload 1:*
        Return the label shadow color
        |
        *Overload 2:*
        Set the label shadow color
"""
return _gskernel.GsTextSymbol_ShadowColor(self, *args)
def ShadowOffsetX(self, *args) -> "void":
r"""
*Overload 1:*
        Return the label shadow X offset
        |
        *Overload 2:*
        Set the label shadow X offset
"""
return _gskernel.GsTextSymbol_ShadowOffsetX(self, *args)
def ShadowOffsetY(self, *args) -> "void":
r"""
*Overload 1:*
        Return the label shadow Y offset
        |
        *Overload 2:*
        Set the label shadow Y offset
"""
return _gskernel.GsTextSymbol_ShadowOffsetY(self, *args)
    def Type(self) -> "GsSymbolType":
        r""" Returns the symbol type"""
        return _gskernel.GsTextSymbol_Type(self)
    def IsValid(self) -> "bool":
        r""" Whether the symbol is valid"""
return _gskernel.GsTextSymbol_IsValid(self)
# Register GsTextSymbol in _gskernel:
_gskernel.GsTextSymbol_swigregister(GsTextSymbol)
eGeoMapDefineFormat = _gskernel.eGeoMapDefineFormat
r""" Symbol storage format of the map definition file"""
eGenernalFormat = _gskernel.eGenernalFormat
r""" Storage format for cross-platform symbols."""
class GsSymbolLibrary(object):
    r""" Symbol library object. Used to instantiate symbols from a symbol library or to generate symbol library files."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
r"""
*Overload 1:*
        Instantiate the object from a symbol library file or a symbol library XML string
        |
        *Overload 2:*
        Construct an empty symbol library object
"""
_gskernel.GsSymbolLibrary_swiginit(self, _gskernel.new_GsSymbolLibrary(*args))
__swig_destroy__ = _gskernel.delete_GsSymbolLibrary
def SymbolByCode(self, *args) -> "GsSmarterPtr< GsSymbol >":
r"""
*Overload 1:*
        Find a symbol by its code; if several symbols share the same code, the first one is returned.
        |
        *Overload 2:*
        Find a symbol by its code :type nCode: int
        :param nCode: the code to look up :type eType: int
        :param eType: the symbol type to search within :rtype: GsSmarterPtr< GsSymbol >
        :return: the first symbol with code nCode found under the given type eType
"""
return _gskernel.GsSymbolLibrary_SymbolByCode(self, *args)
    def SymbolByName(self, strName: 'char const *') -> "GsSmarterPtr< GsSymbol >":
        r""" Find a symbol by its name; wildcard matching is supported"""
        return _gskernel.GsSymbolLibrary_SymbolByName(self, strName)
    def SymbolBySchemaCode(self, SchemaCode: 'char const *') -> "GsSmarterPtr< GsSymbol >":
        r""" Find a symbol by its schema code string; wildcard matching is supported"""
return _gskernel.GsSymbolLibrary_SymbolBySchemaCode(self, SchemaCode)
def Symbols(self, *args) -> "GsVector< GsSmarterPtr< GsSymbol > >":
r"""
*Overload 1:*
        The collection of stored symbols
        |
        *Overload 2:*
        Return the symbols of a specific type.
"""
return _gskernel.GsSymbolLibrary_Symbols(self, *args)
    def SaveToString(self, eFormat: 'GsSymbolLibraryFormat'=eGenernalFormat) -> "GsString":
        r""" Save the symbol library to a string"""
return _gskernel.GsSymbolLibrary_SaveToString(self, eFormat)
def Save(self, *args) -> "void":
r"""
*Overload 1:*
        Save the symbol library to a file
        |
        *Overload 2:*
        Save the symbol library to a new file.
"""
return _gskernel.GsSymbolLibrary_Save(self, *args)
def Name(self, *args) -> "void":
r"""
*Overload 1:*
        The name of the symbol library
        |
        *Overload 2:*
        Set the name of the symbol library
"""
return _gskernel.GsSymbolLibrary_Name(self, *args)
def Description(self, *args) -> "void":
r"""
*Overload 1:*
        The description of the symbol library
        |
        *Overload 2:*
        Set the description of the symbol library.
"""
return _gskernel.GsSymbolLibrary_Description(self, *args)
@staticmethod
    def ToString(pSym: 'GsSymbol', eFormat: 'GsSymbolLibraryFormat'=eGenernalFormat) -> "GsString":
        r""" Serialize a symbol to a string."""
return _gskernel.GsSymbolLibrary_ToString(pSym, eFormat)
@staticmethod
    def HashCode(pSym: 'GsSymbol') -> "long long":
        r""" Compute the hash code of a symbol"""
return _gskernel.GsSymbolLibrary_HashCode(pSym)
@staticmethod
    def ParserSymbol(str: 'char const *') -> "GsSmarterPtr< GsSymbol >":
        r""" Parse a string into a symbol object"""
return _gskernel.GsSymbolLibrary_ParserSymbol(str)
@staticmethod
    def Assign(pSym: 'GsSymbol', strXML: 'char const *') -> "bool":
        r""" Parse and assign properties for a given symbol"""
return _gskernel.GsSymbolLibrary_Assign(pSym, strXML)
# Register GsSymbolLibrary in _gskernel:
_gskernel.GsSymbolLibrary_swigregister(GsSymbolLibrary)
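# Usage sketch (illustrative only; the file name and code value below are hypothetical):
#   lib = GsSymbolLibrary('symbols.xml')    # load a symbol library file
#   sym = lib.SymbolByCode(101)             # first symbol whose code is 101
#   xml = GsSymbolLibrary.ToString(sym)     # serialize the symbol back to a string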
def GsSymbolLibrary_ToString(pSym: 'GsSymbol', eFormat: 'GsSymbolLibraryFormat'=eGenernalFormat) -> "GsString":
    r""" Serialize a symbol to a string."""
    return _gskernel.GsSymbolLibrary_ToString(pSym, eFormat)
def GsSymbolLibrary_HashCode(pSym: 'GsSymbol') -> "long long":
    r""" Compute the hash code of a symbol"""
    return _gskernel.GsSymbolLibrary_HashCode(pSym)
def GsSymbolLibrary_ParserSymbol(str: 'char const *') -> "GsSmarterPtr< GsSymbol >":
    r""" Parse a string into a symbol object"""
    return _gskernel.GsSymbolLibrary_ParserSymbol(str)
def GsSymbolLibrary_Assign(pSym: 'GsSymbol', strXML: 'char const *') -> "bool":
    r""" Parse and assign properties for a given symbol"""
return _gskernel.GsSymbolLibrary_Assign(pSym, strXML)
class GsMultiLineSymbol(GsLineSymbol):
    r""" Composite line symbol"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_gskernel.GsMultiLineSymbol_swiginit(self, _gskernel.new_GsMultiLineSymbol())
    def Add(self, pSymbol: 'GsLineSymbol') -> "void":
        r""" Add a child symbol"""
        return _gskernel.GsMultiLineSymbol_Add(self, pSymbol)
    def Clear(self) -> "void":
        r""" Remove all child symbols"""
        return _gskernel.GsMultiLineSymbol_Clear(self)
    def Count(self) -> "int":
        r""" Number of child symbols"""
        return _gskernel.GsMultiLineSymbol_Count(self)
    def RemoveAt(self, i: 'int') -> "void":
        r""" Remove the child symbol at the given index"""
        return _gskernel.GsMultiLineSymbol_RemoveAt(self, i)
    def ElementAt(self, i: 'int') -> "GsLineSymbol *":
        r""" Get the child symbol at the given index"""
        return _gskernel.GsMultiLineSymbol_ElementAt(self, i)
    def IsValid(self) -> "bool":
        r""" Whether the symbol is valid, i.e. whether it will draw anything on the canvas; if the symbol draws nothing, the drawing pass can be skipped."""
return _gskernel.GsMultiLineSymbol_IsValid(self)
def Width(self, *args) -> "void":
r"""
*Overload 1:*
        Get the line symbol width, in millimeters
        |
        *Overload 2:*
        Set the line symbol width, in millimeters
"""
return _gskernel.GsMultiLineSymbol_Width(self, *args)
def Color(self, *args) -> "void":
r"""
*Overload 1:*
        Get the line symbol color
        |
        *Overload 2:*
        Set the line symbol color
"""
return _gskernel.GsMultiLineSymbol_Color(self, *args)
__swig_destroy__ = _gskernel.delete_GsMultiLineSymbol
# Register GsMultiLineSymbol in _gskernel:
_gskernel.GsMultiLineSymbol_swigregister(GsMultiLineSymbol)
class GsMultiFillSymbol(GsFillSymbol):
    r""" Composite fill symbol"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_gskernel.GsMultiFillSymbol_swiginit(self, _gskernel.new_GsMultiFillSymbol())
    def Add(self, pSymbol: 'GsFillSymbol') -> "void":
        r""" Add a child symbol"""
        return _gskernel.GsMultiFillSymbol_Add(self, pSymbol)
    def Clear(self) -> "void":
        r""" Remove all child symbols"""
        return _gskernel.GsMultiFillSymbol_Clear(self)
    def Count(self) -> "int":
        r""" Number of child symbols"""
        return _gskernel.GsMultiFillSymbol_Count(self)
    def RemoveAt(self, i: 'int') -> "void":
        r""" Remove the child symbol at the given index"""
        return _gskernel.GsMultiFillSymbol_RemoveAt(self, i)
    def ElementAt(self, i: 'int') -> "GsFillSymbol *":
        r""" Get the child symbol at the given index"""
        return _gskernel.GsMultiFillSymbol_ElementAt(self, i)
    def IsValid(self) -> "bool":
        r""" Whether the symbol is valid, i.e. whether it will draw anything on the canvas; if the symbol draws nothing, the drawing pass can be skipped."""
return _gskernel.GsMultiFillSymbol_IsValid(self)
def FillColor(self, *args) -> "void":
r"""
*Overload 1:*
        Get the fill color
        |
        *Overload 2:*
        Set the fill color
"""
return _gskernel.GsMultiFillSymbol_FillColor(self, *args)
__swig_destroy__ = _gskernel.delete_GsMultiFillSymbol
# Register GsMultiFillSymbol in _gskernel:
_gskernel.GsMultiFillSymbol_swigregister(GsMultiFillSymbol)
class GsMultiPointSymbol(GsPointSymbol):
    r""" Composite point symbol"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
    def Add(self, pSymbol: 'GsPointSymbol') -> "void":
        r""" Add a child symbol"""
        return _gskernel.GsMultiPointSymbol_Add(self, pSymbol)
    def Clear(self) -> "void":
        r""" Remove all child symbols"""
        return _gskernel.GsMultiPointSymbol_Clear(self)
    def Count(self) -> "int":
        r""" Number of child symbols"""
        return _gskernel.GsMultiPointSymbol_Count(self)
    def RemoveAt(self, i: 'int') -> "void":
        r""" Remove the child symbol at the given index"""
        return _gskernel.GsMultiPointSymbol_RemoveAt(self, i)
    def ElementAt(self, i: 'int') -> "GsPointSymbol *":
        r""" Get the child symbol at the given index"""
        return _gskernel.GsMultiPointSymbol_ElementAt(self, i)
    def IsValid(self) -> "bool":
        r""" Whether the symbol is valid, i.e. whether it will draw anything on the canvas; if the symbol draws nothing, the drawing pass can be skipped."""
return _gskernel.GsMultiPointSymbol_IsValid(self)
def __init__(self):
_gskernel.GsMultiPointSymbol_swiginit(self, _gskernel.new_GsMultiPointSymbol())
def Size(self, *args) -> "void":
r"""
*Overload 1:*
        Symbol size
        |
        *Overload 2:*
        Set the symbol size
"""
return _gskernel.GsMultiPointSymbol_Size(self, *args)
def Color(self, *args) -> "void":
r"""
*Overload 1:*
颜色
|
*Overload 2:*
设置颜色
"""
return _gskernel.GsMultiPointSymbol_Color(self, *args)
def Angle(self, *args) -> "void":
r"""
*Overload 1:*
获取旋转角
|
*Overload 2:*
| |
'''
Train and test bidirectional language models.
'''
import os
import time
import json
import re
import tensorflow as tf
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from horovod.tensorflow.compression import Compression
# Change 1
#import horovod.tensorflow as hvd
# Change lms
from tensorflow_large_model_support import LMS
from tensorflow.python.client import device_lib
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
from .data import Vocabulary, UnicodeCharsVocabulary, InvalidNumberOfCharacters
DTYPE = 'float32'
DTYPE_INT = 'int64'
tf.logging.set_verbosity(tf.logging.INFO)
def print_variable_summary():
import pprint
variables = sorted([[v.name, v.get_shape()] for v in tf.global_variables()])
pprint.pprint(variables)
class LanguageModel(object):
'''
A class to build the tensorflow computational graph for NLMs
All hyperparameters and model configuration is specified in a dictionary
of 'options'.
is_training is a boolean used to control behavior of dropout layers
and softmax. Set to False for testing.
The LSTM cell is controlled by the 'lstm' key in options
Here is an example:
'lstm': {
'cell_clip': 5,
'dim': 4096,
'n_layers': 2,
'proj_clip': 5,
'projection_dim': 512,
'use_skip_connections': True},
    'projection_dim' is used as both the token embedding size and the LSTM output size.
'dim' is the hidden state size.
Set 'dim' == 'projection_dim' to skip a projection layer.
'''
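    # Illustrative sketch of a full 'options' dict for a word-input model (these are
    # the keys read by this class; the values are placeholders, not a recommended config):
    #
    #   options = {
    #       'bidirectional': True,
    #       'batch_size': 128,
    #       'unroll_steps': 20,
    #       'n_tokens_vocab': 793471,
    #       'dropout': 0.1,
    #       'sample_softmax': True,
    #       'lstm': {'dim': 4096, 'projection_dim': 512, 'n_layers': 2,
    #                'cell_clip': 5, 'proj_clip': 5, 'use_skip_connections': True},
    #   }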
def __init__(self, options, is_training):
self.options = options
self.is_training = is_training
self.bidirectional = options.get('bidirectional', False)
# use word or char inputs?
self.char_inputs = 'char_cnn' in self.options
# for the loss function
self.share_embedding_softmax = options.get(
'share_embedding_softmax', False)
if self.char_inputs and self.share_embedding_softmax:
raise ValueError("Sharing softmax and embedding weights requires "
"word input")
self.sample_softmax = options.get('sample_softmax', True)
self._build()
def _build_word_embeddings(self):
n_tokens_vocab = self.options['n_tokens_vocab']
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
# LSTM options
projection_dim = self.options['lstm']['projection_dim']
# the input token_ids and word embeddings
self.token_ids = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name='token_ids')
# the word embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable(
"embedding", [n_tokens_vocab, projection_dim],
dtype=DTYPE,
)
self.embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.token_ids)
# if a bidirectional LM then make placeholders for reverse
# model and embeddings
if self.bidirectional:
self.token_ids_reverse = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name='token_ids_reverse')
with tf.device("/cpu:0"):
self.embedding_reverse = tf.nn.embedding_lookup(
self.embedding_weights, self.token_ids_reverse)
def _build_word_char_embeddings(self):
'''
options contains key 'char_cnn': {
        'n_characters': 261,
        # character vocabulary size: 261 for training (enforced below); 262, which
        # includes the padding character, is used at inference time
'max_characters_per_token': 50,
'filters': [
[1, 32],
[2, 32],
[3, 64],
[4, 128],
[5, 256],
[6, 512],
[7, 512]
],
'activation': 'tanh',
# for the character embedding
'embedding': {'dim': 16}
# for highway layers
# if omitted, then no highway layers
'n_highway': 2,
}
'''
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
projection_dim = self.options['lstm']['projection_dim']
cnn_options = self.options['char_cnn']
filters = cnn_options['filters']
n_filters = sum(f[1] for f in filters)
max_chars = cnn_options['max_characters_per_token']
char_embed_dim = cnn_options['embedding']['dim']
n_chars = cnn_options['n_characters']
if n_chars != 261:
raise InvalidNumberOfCharacters(
"Set n_characters=261 for training see the README.md"
)
        if cnn_options['activation'] == 'tanh':
            activation = tf.nn.tanh
        elif cnn_options['activation'] == 'relu':
            activation = tf.nn.relu
        else:
            raise ValueError("Unsupported char_cnn activation: {}".format(
                cnn_options['activation']))
# the input character ids
self.tokens_characters = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps, max_chars),
name='tokens_characters')
# the character embeddings
with tf.device("/cpu:0"):
self.embedding_weights = tf.get_variable(
"char_embed", [n_chars, char_embed_dim],
dtype=DTYPE,
initializer=tf.random_uniform_initializer(-1.0, 1.0)
)
# shape (batch_size, unroll_steps, max_chars, embed_dim)
self.char_embedding = tf.nn.embedding_lookup(self.embedding_weights,
self.tokens_characters)
if self.bidirectional:
self.tokens_characters_reverse = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps, max_chars),
name='tokens_characters_reverse')
self.char_embedding_reverse = tf.nn.embedding_lookup(
self.embedding_weights, self.tokens_characters_reverse)
# the convolutions
def make_convolutions(inp, reuse):
with tf.variable_scope('CNN', reuse=reuse) as scope:
convolutions = []
for i, (width, num) in enumerate(filters):
if cnn_options['activation'] == 'relu':
# He initialization for ReLU activation
# with char embeddings init between -1 and 1
#w_init = tf.random_normal_initializer(
# mean=0.0,
# stddev=np.sqrt(2.0 / (width * char_embed_dim))
#)
# Kim et al 2015, +/- 0.05
w_init = tf.random_uniform_initializer(
minval=-0.05, maxval=0.05)
elif cnn_options['activation'] == 'tanh':
# glorot init
w_init = tf.random_normal_initializer(
mean=0.0,
stddev=np.sqrt(1.0 / (width * char_embed_dim))
)
w = tf.get_variable(
"W_cnn_%s" % i,
[1, width, char_embed_dim, num],
initializer=w_init,
dtype=DTYPE)
b = tf.get_variable(
"b_cnn_%s" % i, [num], dtype=DTYPE,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(
inp, w,
strides=[1, 1, 1, 1],
padding="VALID") + b
# now max pool
conv = tf.nn.max_pool(
conv, [1, 1, max_chars-width+1, 1],
[1, 1, 1, 1], 'VALID')
# activation
conv = activation(conv)
conv = tf.squeeze(conv, squeeze_dims=[2])
convolutions.append(conv)
return tf.concat(convolutions, 2)
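        # make_convolutions returns shape (batch_size, unroll_steps, n_filters): each
        # (width, num) filter pair contributes `num` max-pooled features, concatenated
        # on the last axis.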
# for first model, this is False, for others it's True
reuse = tf.get_variable_scope().reuse
embedding = make_convolutions(self.char_embedding, reuse)
self.token_embedding_layers = [embedding]
if self.bidirectional:
# re-use the CNN weights from forward pass
embedding_reverse = make_convolutions(
self.char_embedding_reverse, True)
# for highway and projection layers:
        # reshape from (batch_size, n_tokens, dim) to (batch_size * n_tokens, dim)
n_highway = cnn_options.get('n_highway')
use_highway = n_highway is not None and n_highway > 0
use_proj = n_filters != projection_dim
if use_highway or use_proj:
embedding = tf.reshape(embedding, [-1, n_filters])
if self.bidirectional:
embedding_reverse = tf.reshape(embedding_reverse,
[-1, n_filters])
# set up weights for projection
if use_proj:
assert n_filters > projection_dim
with tf.variable_scope('CNN_proj') as scope:
W_proj_cnn = tf.get_variable(
"W_proj", [n_filters, projection_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / n_filters)),
dtype=DTYPE)
b_proj_cnn = tf.get_variable(
"b_proj", [projection_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
# apply highways layers
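        # A highway layer mixes a transformed input with the raw input:
        #     y = g * relu(x . W_transform + b_transform) + (1 - g) * x,
        #     g = sigmoid(x . W_carry + b_carry)
        # b_carry is initialised to -2.0 below, so each layer starts close to the identity.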
def high(x, ww_carry, bb_carry, ww_tr, bb_tr):
carry_gate = tf.nn.sigmoid(tf.matmul(x, ww_carry) + bb_carry)
transform_gate = tf.nn.relu(tf.matmul(x, ww_tr) + bb_tr)
return carry_gate * transform_gate + (1.0 - carry_gate) * x
if use_highway:
highway_dim = n_filters
for i in range(n_highway):
with tf.variable_scope('CNN_high_%s' % i) as scope:
W_carry = tf.get_variable(
'W_carry', [highway_dim, highway_dim],
                        # glorot init
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_carry = tf.get_variable(
'b_carry', [highway_dim],
initializer=tf.constant_initializer(-2.0),
dtype=DTYPE)
W_transform = tf.get_variable(
'W_transform', [highway_dim, highway_dim],
initializer=tf.random_normal_initializer(
mean=0.0, stddev=np.sqrt(1.0 / highway_dim)),
dtype=DTYPE)
b_transform = tf.get_variable(
'b_transform', [highway_dim],
initializer=tf.constant_initializer(0.0),
dtype=DTYPE)
embedding = high(embedding, W_carry, b_carry,
W_transform, b_transform)
if self.bidirectional:
embedding_reverse = high(embedding_reverse,
W_carry, b_carry,
W_transform, b_transform)
self.token_embedding_layers.append(
tf.reshape(embedding,
[batch_size, unroll_steps, highway_dim])
)
# finally project down to projection dim if needed
if use_proj:
embedding = tf.matmul(embedding, W_proj_cnn) + b_proj_cnn
if self.bidirectional:
embedding_reverse = tf.matmul(embedding_reverse, W_proj_cnn) \
+ b_proj_cnn
self.token_embedding_layers.append(
tf.reshape(embedding,
[batch_size, unroll_steps, projection_dim])
)
# reshape back to (batch_size, tokens, dim)
if use_highway or use_proj:
shp = [batch_size, unroll_steps, projection_dim]
embedding = tf.reshape(embedding, shp)
if self.bidirectional:
embedding_reverse = tf.reshape(embedding_reverse, shp)
# at last assign attributes for remainder of the model
self.embedding = embedding
if self.bidirectional:
self.embedding_reverse = embedding_reverse
def _build(self):
# size of input options
n_tokens_vocab = self.options['n_tokens_vocab']
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
# LSTM options
lstm_dim = self.options['lstm']['dim']
projection_dim = self.options['lstm']['projection_dim']
n_lstm_layers = self.options['lstm'].get('n_layers', 1)
dropout = self.options['dropout']
keep_prob = 1.0 - dropout
if self.char_inputs:
self._build_word_char_embeddings()
else:
self._build_word_embeddings()
# now the LSTMs
# these will collect the initial states for the forward
# (and reverse LSTMs if we are doing bidirectional)
self.init_lstm_state = []
self.final_lstm_state = []
# get the LSTM inputs
if self.bidirectional:
lstm_inputs = [self.embedding, self.embedding_reverse]
else:
lstm_inputs = [self.embedding]
# now compute the LSTM outputs
cell_clip = self.options['lstm'].get('cell_clip')
proj_clip = self.options['lstm'].get('proj_clip')
use_skip_connections = self.options['lstm'].get(
'use_skip_connections')
if use_skip_connections:
print("USING SKIP CONNECTIONS")
lstm_outputs = []
for lstm_num, lstm_input in enumerate(lstm_inputs):
lstm_cells = []
for i in range(n_lstm_layers):
if projection_dim < lstm_dim:
# are projecting down output
lstm_cell = tf.nn.rnn_cell.LSTMCell(
lstm_dim, num_proj=projection_dim,
cell_clip=cell_clip, proj_clip=proj_clip)
else:
lstm_cell = tf.nn.rnn_cell.LSTMCell(
lstm_dim,
cell_clip=cell_clip, proj_clip=proj_clip)
if use_skip_connections:
# ResidualWrapper adds inputs to outputs
if i == 0:
# don't add skip connection from token embedding to
# 1st layer output
pass
else:
# add a skip connection
lstm_cell = tf.nn.rnn_cell.ResidualWrapper(lstm_cell)
# add dropout
if self.is_training:
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell,
input_keep_prob=keep_prob)
lstm_cells.append(lstm_cell)
if n_lstm_layers > 1:
lstm_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)
else:
lstm_cell = lstm_cells[0]
with tf.control_dependencies([lstm_input]):
self.init_lstm_state.append(
lstm_cell.zero_state(batch_size, DTYPE))
# NOTE: this variable scope is for backward compatibility
# with existing models...
if self.bidirectional:
with tf.variable_scope('RNN_%s' % lstm_num):
_lstm_output_unpacked, final_state = tf.nn.static_rnn(
lstm_cell,
tf.unstack(lstm_input, axis=1),
initial_state=self.init_lstm_state[-1])
else:
_lstm_output_unpacked, final_state = tf.nn.static_rnn(
lstm_cell,
tf.unstack(lstm_input, axis=1),
initial_state=self.init_lstm_state[-1])
self.final_lstm_state.append(final_state)
# (batch_size * unroll_steps, 512)
lstm_output_flat = tf.reshape(
tf.stack(_lstm_output_unpacked, axis=1), [-1, projection_dim])
if self.is_training:
# add dropout to output
lstm_output_flat = tf.nn.dropout(lstm_output_flat,
keep_prob)
tf.add_to_collection('lstm_output_embeddings',
_lstm_output_unpacked)
lstm_outputs.append(lstm_output_flat)
self._build_loss(lstm_outputs)
def _build_loss(self, lstm_outputs):
'''
Create:
self.total_loss: total loss op for training
self.softmax_W, softmax_b: the softmax variables
self.next_token_id / _reverse: placeholders for gold input
'''
batch_size = self.options['batch_size']
unroll_steps = self.options['unroll_steps']
n_tokens_vocab = self.options['n_tokens_vocab']
# DEFINE next_token_id and *_reverse placeholders for the gold input
def _get_next_token_placeholders(suffix):
name = 'next_token_id' + suffix
id_placeholder = tf.placeholder(DTYPE_INT,
shape=(batch_size, unroll_steps),
name=name)
return id_placeholder
# get the window and weight placeholders
self.next_token_id = _get_next_token_placeholders('')
if self.bidirectional:
self.next_token_id_reverse = _get_next_token_placeholders(
'_reverse')
# DEFINE THE SOFTMAX VARIABLES
# get the dimension of the softmax weights
# softmax dimension is the size of the | |
amp_mod[pos]*temp_jitt[diff:]
else:
diff = n_samples - (spos - cut_out[0])
gt_source[spos - cut_out[0]:] += amp_mod[pos]*temp_jitt[:diff]
else:
# print('No modulation')
for pos, spos in enumerate(spike_pos):
if spos - cut_out[0] >= 0 and spos - cut_out[0] + len_spike <= n_samples:
gt_source[spos - cut_out[0]:spos - cut_out[0] + len_spike] += template
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
gt_source[:spos - cut_out[0] + len_spike] += template[diff:]
else:
diff = n_samples - (spos - cut_out[0])
gt_source[spos - cut_out[0]:] += template[:diff]
return gt_source
def convolve_templates_spiketrains(spike_id, spike_bin, template, cut_out=None, modulation=False, amp_mod=None, recordings=[]):
    '''
    Convolve a binary spike train with a (possibly jittered) multi-electrode template.
    Parameters
    ----------
    spike_id : identifier of the spiking unit (used for logging only)
    spike_bin : binary array with a 1 at every spike sample
    template : array of shape (n_elec, len_spike) or (n_jitter, n_elec, len_spike)
    cut_out : [samples_before, samples_after] around each spike (default: centred template)
    modulation : whether to apply amplitude modulation
    amp_mod : per-spike (or per-spike, per-electrode) amplitude modulation factors
    recordings : optional pre-allocated (n_elec, n_samples) array to add the result into
    Returns
    -------
    recordings : (n_elec, n_samples) array with the convolved templates added
    '''
print('START: convolution with spike ', spike_id)
if len(template.shape) == 3:
njitt = template.shape[0]
n_elec = template.shape[1]
len_spike = template.shape[2]
else:
n_elec = template.shape[0]
len_spike = template.shape[1]
n_samples = len(spike_bin)
if len(recordings) == 0:
recordings = np.zeros((n_elec, n_samples))
if cut_out is None:
cut_out = [len_spike//2, len_spike//2]
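    # cut_out[0] / cut_out[1] are the number of samples the pasted template extends
    # before / after each spike position, so the default centres the template on the spike.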
# recordings_test = np.zeros((n_elec, n_samples))
if not modulation:
spike_pos = np.where(spike_bin == 1)[0]
amp_mod = np.ones_like(spike_pos)
if len(template.shape) == 3:
rand_idx = np.random.randint(njitt)
# print('rand_idx: ', rand_idx)
temp_jitt = template[rand_idx]
# print('No modulation')
for pos, spos in enumerate(spike_pos):
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += amp_mod[pos] * temp_jitt
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += amp_mod[pos] * temp_jitt[:, diff:]
else:
diff = n_samples - (spos - cut_out[0])
                    recordings[:, spos - cut_out[0]:] += amp_mod[pos] * temp_jitt[:, :diff]
else:
# print('No jitter')
for pos, spos in enumerate(spike_pos):
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += amp_mod[
pos] * template
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += amp_mod[pos] * template[:, diff:]
else:
diff = n_samples - (spos - cut_out[0])
recordings[:, spos - cut_out[0]:] += amp_mod[pos] * template[:, :diff]
else:
assert amp_mod is not None
spike_pos = np.where(spike_bin == 1)[0]
if len(template.shape) == 3:
rand_idx = np.random.randint(njitt)
# print('rand_idx: ', rand_idx)
temp_jitt = template[rand_idx]
if not isinstance(amp_mod[0], (list, tuple, np.ndarray)):
#print('Template modulation')
for pos, spos in enumerate(spike_pos):
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += amp_mod[pos] * temp_jitt
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += amp_mod[pos] * temp_jitt[:, diff:]
else:
diff = n_samples-(spos - cut_out[0])
recordings[:, spos - cut_out[0]:] += amp_mod[pos] * temp_jitt[:, :diff]
else:
#print('Electrode modulation')
for pos, spos in enumerate(spike_pos):
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += \
[a * t for (a, t) in zip(amp_mod[pos], temp_jitt)]
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += \
[a * t for (a, t) in zip(amp_mod[pos], temp_jitt[:, diff:])]
# recordings[:, :spos + cut_out[1]] += amp_mod[pos] * template[:, diff:]
else:
diff = n_samples-(spos - cut_out[0])
recordings[:, spos - cut_out[0]:] += \
[a * t for (a, t) in zip(amp_mod[pos], temp_jitt[:, :diff])]
else:
if not isinstance(amp_mod[0], (list, tuple, np.ndarray)):
#print('Template modulation')
for pos, spos in enumerate(spike_pos):
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += amp_mod[
pos] * template
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += amp_mod[pos] * template[:, diff:]
else:
diff = n_samples - (spos - cut_out[0])
recordings[:, spos - cut_out[0]:] += amp_mod[pos] * template[:, :diff]
else:
#print('Electrode modulation')
for pos, spos in enumerate(spike_pos):
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += \
[a * t for (a, t) in zip(amp_mod[pos], template)]
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, : spos + cut_out[1]] += \
[a * t for (a, t) in zip(amp_mod[pos], template[:, diff:])]
# recordings[:, :spos + cut_out[1]] += amp_mod[pos] * template[:, diff:]
else:
diff = n_samples-(spos - cut_out[0])
recordings[:, spos - cut_out[0]:] += \
[a * t for (a, t) in zip(amp_mod[pos], template[:, :diff])]
# recordings[:, spos - cut_out[0]:] += amp_mod[pos] * template[:, :diff]
#print('DONE: convolution with spike ', spike_id)
return recordings
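# Hedged usage sketch for convolve_templates_spiketrains (shapes are illustrative only):
#
#   spike_bin = np.zeros(32000); spike_bin[[1000, 5000, 9000]] = 1   # binary spike train
#   template = np.random.randn(10, 30, 60)                           # (n_jitter, n_elec, len_spike)
#   rec = convolve_templates_spiketrains(0, spike_bin, template)     # -> shape (30, 32000)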
def convolve_drifting_templates_spiketrains(spike_id, spike_bin, template, fs, loc, v_drift, t_start_drift,
                                            cut_out=None, modulation=False, amp_mod=None, recordings=[], n_step_sec=1):
    '''
    Convolve a binary spike train with templates that drift across recorded positions over time.
    Parameters
    ----------
    spike_id : identifier of the spiking unit (used for logging only)
    spike_bin : binary array with a 1 at every spike sample
    template : array of shape (n_pos, n_elec, len_spike) or (n_pos, n_jitter, n_elec, len_spike)
    fs : sampling frequency (quantity with Hz units)
    loc : template locations; loc[:, 1:] are the spatial coordinates used to pick the drifted template
    v_drift : drift velocity
    t_start_drift : time at which the drift starts
    cut_out : [samples_before, samples_after] around each spike (default: centred template)
    modulation : whether to apply amplitude modulation
    amp_mod : per-spike (or per-spike, per-electrode) amplitude modulation factors
    recordings : optional pre-allocated (n_elec, n_samples) array to add the result into
    n_step_sec : time step (s) used to evaluate the drifting mixing matrix
    Returns
    -------
    recordings : (n_elec, n_samples) array with the convolved templates added
    '''
print('START: convolution with spike ', spike_id)
if len(template.shape) == 4:
njitt = template.shape[1]
n_elec = template.shape[2]
len_spike = template.shape[3]
    elif len(template.shape) == 3:
        n_elec = template.shape[1]
        len_spike = template.shape[2]
    if cut_out is None:
        cut_out = [len_spike // 2, len_spike // 2]
    n_samples = len(spike_bin)
n_step_sec = 1
dur = (n_samples / fs).rescale('s').magnitude
t_steps = np.arange(0, dur, n_step_sec)
n_step_sample = n_step_sec * int(fs.magnitude)
dt = 2 ** -5
mixing = np.zeros((int(n_samples/float(fs.rescale('Hz').magnitude)), n_elec))
if len(recordings) == 0:
recordings = np.zeros((n_elec, n_samples))
# recordings_test = np.zeros((n_elec, n_samples))
if not modulation:
spike_pos = np.where(spike_bin == 1)[0]
amp_mod = np.ones_like(spike_pos)
if len(template.shape) == 4:
rand_idx = np.random.randint(njitt)
print('rand_idx: ', rand_idx)
print('No modulation')
for pos, spos in enumerate(spike_pos):
sp_time = spos / fs
if sp_time < t_start_drift:
print(sp_time, 'No drift', loc[0])
temp_idx = 0
temp_jitt = template[temp_idx, rand_idx]
else:
# compute current position
new_pos = np.array(loc[0, 1:] + v_drift * (sp_time - t_start_drift).rescale('s').magnitude)
temp_idx = np.argmin([np.linalg.norm(p - new_pos) for p in loc[:, 1:]])
print(sp_time, temp_idx, 'Drifting', new_pos, loc[temp_idx, 1:])
temp_jitt = template[temp_idx, rand_idx]
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += amp_mod[pos] * temp_jitt
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += amp_mod[pos] * temp_jitt[:, diff:]
else:
diff = n_samples - (spos - cut_out[0])
recordings[:, spos - cut_out[0]:] += amp_mod[pos] * temp_jitt[:, :diff]
for i, t in enumerate(t_steps):
if t < t_start_drift:
temp_idx = 0
temp_jitt = template[temp_idx, rand_idx]
else:
# compute current position
new_pos = np.array(loc[0, 1:] + v_drift * (t - t_start_drift.rescale('s').magnitude))
temp_idx = np.argmin([np.linalg.norm(p - new_pos) for p in loc[:, 1:]])
temp_jitt = template[temp_idx, rand_idx]
feat = get_EAP_features(np.squeeze(temp_jitt), ['Na'], dt=dt)
mixing[i] = -np.squeeze(feat['na'])
else:
print('No jitter')
for pos, spos in enumerate(spike_pos):
sp_time = spos / fs
if sp_time < t_start_drift:
temp_idx = 0
temp = template[temp_idx]
else:
# compute current position
new_pos = np.array(loc[0, 1:] + v_drift * (sp_time - t_start_drift).rescale('s').magnitude)
temp_idx = np.argmin([np.linalg.norm(p - new_pos) for p in loc[:, 1:]])
temp = template[temp_idx]
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += amp_mod[pos] * temp
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += amp_mod[pos] * temp[:, diff:]
else:
diff = n_samples - (spos - cut_out[0])
recordings[:, spos - cut_out[0]:] += amp_mod[pos] * temp[:, :diff]
for i, t in enumerate(t_steps):
if t < t_start_drift:
temp_idx = 0
temp_jitt = template[temp_idx]
else:
# compute current position
new_pos = np.array(loc[0, 1:] + v_drift * (t - t_start_drift.rescale('s').magnitude))
temp_idx = np.argmin([np.linalg.norm(p - new_pos) for p in loc[:, 1:]])
temp_jitt = template[temp_idx]
feat = get_EAP_features(np.squeeze(temp_jitt), ['Na'], dt=dt)
mixing[i] = -np.squeeze(feat['na'])
else:
assert amp_mod is not None
spike_pos = np.where(spike_bin == 1)[0]
if len(template.shape) == 4:
rand_idx = np.random.randint(njitt)
print('rand_idx: ', rand_idx)
if not isinstance(amp_mod[0], (list, tuple, np.ndarray)):
print('Template modulation')
for pos, spos in enumerate(spike_pos):
sp_time = spos / fs
if sp_time < t_start_drift:
print(sp_time, 'No drift', loc[0])
temp_idx = 0
temp_jitt = template[temp_idx, rand_idx]
else:
# compute current position
new_pos = np.array(loc[0, 1:] + v_drift * (sp_time - t_start_drift).rescale('s').magnitude)
temp_idx = np.argmin([np.linalg.norm(p - new_pos) for p in loc[:, 1:]])
temp_jitt = template[temp_idx, rand_idx]
print(sp_time, temp_idx, 'Drifting', new_pos, loc[temp_idx, 1:])
if spos - cut_out[0] >= 0 and spos + cut_out[1] <= n_samples:
recordings[:, spos - cut_out[0]:spos + cut_out[1]] += amp_mod[pos] \
* temp_jitt
elif spos - cut_out[0] < 0:
diff = -(spos - cut_out[0])
recordings[:, :spos + cut_out[1]] += amp_mod[pos] * temp_jitt[:, diff:]
| |
# Repository: CartoDB/bigmetadata
from tasks.base_tasks import (ColumnsTask, TableTask, TagsTask, CSV2TempTableTask, RepoFileUnzipTask,
RepoFileGUnzipTask, RepoFile)
from tasks.eu.geo import NUTSColumns, NUTSGeometries
from tasks.meta import OBSColumn, OBSTag, current_session, GEOM_REF
from tasks.tags import SectionTags, SubsectionTags, UnitTags
from tasks.util import underscore_slugify, classpath, shell, copyfile
from luigi import IntParameter, Parameter, WrapperTask, Task, LocalTarget
from collections import OrderedDict
from lib.columns import ColumnsDeclarations
from lib.logger import get_logger
from lib.timespan import get_timespan
import glob
import csv
import os
import re
# dl_code_list = "http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&downfile=dic%2Fen%2F{code}.dic".format(code=code)
# flag_explanation = "http://ec.europa.eu/eurostat/data/database/information"
# database = "http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?dir=data&sort=1&sort=2&start={}".format(first_letter)
# dl_data = "http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&downfile=data%2F{}.tsv.gz".format(table_code)
# dl_data = "http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&file=data%2Fdemo_r_pjangrp3.tsv.gz
LOGGER = get_logger(__name__)
class DownloadEurostat(Task):
table_code = Parameter()
URL = "http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&file=data%2F{code}.tsv.gz"
def version(self):
return 1
def requires(self):
url = self.URL.format(code=self.table_code.lower())
return RepoFile(resource_id=self.task_id,
version=self.version(),
url=url)
def run(self):
copyfile(self.input().path, '{output}.gz'.format(output=self.output().path))
shell('gunzip {output}.gz'.format(output=self.output().path))
def output(self):
return LocalTarget(os.path.join('tmp', classpath(self), self.task_id))
class ProcessCSV(Task):
table_code = Parameter()
def requires(self):
return DownloadEurostat(table_code=self.table_code)
def run(self):
        # Eurostat bulk TSVs use '\' in the first-column header (e.g. unit,geo\time) and
        # tabs between values; replace both with commas to get a plain CSV.
        shell("cat {infile} | tr '\\\\' ',' | tr '\t' ',' > {outfile}".format(
            outfile=self.output().path,
            infile=self.input().path))
def output(self):
return LocalTarget(self.input().path + '.csv')
class EUTempTable(CSV2TempTableTask):
delimiter = Parameter(default=',', significant=False)
table_name = Parameter() # Ex. "DEMO_R_PJANAGGR3"
def version(self):
return 5
def requires(self):
return ProcessCSV(table_code=self.table_name)
def coldef(self):
coldefs = super(EUTempTable, self).coldef()
for i, cd in enumerate(coldefs):
cdtemp = list(cd)
cdtemp[0] = cdtemp[0].strip()
newcd = tuple(cdtemp)
coldefs[i] = newcd
return coldefs
def input_csv(self):
return self.input().path
class DownloadUnzipDICTTables(RepoFileUnzipTask):
URL = 'http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&file=dic%2Fall_dic.zip'
def get_url(self):
return self.URL
class DICTablesCache(object):
LANGUAGE = 'en'
def __init__(self):
self._cache = {}
def get(self, dname, fname):
filepath = os.path.join(dname, self.LANGUAGE, fname)
if filepath not in self._cache:
LOGGER.info('Caching %s', fname)
with open(filepath, 'r') as fhandle:
reader = csv.reader(fhandle, delimiter='\t')
self._cache[filepath] = {}
for key, val in reader:
self._cache[filepath][key] = val
LOGGER.info('Cached %s, with %s lines', filepath, len(self._cache[filepath]))
else:
LOGGER.debug('Cache hit for %s', filepath)
return self._cache[filepath]
class DownloadGUnzipMetabase(RepoFileGUnzipTask):
URL = 'http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&file=metabase.txt.gz'
def get_url(self):
return self.URL
class MetabaseTable(CSV2TempTableTask):
has_header = False
delimiter = '\t'
def coldef(self):
return [
('table_code', 'TEXT',),
('dimension', 'TEXT',),
('value', 'TEXT',),
]
def requires(self):
return DownloadGUnzipMetabase()
def input_csv(self):
return [file for file in glob.glob(os.path.join(DownloadGUnzipMetabase().output().path, '*.csv'))]
def after_copy(self):
session = current_session()
session.execute('CREATE UNIQUE INDEX ON {table} (table_code, dimension, value)'.format(
table=self.output().table
))
class SourceTags(TagsTask):
def tags(self):
return [OBSTag(id='eurostat-source',
name='Eurostat',
type='source',
description='Eurostat data can be found `here <http://ec.europa.eu>`_.')]
class LicenseTags(TagsTask):
def tags(self):
return [OBSTag(id='eurostat-license',
name='Copyright European Union',
type='license',
description='Reuse is authorised, provided the source is acknowledged. Full information `here <https://ec.europa.eu/info/legal-notice_en#copyright-notice>`_')]
CACHE = DICTablesCache()
def simplify_description(description):
description = description.replace(
'Employer business demography - ',
'')
description = description.replace(
'Business demography - ',
'')
description = description.replace(
'number of persons employed in the reference period (t) among enterprises newly born in t divided by the number of enterprises newly born in t,',
'')
description = description.replace(
'Industry, construction and services except insurance activities of holding companies,',
'')
description = description.replace(
'Number of births of enterprises in t',
'Number of enterprise births')
description = description.replace(
'number of persons employed in the reference period (t) among enterprises newly born in t divided by the number of enterprises newly born in t,',
'')
description = description.replace(
'number of persons employed in the reference period (t) among enterprises newly born in t-3 having survived to t divided by the number of enterprises in t newly born in t-3 having survived to t,',
'')
description = description.replace(
'number of enterprise births in the reference period (t) divided by the number of enterprises active in t,',
'')
description = description.replace('Birth rate: number of enterprise births in the reference period (t) divided by the number of enterprises active in t,','Enterprise birth rate:')
description = description.replace('Death rate: number of enterprise deaths in the reference period (t) divided by the number of enterprises active in t,',
'Enterprise death rate:')
description = description.replace('Number of persons employed in enterprises newly born in t-3 having survived to t, divided by the number of persons employed in the population of active enterprises in t,'
,'')
description = description.replace('Employment share of enterprise births: number of persons employed in the reference period (t) among enterprises newly born in t divided by the number of persons employed in t among the stock of enterprises active in t,'
,'Employment share of new enterprises: ')
description = description.replace('number of persons employed in the reference period (t) among enterprise deaths divided by the number of persons employed in t among the stock of active enterprises in t,'
,'')
description = description.replace('number of employees in the reference period (t) among enterprises newly born in t divided by the number of persons employed in t among enterprises newly born in t,'
,'')
description = description.replace('Number of births of enterprises in t,'
,'Enterprise births:')
description = description.replace('Number of deaths of enterprises in t,'
,'Enterprise deaths:')
description = description.replace('Number of employees in the population of active enterprises in t,'
,'Employees of active enterprises:')
description = description.replace('Number of employees in the population of births in t,'
,'Employees of new enterprises:')
description = description.replace('Number of employees in the population of deaths in t,'
,'Employees in the population of enterprise deaths:')
description = description.replace('Number of persons employed in the population of enterprises newly born in t-3 having survived to t,'
,'People employed in surviving three-year old enterprises:')
description = description.replace('Number of persons employed in the year of birth in the population of enterprises newly born in t-3 having survived to t,'
,'People continuously employed in surviving three-year old enterprises:')
description = description.replace('Population of active enterprises in t,'
,'Active enterprises:')
description = description.replace('number of persons employed in the reference period (t) among enterprises newly born in t-3 having survived to t divided by the number of persons employed in t-3 by the same enterprises, expressed as a percentage growth rate,'
,'')
description = description.replace(
'Number of enterprises newly born in t-3 having survived to t',
'Number of surviving three-year old enterprises'
)
description = description.replace(
'Number of persons employed in the population of active enterprises in t',
'People employed in active enterprises'
)
description = description.replace(
'Number of persons employed in the population of births in t',
'People employed in new enterprises'
)
description = description.replace(
'Number of persons employed in the population of deaths in t',
'People employed in the population of enterprise deaths'
)
description = description.replace(
'Proportion of enterprise births in the reference period (t) by size class,',
'Proportion of enterprise births by size class:'
)
description = description.replace(
'Hotels; holiday and other short-stay accommodation; camping grounds, recreational vehicle parks and trailer parks',
'Hotels, holiday, campgrounds, and other short-stay accomodations'
)
description = description.replace(
'Arts, entertainment and recreation; other service activities; activities of household and extra-territorial organizations and bodies',
'Arts, entertainment, recreation, and other service activities')
description = description.replace(
'Proportion of enterprise deaths in the reference period (t) by size class,',
'Proportion of enterprise deaths by size class:'
)
description = description.replace(
'number of persons employed in the reference period (t) among enterprise deaths in t divided by the number of enterprise deaths in t',
''
)
description = description.replace(
'Industry, construction and services except insurance activities of holding companies,',
'')
description = description.replace(
'Professional, scientific and technical activities; administrative and support service activities'
,'Professional, scientific, technical, administrative and support service activities')
description = description.replace(
'Financial and insurance activities; real estate activities except activities of holding companies',
'Financial, insurance, and real estate activities'
)
description = re.sub(r' zero$', ' zero employees', description)
return description
class FlexEurostatColumns(ColumnsTask):
subsection = Parameter() # Ex. 'age_gender'
units = Parameter() # Ex. 'people'
nuts_level = Parameter()
table_name = Parameter() # Ex. "DEMO_R_PJANAGGR3"
year = Parameter()
# From tablename, determine basis of name for columns from table_dic.dic
# Then, look at metabase.txt to find relevant dimensions (exclude "geo" and "time", utilize "unit")
# Finally, look up definitions for dimensions from their .dic files, and use that to complete the
# metadata definition
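    # Hedged illustration of the dictionary lookups this relies on (file names follow
    # the Eurostat bulk-download conventions assumed in the comment above):
    #
    #   cache = DICTablesCache()
    #   units = cache.get(dicttables_path, 'unit.dic')       # e.g. {'NR': 'Number', ...}
    #   tables = cache.get(dicttables_path, 'table_dic.dic')  # table code -> description
    #
    # See requires() and columns() below for where these pieces come together.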
def requires(self):
return {
'DICTTables': DownloadUnzipDICTTables(),
'units': UnitTags(),
'subsection': SubsectionTags(),
'section': SectionTags(),
'source': SourceTags(),
'license': LicenseTags(),
'metabase': MetabaseTable(),
}
def version(self):
return 18
def columns(self):
columns = OrderedDict()
input_ = self.input()
subsectiontags = input_['subsection']
unittags = input_['units']
eu = input_['section']['eu']
licensing = input_['license']['eurostat-license']
source = input_['source']['eurostat-source']
cache = CACHE
dicttables_path = input_['DICTTables'].path
        session = current_session()
from tkinter import *
from threading import Thread
from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from tkinter.messagebox import showinfo
from os.path import isdir
from os.path import isfile
import pandas as pd
import numpy as np
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
from email.mime.multipart import MIMEMultipart
# import os
from os import listdir
import docx
# import re
from re import match
import xlsxwriter
from xlrd import XLRDError
import datetime
import copy
# from copy import
class ViewController():
def __init__(self, app, title, needReturn=False):
self.app = app
self.app.title(title)
self.frame = Frame(app)
self.frame.pack()
self.setupUI(self.frame)
if needReturn:
self.setupReturn(self.frame)
def setupReturn(self, frame):
Button(frame, text=' < ', bg='CornflowerBlue', fg='GhostWhite', command=self.back).grid(row=0, column=0, sticky= W + N)
def back(self):
self.frame.destroy()
FuncViewController(self.app, 'Welcome')
def setupUI(self, frame):
pass
class FuncViewController(ViewController):
def setupUI(self, frame):
super().setupUI(frame)
Button(frame, text='Split', width=10, height=2, bg='CornflowerBlue', fg='GhostWhite', font=("Arial, 12"), command=lambda index=0: self.navTo(index)).grid(row=1, column=0, padx=5, pady=10)
Button(frame, text='Mail', width=10, height=2, bg='CornflowerBlue', fg='GhostWhite', font=("Arial, 12"), command=lambda index=1: self.navTo(index)).grid(row=1, column=1, padx=5)
def navTo(self, index):
self.frame.destroy()
if index == 0:
SplitViewController(self.app, 'Split master report', True)
else:
MailViewController(self.app, 'Send Emails to cost center owners', True)
class SplitViewController(ViewController):
def setupUI(self, frame):
super().setupUI(frame)
rawDataPath = StringVar()
savePath = StringVar()
#raw data
Label(frame, text='Raw data').grid(row=1, column=0, sticky=W)
Entry(frame, width=25, textvariable=rawDataPath).grid(row=1, column=1, pady=5, padx=5)
Button(frame, text='Choose', command=lambda arg=rawDataPath: self.excelSelection(arg)).grid(row=1, column=2)
#save path
Label(frame, text='Save path').grid(row=4, column=0, sticky=W)
Entry(frame, width=25, textvariable=savePath).grid(row=4, column=1, pady=5, padx=5)
Button(frame, text='Choose', command=lambda arg=savePath: self.dirSelection(arg)).grid(row=4, column=2)
#Split
self.splitBtn = Button(frame, width=6, text='Split', bg='lime green', fg='GhostWhite', command=lambda rawData=rawDataPath, save=savePath: self.threadIt(self.split, rawData, save))
self.splitBtn.grid(row=5, column=2)
#select file
def excelSelection(self, path):
tempStr = askopenfilename(filetypes=[('Excel', '*.xlsx'), ('All Files', '*')])
path.set(tempStr)
# select dir
def dirSelection(self, path):
tempStr = askdirectory()
path.set(tempStr)
#split
def split(self, rawData, save):
if len(rawData.get()) * len(save.get()) == 0:
showinfo(title='Oops', message='Something is missing')
return
# self.app.title('Spliting...')
# self.splitBtn['state'] = 'disabled'
# self.splitBtn['text'] = 'Spliting...'
self.isrunning(True)
try:
rawdf = pd.DataFrame(pd.read_excel(rawData.get(), sheet_name='Sheet1'))
except BaseException as e:
# print(type(e))
# print(e.values)
showinfo(title=f'{e.__class__.__name__}', message=f'{e}')
self.isrunning(False)
return
values = ['Asset Number',
'Sub-number',
'Historical Asset Number',
'Asset description',
'Cost Center',
'Capitalization Date',
'Original Value',
'Accumulated Depreciation',
'Net value',
]
try:
tempdf = rawdf[values].copy()
except BaseException as e:
showinfo(title=f'{e.__class__.__name__}', message=f'{e}')
self.isrunning(False)
return
# tempdf['Asset Number'] = tempdf['Asset Number'].astype('str')
tempdf['Asset Number'] = tempdf['Asset Number'].map(lambda x: '{:.0f}'.format(x))
tempdf['Remark/Comment'] = ''
tempdf['Capitalization Date'] = tempdf['Capitalization Date'].dt.strftime('%m/%d/%Y')
ccdfs = tempdf.groupby(['Cost Center'])
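        # groupby yields one (cost_center, sub-frame) pair per cost center; each sub-frame
        # gets a trailing 'Total' row and is written out as 'Monthly FA report <CC>.xlsx'.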
# mapdf = tempdf['Cost Center'].drop_duplicates()
for cc, ccdf in ccdfs:
# subdf = tempdf.loc[rawdf['Cost Center']==cc]
# importSub = subdf.copy()
tempdf = ccdf.append({'Asset Number': 'Total', 'Net value': ccdf['Net value'].sum()}, ignore_index=True)
# print(tempdf)
self.saveSub(tempdf, cc, save.get())
# subdf.to_excel(save.get() + '\\' + str(cc) + '.xlsx', index=False)
self.isrunning(False)
# self.splitBtn['state'] = 'normal'
# self.splitBtn['text'] = 'Split'
# self.app.title('Spliting ended')
def isrunning(self, state: bool):
self.app.title('Spliting...' if state else 'Spliting ended')
self.splitBtn['state'] = 'disabled' if state else 'normal'
self.splitBtn['text'] = 'Spliting...' if state else 'Split'
def saveSub(self, df, cc, save):
writer = pd.ExcelWriter(save + '\\' + 'Monthly FA report {:.0f}'.format(cc) + '.xlsx', engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', index=False)#, startrow=1, header=False
workbook = writer.book
worksheet = writer.sheets['Sheet1']
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'align': 'center',
'border': 1,
})
# print(df.head())
for col_num, value in enumerate(df.columns.values):
worksheet.write(0, col_num, value, header_format)
overall_format = workbook.add_format({
'border': 1,
'text_wrap': True,
'valign': 'vcenter',
})
# pd.io.formats.excel.header_style = None
worksheet.set_column(0, 0, 15, overall_format)
worksheet.set_column(1, 1, 11, overall_format)
worksheet.set_column(2, 2, 11, overall_format)
worksheet.set_column(3, 3, 50, overall_format)
worksheet.set_column(4, 8, 12, overall_format)
worksheet.set_column(9, 9, 16, overall_format)
# worksheet.set_row(0, None, header_format)
try:
writer.save()
except BaseException as e:
showinfo(title=f'{e.__class__.__name__}', message=f'{e}')
self.isrunning(False)
return
def threadIt(self, func, *args):
t = Thread(target=func, args=args)
t.setDaemon(True)
t.start()
class MailViewController(ViewController):
def setupUI(self, frame):
super().setupUI(frame)
senderStr = StringVar()
subPath = StringVar()
toListPath = StringVar()
copyListPath = StringVar()
bodyPath = StringVar()
#sender
Label(frame, text='Sender:').grid(row=1, column=0, sticky=W)
Entry(frame, width=25, textvariable=senderStr).grid(row=1, column=1, pady=5, padx=5)
#cc list
Label(frame, text='Email to:').grid(row=2, column=0, sticky=W)
Entry(frame, width=25, textvariable=toListPath).grid(row=2, column=1, pady=5, padx=5)
Button(frame, text='Choose', command=lambda arg=toListPath: self.excelSelection(arg)).grid(row=2, column=2)
#copy list
Label(frame, text='Copy to:').grid(row=3, column=0, sticky=W)
Entry(frame, width=25, textvariable=copyListPath).grid(row=3, column=1, pady=5, padx=5)
Button(frame, text='Choose', command=lambda arg=copyListPath: self.excelSelection(arg)).grid(row=3, column=2)
#body
Label(frame, text='Email body:').grid(row=4, column=0, sticky=W)
Entry(frame, width=25, textvariable=bodyPath).grid(row=4, column=1, pady=5, padx=5)
Button(frame, text='Choose', command=lambda arg=bodyPath: self.docSelection(arg)).grid(row=4, column=2)
#sub path
Label(frame, text='Sub path:').grid(row=5, column=0, sticky=W)
Entry(frame, width=25, textvariable=subPath).grid(row=5, column=1, pady=5, padx=5)
Button(frame, text='Choose', command=lambda arg=subPath: self.dirSelection(arg)).grid(row=5, column=2)
#Check
Button(frame, width=6, text='Check', bg='lime green', fg='GhostWhite', command=lambda cc=toListPath, sub=subPath: self.threadIt(self.checkFiles, cc, sub)).grid(row=6, column=1, sticky='E')
#Mail
self.mailBtn = Button(frame, width=6, text='Mail', bg='lime green', fg='GhostWhite', command=lambda sender=senderStr, to=toListPath, copy=copyListPath, body=bodyPath, sub=subPath: self.threadIt(self.mail, sender, to, copy, body, sub))
self.mailBtn.grid(row=6, column=2)
#select file
def docSelection(self, path):
tempStr = askopenfilename(filetypes=[('Web Page', '*.htm'), ('All Files', '*')], title = "Select Email body")
path.set(tempStr)
def excelSelection(self, path):
tempStr = askopenfilename(filetypes=[('Excel', '*.xlsx'), ('All Files', '*')], title = "Select FA coordinator list")
path.set(tempStr)
# select dir
def dirSelection(self, path):
tempStr = askdirectory(title = "Select sub files directory")
path.set(tempStr)
#check
def checkFiles(self, cc, sub):
if len(cc.get()) * len(sub.get()) == 0:
showinfo(title='Oops', message='Something is missing')
return
self.app.title('Checking...')
try:
ccdf = pd.DataFrame(pd.read_excel(cc.get(), sheet_name='Sheet1'))
tempdf = ccdf['Cost Center'].copy().astype('str')
except BaseException as e:
showinfo(title=f'{e.__class__.__name__}', message=f'{e}')
self.app.title('Check Finished...')
return
ccList = tempdf.drop_duplicates().values.tolist()
tempList = listdir(sub.get())
# fileFullList = list(map(self.getFiles, tempList))
# showinfo(title='Oops', message='No sub files') if len(fileFullList) ==
try:
fileList = list(map(self.getFiles, tempList))
except BaseException as e:
showinfo(title=f'{e.__class__.__name__}', message=f'{e}')
self.app.title('Check Finished...')
return
fileDifccSet = set(fileList) - set(ccList)
# difcc = set(ccList) - set(fileList)
fileDifccStr = ','.join(fileDifccSet)
# difStrcc = ','.join(difcc)
if len(fileDifccStr) != 0:
            strFile = 'Sub file(s) of Cost Center(s): {} have no corresponding info in the CC&FA mapping list.'.format(fileDifccStr)
showinfo(title='Oops', message='{0}'.format(strFile))
else:
            showinfo(title='OK', message='Sub file(s) of Cost Center(s) fully match the CC&FA mapping list.')
self.app.title('Check Finished...')
# else:
# strFile = ''
# # if len(difStrFile) != 0:
# # strcc = 'Cost Center(s): {} does not have matching sub files'.format(difStrcc)
# # else:
# # strcc = ''
# if len(strFile) != 0 or len(strcc) != 0:
# showinfo(title='Oops', message='{0}'.format(strFile))
# self.app.title('Check Finished...')
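    def getFiles(self, fileName):
        # Assumed implementation of the helper mapped over in checkFiles(): strip the
        # split tool's naming pattern ('Monthly FA report <CC>.xlsx') down to the
        # cost-center string so it can be compared against the mapping list.
        return fileName.replace('Monthly FA report ', '').replace('.xlsx', '')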
def isrunning(self, state: bool):
self.app.title('Sending...' if state else 'Send Finished.')
self.mailBtn['state'] = 'disabled' if state else 'normal'
        self.mailBtn['text'] = 'Sending...' if state else 'Mail'
#mail
def mail(self, sender, to, copy, body, sub):
if len(sender.get()) * len(to.get()) * len(body.get()) * len(sub.get()) == 0:
showinfo(title='Oops', message='Something is missing')
return
# self.app.title('Sending...')
# self.mailBtn['state'] = 'disabled'
# self.mailBtn['text'] = 'Sending...'
self.isrunning(True)
try:
todf = pd.DataFrame(pd.read_excel(to.get(), sheet_name='Sheet2'))
# if copy.get() != '':
copydf = pd.DataFrame(pd.read_excel(copy.get(), sheet_name='Sheet2')) if copy.get() != '' else None
except BaseException as e:
showinfo(title=f'{e.__class__.__name__}', message=f'{e}')
return
self.sendMail(sender, todf, copydf, body, sub)
self.isrunning(False)
# self.app.title('Send Finished...')
# self.mailBtn['state'] = 'normal'
# self.mailBtn['text'] = 'Mail'
def sendMail(self, sender, todf, copydf, body, sub):
smtp_server = 'rb-smtp-int.bosch.com'
port = 25
senderStr = sender.get()
if not self.checkEmail(senderStr):
showinfo(title='Format Error', message='Please check Sender email.')
return
# if not re.match(r'^[a-zA-Z0-9_.-]+@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.[a-zA-Z0-9]{2,6}$',senderStr):
# showinfo(title='Format Error', message='Please check Sender email.')
# return
#email body
# paras = ''
# doc = docx.Document(body.get())
# for para in doc.paragraphs:
# temp = '<p>{}</p>'.format(para.text)
# paras = paras + temp
with open(body.get()) as f:
paras = f.read()
# print(paras)
errorList = []
succList = []
for index, row in todf.iterrows():
# if not(re.match(r'^[a-zA-Z0-9_.-]+@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.[a-zA-Z0-9]{2,6}$',row[1])):
# errorList.append(str(index + 1) + '. ' + row[1])
# continue
if not self.checkEmail(row[1]):
errorList.append(str(index + 1) + '. ' + row[1])
continue
receiver = f'{row[1]}'
msg = MIMEMultipart()
msg['Subject'] = '{} FA Report'.format(row[0])
            # msg['From'] = '<EMAIL>'
            msg['From'] = senderStr
            msg['To'] = receiver
            # Cc the matching coordinator when this cost center appears in the copy list
            # (assumes the copy sheet shares the "to" sheet layout: cost center, email, name);
            # to actually deliver to the Cc address it would also need to be passed to
            # server.sendmail below.
            if copydf is not None and row[0] in copydf['Cost Center'].values:
                msg['Cc'] = str(copydf.loc[copydf['Cost Center'] == row[0]].iloc[0, 1])
# head = '<b>Dear {},</b>'.format(row[2])
bodyStr = copy.copy(paras)
bodyStr = bodyStr.replace('{0}', f'{row[2]}')
# print(row[2])
# return
# body = paras.format(receiver)
msg.attach(MIMEText(bodyStr, 'html', 'utf-8'))
filePath = r'{0}\Monthly FA report {1}.xlsx'.format(sub.get(), row[0])
if not isfile(filePath):
continue
attach = MIMEText(open(filePath, 'rb').read(), 'base64', 'utf-8')
attach['Content-Disposition'] = 'attachment; filename="Monthly FA report {}.xlsx"'.format(row[0])
msg.attach(attach)
try:
server = smtplib.SMTP(smtp_server, port)
server.sendmail(senderStr, receiver, msg.as_string())
server.quit()
succList.append(str(index + 1) + '. ' + row[1])
except Exception as e:
errorList.append(str(index + 1) + '. ' + row[1])
continue
self.saveLog(succList, errorList)
def checkEmail(self, emailStr):
return match(r'^[a-zA-Z0-9_.-]+@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.[a-zA-Z0-9]{2,6}$',emailStr)
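    #   e.g. checkEmail('jane.doe@example.com') -> match object (truthy)
    #        checkEmail('not-an-email')         -> None (falsy)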
def saveLog(self, succList, errorList):
now = datetime.datetime.now()
fileName = 'Log_file_{}'.format(now.strftime('%Y%m%d'))
logPath = asksaveasfilename(initialfile=fileName, defaultextension=".xlsx", title = "Save log file to...",filetypes = (('Excel', '*.xlsx'),("all files","*.*")))
if not logPath: return
        writer = pd.ExcelWriter(logPath)
        # Minimal completion of the log write (assumed intent): one column per outcome.
        pd.DataFrame({'Succeeded': pd.Series(succList),
                      'Failed': pd.Series(errorList)}).to_excel(writer, sheet_name='Sheet1', index=False)
        writer.save()
| |
import warnings
warnings.filterwarnings("once", category=DeprecationWarning) # noqa: E402
import os
from functools import partial
import shutil
import unittest
import copy
import time
import numpy as np
import pandas as pd
import shapely.geometry as shpg
from numpy.testing import assert_allclose
import pytest
# Local imports
import oggm
from oggm.core import massbalance
from oggm.core.massbalance import LinearMassBalance
import xarray as xr
from oggm import utils, workflow, tasks, cfg
from oggm.core import gcm_climate, climate, inversion, centerlines
from oggm.cfg import SEC_IN_DAY, SEC_IN_YEAR, SEC_IN_MONTH
from oggm.utils import get_demo_file
from oggm.tests.funcs import init_hef, get_test_dir, patch_url_retrieve_github
from oggm.tests.funcs import (dummy_bumpy_bed, dummy_constant_bed,
dummy_constant_bed_cliff,
dummy_mixed_bed,
dummy_noisy_bed, dummy_parabolic_bed,
dummy_trapezoidal_bed, dummy_width_bed,
dummy_width_bed_tributary)
import matplotlib.pyplot as plt
from oggm.core.flowline import (FluxBasedModel, FlowlineModel,
init_present_time_glacier, glacier_from_netcdf,
RectangularBedFlowline, TrapezoidalBedFlowline,
ParabolicBedFlowline, MixedBedFlowline,
flowline_from_dataset, FileModel,
run_constant_climate, run_random_climate,
run_from_climate_data)
FluxBasedModel = partial(FluxBasedModel, inplace=True)
FlowlineModel = partial(FlowlineModel, inplace=True)
_url_retrieve = None
pytest.importorskip('geopandas')
pytest.importorskip('rasterio')
pytest.importorskip('salem')
def setup_module(module):
module._url_retrieve = utils.oggm_urlretrieve
oggm.utils._downloads.oggm_urlretrieve = patch_url_retrieve_github
def teardown_module(module):
oggm.utils._downloads.oggm_urlretrieve = module._url_retrieve
pytestmark = pytest.mark.test_env("models")
do_plot = False
DOM_BORDER = 80
class TestInitFlowline(unittest.TestCase):
def setUp(self):
gdir = init_hef(border=DOM_BORDER)
self.testdir = os.path.join(get_test_dir(), type(self).__name__)
utils.mkdir(self.testdir, reset=True)
self.gdir = tasks.copy_to_basedir(gdir, base_dir=self.testdir,
setup='all')
def tearDown(self):
self.rm_dir()
def rm_dir(self):
if os.path.exists(self.testdir):
shutil.rmtree(self.testdir)
def test_init_present_time_glacier(self):
gdir = self.gdir
init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
ofl = gdir.read_pickle('inversion_flowlines')[-1]
self.assertTrue(gdir.rgi_date == 2003)
self.assertTrue(len(fls) == 3)
vol = 0.
area = 0.
for fl in fls:
refo = 1 if fl is fls[-1] else 0
self.assertTrue(fl.order == refo)
ref = np.arange(len(fl.surface_h)) * fl.dx
np.testing.assert_allclose(ref, fl.dis_on_line,
rtol=0.001,
atol=0.01)
self.assertTrue(len(fl.surface_h) ==
len(fl.bed_h) ==
len(fl.bed_shape) ==
len(fl.dis_on_line) ==
len(fl.widths))
self.assertTrue(np.all(fl.widths >= 0))
vol += fl.volume_km3
area += fl.area_km2
if refo == 1:
rmsd = utils.rmsd(ofl.widths[:-5] * gdir.grid.dx,
fl.widths_m[0:len(ofl.widths)-5])
self.assertTrue(rmsd < 5.)
rtol = 0.02
np.testing.assert_allclose(0.573, vol, rtol=rtol)
np.testing.assert_allclose(6900.0, fls[-1].length_m, atol=101)
np.testing.assert_allclose(gdir.rgi_area_km2, area, rtol=rtol)
if do_plot:
plt.plot(fls[-1].bed_h)
plt.plot(fls[-1].surface_h)
plt.show()
def test_present_time_glacier_massbalance(self):
gdir = self.gdir
init_present_time_glacier(gdir)
mb_mod = massbalance.PastMassBalance(gdir)
fls = gdir.read_pickle('model_flowlines')
glacier = FlowlineModel(fls)
mbdf = gdir.get_ref_mb_data()
hgts = np.array([])
widths = np.array([])
for fl in glacier.fls:
hgts = np.concatenate((hgts, fl.surface_h))
widths = np.concatenate((widths, fl.widths_m))
tot_mb = []
refmb = []
grads = hgts * 0
for yr, mb in mbdf.iterrows():
refmb.append(mb['ANNUAL_BALANCE'])
mbh = (mb_mod.get_annual_mb(hgts, yr) * SEC_IN_YEAR *
cfg.PARAMS['ice_density'])
grads += mbh
tot_mb.append(np.average(mbh, weights=widths))
grads /= len(tot_mb)
# Bias
self.assertTrue(np.abs(utils.md(tot_mb, refmb)) < 50)
# Gradient
dfg = gdir.get_ref_mb_profile().mean()
# Take the altitudes below 3100 and fit a line
dfg = dfg[dfg.index < 3100]
pok = np.where(hgts < 3100)
from scipy.stats import linregress
slope_obs, _, _, _, _ = linregress(dfg.index, dfg.values)
slope_our, _, _, _, _ = linregress(hgts[pok], grads[pok])
np.testing.assert_allclose(slope_obs, slope_our, rtol=0.15)
class TestOtherGlacier(unittest.TestCase):
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp_div')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
# self.clean_dir()
# Init
cfg.initialize()
cfg.set_intersects_db(get_demo_file('rgi_intersect_oetztal.shp'))
cfg.PATHS['dem_file'] = get_demo_file('srtm_oetztal.tif')
cfg.PATHS['climate_file'] = get_demo_file('histalp_merged_hef.nc')
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_define_divides(self):
from oggm.core import centerlines
from oggm.core import climate
from oggm.core import inversion
from oggm.core import gis
from oggm import GlacierDirectory
import geopandas as gpd
hef_file = utils.get_demo_file('rgi_oetztal.shp')
rgidf = gpd.read_file(hef_file)
# This is another glacier with divides
entity = rgidf.loc[rgidf.RGIId == 'RGI50-11.00719_d01'].iloc[0]
gdir = GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir, entity=entity)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
cfg.PARAMS['baseline_climate'] = ''
climate.process_custom_climate_data(gdir)
climate.local_t_star(gdir, tstar=1930, bias=0)
climate.mu_star_calibration(gdir)
inversion.prepare_for_inversion(gdir)
v, ainv = inversion.mass_conservation_inversion(gdir)
init_present_time_glacier(gdir)
myarea = 0.
cls = gdir.read_pickle('inversion_flowlines')
for cl in cls:
myarea += np.sum(cl.widths * cl.dx * gdir.grid.dx**2)
np.testing.assert_allclose(ainv, gdir.rgi_area_m2, rtol=1e-2)
np.testing.assert_allclose(myarea, gdir.rgi_area_m2, rtol=1e-2)
myarea = 0.
cls = gdir.read_pickle('inversion_flowlines')
for cl in cls:
myarea += np.sum(cl.widths * cl.dx * gdir.grid.dx**2)
np.testing.assert_allclose(myarea, gdir.rgi_area_m2, rtol=1e-2)
fls = gdir.read_pickle('model_flowlines')
if cfg.PARAMS['grid_dx_method'] == 'square':
self.assertEqual(len(fls), 3)
vol = 0.
area = 0.
for fl in fls:
ref = np.arange(len(fl.surface_h)) * fl.dx
np.testing.assert_allclose(ref, fl.dis_on_line,
rtol=0.001,
atol=0.01)
self.assertTrue(len(fl.surface_h) ==
len(fl.bed_h) ==
len(fl.bed_shape) ==
len(fl.dis_on_line) ==
len(fl.widths))
self.assertTrue(np.all(fl.widths >= 0))
vol += fl.volume_km3
area += fl.area_km2
rtol = 0.08
np.testing.assert_allclose(gdir.rgi_area_km2, area, rtol=rtol)
np.testing.assert_allclose(v*1e-9, vol, rtol=rtol)
class TestMassBalance(unittest.TestCase):
def setUp(self):
gdir = init_hef(border=DOM_BORDER)
self.testdir = os.path.join(get_test_dir(), type(self).__name__)
utils.mkdir(self.testdir, reset=True)
self.gdir = tasks.copy_to_basedir(gdir, base_dir=self.testdir,
setup='all')
def tearDown(self):
self.rm_dir()
def rm_dir(self):
if os.path.exists(self.testdir):
shutil.rmtree(self.testdir)
def test_past_mb_model(self):
rho = cfg.PARAMS['ice_density']
F = SEC_IN_YEAR * rho
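        # F converts the model output (m of ice per second, as returned by get_annual_mb)
        # to kg m-2 yr-1, i.e. mm w.e. per year, the unit of the reference mass-balance data.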
gdir = self.gdir
init_present_time_glacier(gdir)
df = gdir.read_json('local_mustar')
mu_star = df['mu_star_glacierwide']
bias = df['bias']
# Climate period
yrp = [1851, 2000]
# Flowlines height
h, w = gdir.get_inversion_flowline_hw()
_, t, p = climate.mb_yearly_climate_on_height(gdir, h,
year_range=yrp)
mb_mod = massbalance.PastMassBalance(gdir, bias=0)
for i, yr in enumerate(np.arange(yrp[0], yrp[1]+1)):
ref_mb_on_h = p[:, i] - mu_star * t[:, i]
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * F
np.testing.assert_allclose(ref_mb_on_h, my_mb_on_h,
atol=1e-2)
ela_z = mb_mod.get_ela(year=yr)
totest = mb_mod.get_annual_mb([ela_z], year=yr) * F
assert_allclose(totest[0], 0, atol=1)
mb_mod = massbalance.PastMassBalance(gdir)
for i, yr in enumerate(np.arange(yrp[0], yrp[1]+1)):
ref_mb_on_h = p[:, i] - mu_star * t[:, i]
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * F
np.testing.assert_allclose(ref_mb_on_h, my_mb_on_h + bias,
atol=1e-2)
ela_z = mb_mod.get_ela(year=yr)
totest = mb_mod.get_annual_mb([ela_z], year=yr) * F
assert_allclose(totest[0], 0, atol=1)
for i, yr in enumerate(np.arange(yrp[0], yrp[1]+1)):
ref_mb_on_h = p[:, i] - mu_star * t[:, i]
my_mb_on_h = ref_mb_on_h*0.
for m in np.arange(12):
yrm = utils.date_to_floatyear(yr, m + 1)
tmp = mb_mod.get_monthly_mb(h, yrm) * SEC_IN_MONTH * rho
my_mb_on_h += tmp
np.testing.assert_allclose(ref_mb_on_h,
my_mb_on_h + bias,
atol=1e-2)
# real data
h, w = gdir.get_inversion_flowline_hw()
mbdf = gdir.get_ref_mb_data()
        mbdf['MY_MB'] = np.NaN
mb_mod = massbalance.PastMassBalance(gdir)
for yr in mbdf.index.values:
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * rho
mbdf.loc[yr, 'MY_MB'] = np.average(my_mb_on_h, weights=w)
np.testing.assert_allclose(mbdf['ANNUAL_BALANCE'].mean(),
mbdf['MY_MB'].mean(),
atol=1e-2)
mbdf['MY_ELA'] = mb_mod.get_ela(year=mbdf.index.values)
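# Physical sanity check: a higher equilibrium line altitude (ELA) should go with
# a more negative mass balance, so the correlations below must be strongly negative.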
assert mbdf[['MY_ELA', 'MY_MB']].corr().values[0, 1] < -0.9
assert mbdf[['MY_ELA', 'ANNUAL_BALANCE']].corr().values[0, 1] < -0.7
mb_mod = massbalance.PastMassBalance(gdir, bias=0)
for yr in mbdf.index.values:
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * rho
mbdf.loc[yr, 'MY_MB'] = np.average(my_mb_on_h, weights=w)
np.testing.assert_allclose(mbdf['ANNUAL_BALANCE'].mean() + bias,
mbdf['MY_MB'].mean(),
atol=1e-2)
mb_mod = massbalance.PastMassBalance(gdir)
for yr in mbdf.index.values:
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * rho
mbdf.loc[yr, 'MY_MB'] = np.average(my_mb_on_h, weights=w)
mb_mod.temp_bias = 1
my_mb_on_h = mb_mod.get_annual_mb(h, yr) * SEC_IN_YEAR * rho
mbdf.loc[yr, 'BIASED_MB'] = np.average(my_mb_on_h, weights=w)
mb_mod.temp_bias = 0
np.testing.assert_allclose(mbdf['ANNUAL_BALANCE'].mean(),
mbdf['MY_MB'].mean(),
atol=1e-2)
self.assertTrue(mbdf.ANNUAL_BALANCE.mean() > mbdf.BIASED_MB.mean())
# Repeat
mb_mod = massbalance.PastMassBalance(gdir, repeat=True,
ys=1901, ye=1950)
yrs = np.arange(100) + 1901
mb = mb_mod.get_specific_mb(h, w, year=yrs)
assert_allclose(mb[50], mb[-50])
# Go for glacier wide now
fls = gdir.read_pickle('inversion_flowlines')
mb_gw_mod = massbalance.MultipleFlowlineMassBalance(gdir, fls=fls,
repeat=True,
ys=1901, ye=1950)
mb_gw = mb_gw_mod.get_specific_mb(year=yrs)
assert_allclose(mb, mb_gw)
def test_glacierwide_mb_model(self):
gdir = self.gdir
init_present_time_glacier(gdir)
fls = gdir.read_pickle('model_flowlines')
h = np.array([])
w = np.array([])
for fl in fls:
w = np.append(w, fl.widths)
h = np.append(h, fl.surface_h)
yrs = np.arange(100) + 1901
classes = [massbalance.PastMassBalance,
massbalance.ConstantMassBalance,
massbalance.RandomMassBalance]
for cl in classes:
if cl is massbalance.RandomMassBalance:
kwargs = {'seed': 0}
else:
kwargs = {}
mb = cl(gdir, **kwargs)
mb_gw = massbalance.MultipleFlowlineMassBalance(gdir, fls=fls,
mb_model_class=cl,
**kwargs)
assert_allclose(mb.get_specific_mb(h, w, year=yrs),
mb_gw.get_specific_mb(year=yrs))
assert_allclose(mb.get_ela(year=yrs),
mb_gw.get_ela(year=yrs))
_h, _w, mbs_gw = mb_gw.get_annual_mb_on_flowlines(year=1950)
mbs_h = mb.get_annual_mb(_h, year=1950)
assert_allclose(mbs_h, mbs_gw)
mb.bias = 100
mb_gw.bias = 100
assert_allclose(mb.get_specific_mb(h, w, year=yrs[:10]),
mb_gw.get_specific_mb(year=yrs[:10]))
assert_allclose(mb.get_ela(year=yrs[:10]),
mb_gw.get_ela(year=yrs[:10]))
mb.temp_bias = 100
mb_gw.temp_bias = 100
assert mb.temp_bias == mb_gw.temp_bias
assert_allclose(mb.get_specific_mb(h, w, year=yrs[:10]),
mb_gw.get_specific_mb(year=yrs[:10]))
assert_allclose(mb.get_ela(year=yrs[:10]),
mb_gw.get_ela(year=yrs[:10]))
mb.prcp_bias = 100
mb_gw.prcp_bias = 100
assert mb.prcp_bias == mb_gw.prcp_bias
assert_allclose(mb.get_specific_mb(h, w, year=yrs[:10]),
mb_gw.get_specific_mb(year=yrs[:10]))
assert_allclose(mb.get_ela(year=yrs[:10]),
mb_gw.get_ela(year=yrs[:10]))
cl = massbalance.PastMassBalance
mb = cl(gdir)
mb_gw = massbalance.MultipleFlowlineMassBalance(gdir,
mb_model_class=cl)
mb = massbalance.UncertainMassBalance(mb, rdn_bias_seed=1,
rdn_prcp_bias_seed=2,
rdn_temp_bias_seed=3)
mb_gw = massbalance.UncertainMassBalance(mb_gw, rdn_bias_seed=1,
rdn_prcp_bias_seed=2,
rdn_temp_bias_seed=3)
assert_allclose(mb.get_specific_mb(h, w, year=yrs[:30]),
mb_gw.get_specific_mb(fls=fls, year=yrs[:30]))
# ELA won't pass because of API incompatibility
# assert_allclose(mb.get_ela(year=yrs[:30]),
# mb_gw.get_ela(year=yrs[:30]))
def test_constant_mb_model(self):
rho = cfg.PARAMS['ice_density']
gdir = self.gdir
init_present_time_glacier(gdir)
df = gdir.read_json('local_mustar')
bias = df['bias']
h, w = gdir.get_inversion_flowline_hw()
cmb_mod = massbalance.ConstantMassBalance(gdir, bias=0)
ombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * rho
otmb = np.average(ombh, weights=w)
np.testing.assert_allclose(0., otmb, atol=0.2)
cmb_mod = massbalance.ConstantMassBalance(gdir)
ombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * rho
otmb = np.average(ombh, weights=w)
np.testing.assert_allclose(0, otmb + bias, atol=0.2)
mb_mod = massbalance.ConstantMassBalance(gdir, y0=2003 - 15)
nmbh = mb_mod.get_annual_mb(h) * SEC_IN_YEAR * rho
ntmb = np.average(nmbh, weights=w)
self.assertTrue(ntmb < otmb)
if do_plot: # pragma: no cover
plt.plot(h, ombh, 'o', label='tstar')
plt.plot(h, nmbh, 'o', label='today')
plt.legend()
plt.show()
cmb_mod.temp_bias = 1
biasombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * rho
biasotmb = np.average(biasombh, weights=w)
self.assertTrue(biasotmb < (otmb - 500))
cmb_mod.temp_bias = 0
nobiasombh = cmb_mod.get_annual_mb(h) * SEC_IN_YEAR * rho
nobiasotmb = np.average(nobiasombh, weights=w)
np.testing.assert_allclose(0, nobiasotmb + bias, atol=0.2)
months = np.arange(12)
monthly_1 = months * 0.
monthly_2 = months * 0.
for m in months:
yr = utils.date_to_floatyear(0, m + 1)
cmb_mod.temp_bias = 0
tmp = cmb_mod.get_monthly_mb(h, yr) * SEC_IN_MONTH * rho
monthly_1[m] = np.average(tmp, weights=w)
cmb_mod.temp_bias = 1
tmp = cmb_mod.get_monthly_mb(h, yr) * SEC_IN_MONTH * rho
monthly_2[m] = np.average(tmp, weights=w)
# check that the winter months are | |
'command': 'image_get_all',
'kwargs': {'limit': 1},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
images = jsonutils.loads(res.body)[0]
self.assertEqual(200, res.status_int)
self._compare_images_and_uuids([UUID4], images)
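# Note on the RPC protocol used throughout these tests: each POST to /rpc carries
# a JSON list of {'command', 'kwargs'} dicts, and the response body is a JSON list
# with one result per command, hence the [0] index after decoding.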
def test_get_index_limit_marker(self):
"""Tests that the registry API returns list of public images.
Must conform to the limit and marker query params.
"""
uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid4_time = uuid3_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
extra_fixture = {'id': _gen_uuid(),
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'limit': 1},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(200, res.status_int)
images = res_dict
self._compare_images_and_uuids([UUID2], images)
def test_get_index_filter_name(self):
"""Tests that the registry API returns list of public images.
Uses a specific name. This is really a sanity check; filtering is
tested more in-depth using /images/detail.
"""
extra_fixture = {'id': _gen_uuid(),
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
extra_fixture = {'id': _gen_uuid(),
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'name': 'new name! #123'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(200, res.status_int)
images = res_dict
self.assertEqual(2, len(images))
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_get_index_filter_on_user_defined_properties(self):
"""Tests that the registry API returns list of public images.
Uses specific user-defined properties.
"""
properties = {'distro': 'ubuntu', 'arch': 'i386', 'type': 'kernel'}
extra_id = _gen_uuid()
extra_fixture = {'id': extra_id,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'image-extra-1',
'size': 19, 'properties': properties,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
# testing with a common property.
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'kernel'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
images = jsonutils.loads(res.body)[0]
self.assertEqual(2, len(images))
self.assertEqual(extra_id, images[0]['id'])
self.assertEqual(UUID1, images[1]['id'])
# testing with a non-existent value for a common property.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'random'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
images = jsonutils.loads(res.body)[0]
self.assertEqual(0, len(images))
# testing with a non-existent property.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'poo': 'random'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
images = jsonutils.loads(res.body)[0]
self.assertEqual(0, len(images))
# testing with multiple existing properties.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'kernel', 'distro': 'ubuntu'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
images = jsonutils.loads(res.body)[0]
self.assertEqual(1, len(images))
self.assertEqual(extra_id, images[0]['id'])
# testing with multiple existing properties but non-existent values.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'random', 'distro': 'random'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
images = jsonutils.loads(res.body)[0]
self.assertEqual(0, len(images))
# testing with multiple non-existing properties.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'typo': 'random', 'poo': 'random'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
images = jsonutils.loads(res.body)[0]
self.assertEqual(0, len(images))
# testing with one existing property and the other non-existing.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'kernel', 'poo': 'random'}},
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
images = jsonutils.loads(res.body)[0]
self.assertEqual(0, len(images))
def test_get_index_sort_default_created_at_desc(self):
"""Tests that the registry API returns list of public images.
Must conform to the default sort key/dir.
"""
uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid4_time = uuid5_time + datetime.timedelta(seconds=5)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = {'id': UUID5,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid5_time,
'updated_at': uuid5_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(200, res.status_int)
images = res_dict
# (flaper87)registry's v1 forced is_public to True
# when no value was specified. This is not
# the default behaviour anymore.
uuid_list = [UUID3, UUID4, UUID5, UUID2, UUID1]
self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_name_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by name in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'asdf',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = {'id': UUID5,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': None,
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': ['name'], 'sort_dir': ['asc']}
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
uuid_list = [UUID5, UUID3, UUID1, UUID2, UUID4]
self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_status_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by status in ascending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'queued',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'asdf',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': ['status'], 'sort_dir': ['asc']}
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
uuid_list = [UUID1, UUID2, UUID4, UUID3]
self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_disk_format_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by disk_format in ascending order.
"""
uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'asdf',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vdi',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': ['disk_format'], 'sort_dir': ['asc']}
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
uuid_list = [UUID1, UUID3, UUID4, UUID2]
self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_container_format_desc(self):
"""Tests that the registry API returns list of public images.
Must be sorted alphabetically by container_format in descending order.
"""
uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'asdf',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'iso',
'container_format': 'bare',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': ['container_format'],
'sort_dir': ['desc']}
}]
req.body = jsonutils.dump_as_bytes(cmd)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
uuid_list = [UUID2, UUID4, UUID3, UUID1]
self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_size_asc(self):
"""Tests that the registry API returns list of public images.
Must be sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'asdf',
'size': 100,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
| |
def algorithms(self):
"""Returns a list of stemming algorithms provided by the py-stemmer
library.
"""
import Stemmer # @UnresolvedImport
return Stemmer.algorithms()
def cache_info(self):
return None
def _get_stemmer_fn(self):
import Stemmer # @UnresolvedImport
stemmer = Stemmer.Stemmer(self.lang)
stemmer.maxCacheSize = self.cachesize
return stemmer.stemWord
def __getstate__(self):
# Can't pickle a dynamic function, so we have to remove the _stem
# attribute from the state
return dict([(k, self.__dict__[k]) for k in self.__dict__
if k != "_stem"])
def __setstate__(self, state):
# Check for old instances of StemFilter class, which didn't have a
# cachesize attribute and pickled the cache attribute
if "cachesize" not in state:
self.cachesize = 10000
if "ignores" in state:
self.ignore = state["ignores"]
elif "ignore" not in state:
self.ignore = frozenset()
if "cache" in state:
del state["cache"]
self.__dict__.update(state)
# Set the _stem attribute
self._stem = self._get_stemmer_fn()
class CharsetFilter(Filter):
"""Translates the text of tokens by calling unicode.translate() using the
supplied character mapping object. This is useful for case and accent
folding.
The ``whoosh.support.charset`` module has a useful map for accent folding.
>>> from whoosh.support.charset import accent_map
>>> retokenizer = RegexTokenizer()
>>> chfilter = CharsetFilter(accent_map)
>>> [t.text for t in chfilter(retokenizer(u'café'))]
[u'cafe']
Another way to get a character mapping object is to convert a Sphinx
charset table file using
:func:`whoosh.support.charset.charset_table_to_dict`.
>>> from whoosh.support.charset import charset_table_to_dict
>>> from whoosh.support.charset import default_charset
>>> retokenizer = RegexTokenizer()
>>> charmap = charset_table_to_dict(default_charset)
>>> chfilter = CharsetFilter(charmap)
>>> [t.text for t in chfilter(retokenizer(u'Stra\\xdfe'))]
[u'strase']
The Sphinx charset table format is described at
http://www.sphinxsearch.com/docs/current.html#conf-charset-table.
"""
__inittypes__ = dict(charmap=dict)
def __init__(self, charmap):
"""
:param charmap: a dictionary mapping from integer character numbers to
unicode characters, as required by the unicode.translate() method.
"""
self.charmap = charmap
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.charmap == other.charmap)
def __call__(self, tokens):
assert hasattr(tokens, "__iter__")
charmap = self.charmap
for t in tokens:
t.text = t.text.translate(charmap)
yield t
class NgramFilter(Filter):
"""Splits token text into N-grams.
>>> rext = RegexTokenizer()
>>> stream = rext("hello there")
>>> ngf = NgramFilter(4)
>>> [token.text for token in ngf(stream)]
["hell", "ello", "ther", "here"]
"""
__inittypes__ = dict(minsize=int, maxsize=int)
def __init__(self, minsize, maxsize=None, at=None):
"""
:param minsize: The minimum size of the N-grams.
:param maxsize: The maximum size of the N-grams. If you omit this
parameter, maxsize == minsize.
:param at: If 'start', only take N-grams from the start of each word.
if 'end', only take N-grams from the end of each word. Otherwise,
take all N-grams from the word (the default).
"""
self.min = minsize
self.max = maxsize or minsize
self.at = 0
if at == "start":
self.at = -1
elif at == "end":
self.at = 1
def __eq__(self, other):
return other and self.__class__ is other.__class__\
and self.min == other.min and self.max == other.max
def __call__(self, tokens):
assert hasattr(tokens, "__iter__")
at = self.at
for t in tokens:
text = t.text
if len(text) < self.min:
continue
chars = t.chars
if chars:
startchar = t.startchar
# Token positions don't mean much for N-grams,
# so we'll leave the token's original position
# untouched.
if t.mode == "query":
size = min(self.max, len(t.text))
if at == -1:
t.text = text[:size]
if chars:
t.endchar = startchar + size
yield t
elif at == 1:
t.text = text[0 - size:]
if chars:
t.startchar = t.endchar - size
yield t
else:
for start in xrange(0, len(text) - size + 1):
t.text = text[start:start + size]
if chars:
t.startchar = startchar + start
t.endchar = startchar + start + size
yield t
else:
if at == -1:
limit = min(self.max, len(text))
for size in xrange(self.min, limit + 1):
t.text = text[:size]
if chars:
t.endchar = startchar + size
yield t
elif at == 1:
if chars:
original_startchar = t.startchar
start = max(0, len(text) - self.max)
for i in xrange(start, len(text) - self.min + 1):
t.text = text[i:]
if chars:
t.startchar = original_startchar + i
yield t
else:
for start in xrange(0, len(text) - self.min + 1):
for size in xrange(self.min, self.max + 1):
end = start + size
if end > len(text):
continue
t.text = text[start:end]
if chars:
t.startchar = startchar + start
t.endchar = startchar + end
yield t
class IntraWordFilter(Filter):
"""Splits words into subwords and performs optional transformations on
subword groups. This filter is functionally based on yonik's
WordDelimiterFilter in Solr, but shares no code with it.
* Split on intra-word delimiters, e.g. `Wi-Fi` -> `Wi`, `Fi`.
* When splitwords=True, split on case transitions,
e.g. `PowerShot` -> `Power`, `Shot`.
* When splitnums=True, split on letter-number transitions,
e.g. `SD500` -> `SD`, `500`.
* Leading and trailing delimiter characters are ignored.
* Trailing possessive "'s" is removed from subwords,
e.g. `O'Neil's` -> `O`, `Neil`.
The mergewords and mergenums arguments turn on merging of subwords.
When the merge arguments are false, subwords are not merged.
* `PowerShot` -> `0`:`Power`, `1`:`Shot` (where `0` and `1` are token
positions).
When one or both of the merge arguments are true, consecutive runs of
alphabetic and/or numeric subwords are merged into an additional token with
the same position as the last sub-word.
* `PowerShot` -> `0`:`Power`, `1`:`Shot`, `1`:`PowerShot`
* `A's+B's&C's` -> `0`:`A`, `1`:`B`, `2`:`C`, `2`:`ABC`
* `Super-Duper-XL500-42-AutoCoder!` -> `0`:`Super`, `1`:`Duper`, `2`:`XL`,
`2`:`SuperDuperXL`,
`3`:`500`, `4`:`42`, `4`:`50042`, `5`:`Auto`, `6`:`Coder`,
`6`:`AutoCoder`
When using this filter you should use a tokenizer that only splits on
whitespace, so the tokenizer does not remove intra-word delimiters before
this filter can see them, and put this filter before any use of
LowercaseFilter.
>>> rt = RegexTokenizer(r"\\S+")
>>> iwf = IntraWordFilter()
>>> lcf = LowercaseFilter()
>>> analyzer = rt | iwf | lcf
One use for this filter is to help match different written representations
of a concept. For example, if the source text contained `wi-fi`, you
probably want `wifi`, `WiFi`, `wi-fi`, etc. to match. One way of doing this
is to specify mergewords=True and/or mergenums=True in the analyzer used
for indexing, and mergewords=False / mergenums=False in the analyzer used
for querying.
>>> iwf_i = IntraWordFilter(mergewords=True, mergenums=True)
>>> iwf_q = IntraWordFilter(mergewords=False, mergenums=False)
>>> iwf = MultiFilter(index=iwf_i, query=iwf_q)
>>> analyzer = RegexTokenizer(r"\S+") | iwf | LowercaseFilter()
(See :class:`MultiFilter`.)
"""
is_morph = True
__inittypes__ = dict(delims=text_type, splitwords=bool, splitnums=bool,
mergewords=bool, mergenums=bool)
def __init__(self, delims=u("-_'\"()!@#$%^&*[]{}<>\|;:,./?`~=+"),
splitwords=True, splitnums=True,
mergewords=False, mergenums=False):
"""
:param delims: a string of delimiter characters.
:param splitwords: if True, split at case transitions,
e.g. `PowerShot` -> `Power`, `Shot`
:param splitnums: if True, split at letter-number transitions,
e.g. `SD500` -> `SD`, `500`
:param mergewords: merge consecutive runs of alphabetic subwords into
an additional token with the same position as the last subword.
:param mergenums: merge consecutive runs of numeric subwords into an
additional token with the same position as the last subword.
"""
from whoosh.support.unicode import digits, lowercase, uppercase
self.delims = re.escape(delims)
# Expression for text between delimiter characters
self.between = re.compile(u("[^%s]+") % (self.delims,), re.UNICODE)
# Expression for removing "'s" from the end of sub-words
dispat = u("(?<=[%s%s])'[Ss](?=$|[%s])") % (lowercase, uppercase,
self.delims)
self.possessive = re.compile(dispat, re.UNICODE)
# Expression for finding case and letter-number transitions
lower2upper = u("[%s][%s]") % (lowercase, uppercase)
letter2digit = u("[%s%s][%s]") % (lowercase, uppercase, digits)
digit2letter = u("[%s][%s%s]") % (digits, lowercase, uppercase)
if splitwords and splitnums:
splitpat = u("(%s|%s|%s)") % (lower2upper, letter2digit,
digit2letter)
self.boundary = re.compile(splitpat, re.UNICODE)
elif splitwords:
self.boundary = re.compile(text_type(lower2upper), re.UNICODE)
elif splitnums:
numpat = u("(%s|%s)") % (letter2digit, digit2letter)
self.boundary = re.compile(numpat, re.UNICODE)
self.splitting = splitwords or splitnums
self.mergewords = mergewords
self.mergenums = mergenums
def __eq__(self, other):
return other and self.__class__ is other.__class__\
and self.__dict__ == other.__dict__
def _split(self, string):
bound = self.boundary
# Yields (startchar, endchar) pairs for each indexable substring in
# the given string, e.g. "WikiWord" -> (0, 4), (4, 8)
# Whether we're splitting on transitions (case changes, letter -> num,
# num -> letter, etc.)
splitting = self.splitting
# Make a list (dispos, for "dispossessed") of (startchar, endchar)
# pairs for runs of text | |
import json
import os
import pytest
from collections import OrderedDict
from rdflib import URIRef
from unittest import mock
from ..commands import generate_ontology as go
from ..commands.owltools import Owler
pytestmark = [pytest.mark.setone, pytest.mark.working]
def test_parse_args_defaults():
args = []
args = go.parse_args(args)
assert args.ontology == 'all'
assert args.key is None
assert args.env == 'data'
@pytest.fixture
def connection():
return {
"server": "https://data.4dnucleome.org/",
"key": "testkey",
"secret": "testsecret"
}
@pytest.fixture
def slim_terms():
return [
{
"uuid": "111119bc-8535-4448-903e-854af460a233",
"term_name": "ectoderm",
"term_id": "UBERON:0000924",
"is_slim_for": "developmental",
},
{
"uuid": "111122bc-8535-4448-903e-854af460a233",
"preferred_name": "3D chromatin structure",
"term_name": "chromosome conformation identification objective",
"term_id": "OBI:0001917",
"is_slim_for": "assay"
}
]
def test_connect2server(connection):
# parameters we pass in don't really matter
key = "{'server': 'https://data.4dnucleome.org/', 'key': 'testkey', 'secret': 'testsecret'}"
with mock.patch('encoded.commands.generate_ontology.get_authentication_with_server', return_value=connection):
retval = go.connect2server(None, key)
assert retval == connection
# see ontology schema for full schema
# now synonym_terms and definition_terms are fully embedded
all_ontology = [{'download_url': 'http://www.ebi.ac.uk/efo/efo_inferred.owl',
'synonym_terms': [
'/ontology-terms/111111bc-8535-4448-903e-854af460a233/',
'/ontology-terms/111112bc-8535-4448-903e-854af460a233/'],
'@id': '/ontologys/530006bc-8535-4448-903e-854af460b254/',
'@type': ['Ontology', 'Item'],
'definition_terms': [
'/ontology-terms/111115bc-8535-4448-903e-854af460a233/',
'/ontology-terms/111116bc-8535-4448-903e-854af460a233/'],
'namespace_url': 'http://www.ebi.ac.uk/efo/',
'ontology_prefix': 'EFO',
'uuid': '530006bc-8535-4448-903e-854af460b254',
'ontology_name': 'Experimental Factor Ontology'
},
{'ontology_name': 'Uberon',
'@type': ['Ontology', 'Item'],
'ontology_prefix': 'UBERON',
'namespace_url': 'http://purl.obolibrary.org/obo/',
'download_url': 'http://purl.obolibrary.org/obo/uberon/composite-metazoan.owl',
'@id': '/ontologys/530016bc-8535-4448-903e-854af460b254/',
'definition_terms': ['/ontology-terms/111116bc-8535-4448-903e-854af460a233/'],
'uuid': '530016bc-8535-4448-903e-854af460b254',
},
{'ontology_name': 'Ontology for Biomedical Investigations',
'@type': ['Ontology', 'Item'],
'ontology_prefix': 'OBI',
'namespace_url': 'http://purl.obolibrary.org/obo/',
'download_url': 'http://purl.obolibrary.org/obo/obi.owl',
'@id': '/ontologys/530026bc-8535-4448-903e-854af460b254/',
'definition_terms': [
{'term_name': 'definition',
'@type': ['OntologyTerm', 'Item'],
'term_id': 'IAO:0000115',
'@id': '/ontology-terms/111116bc-8535-4448-903e-854af460a233/',
'uuid': '111116bc-8535-4448-903e-854af460a233',
'term_url': 'http://purl.obolibrary.org/obo/IAO_0000115'
}
],
'uuid': '530026bc-8535-4448-903e-854af460b254',
'synonym_terms': [
{'term_name': 'alternative term',
'@type': ['OntologyTerm', 'Item'],
'term_id': 'IAO:0000118',
'@id': '/ontology-terms/111117bc-8535-4448-903e-854af460a233/',
'uuid': '111117bc-8535-4448-903e-854af460a233',
'term_url': 'http://purl.obolibrary.org/obo/IAO_0000118'
},
{'term_name': 'alternative term',
'@type': ['OntologyTerm', 'Item'],
'term_id': 'IAO:0000118',
'@id': '/ontology-terms/111117bc-8535-4448-903e-854af460a233/',
'uuid': '111117bc-8535-4448-903e-854af460a233',
'term_url': 'http://purl.obolibrary.org/obo/IAO_0000118'
}
]
}]
def get_fdn_ontology_side_effect(*args, **kwargs):
for i, arg in enumerate(args):
print('ARG', i, ' = ', arg)
if args[0] is not None:
return all_ontology[0]
else:
return all_ontology
def test_get_ontologies_all(connection):
prefixes = ['EFO', 'UBERON', 'OBI']
with mock.patch('encoded.commands.generate_ontology.search_metadata', return_value=all_ontology):
ont_list = 'all'
ontologies = go.get_ontologies(connection, ont_list)
assert len(ontologies) == 3
for ont in ontologies:
assert ont['ontology_prefix'] in prefixes
def test_get_ontologies_one(connection):
prefix = 'EFO'
with mock.patch('encoded.commands.generate_ontology.get_metadata', side_effect=get_fdn_ontology_side_effect):
ont_list = 'EFO'
ontologies = go.get_ontologies(connection, ont_list)
assert len(ontologies) == 1
assert ontologies[0]['ontology_prefix'] == prefix
def test_get_ontologies_not_in_db(connection):
prefix = 'EFO'
all_ontology.append({'@type': ['Error', 'Item'], 'ontology_prefix': 'FAKE'})
with mock.patch('encoded.commands.generate_ontology.get_metadata',
return_value={'@type': ['Error', 'Item'], 'ontology_prefix': 'FAKE'}):
ont_list = 'FAKE'
ontologies = go.get_ontologies(connection, ont_list)
assert not ontologies
@pytest.fixture
def slim_term_list():
# see ontology_term schema for full schema
return [{'term_id': 'a_term1', 'uuid': 'uuida1', 'is_slim_for': 'assay'},
{'term_id': 'a_term2', 'uuid': 'uuida2', 'is_slim_for': 'assay'},
{'term_id': 'd_term1', 'uuid': 'uuidd1', 'is_slim_for': 'developmental'}]
@pytest.fixture
def slim_terms_by_ont(slim_term_list):
return [
[slim_term_list[0],
slim_term_list[1]],
[slim_term_list[2]],
None,
None,
None
]
@pytest.fixture
def term_w_closure():
return {'term_id': '1', 'uuid': 'uuid1',
'closure': ['id1', 'id2', 'a_term1']}
@pytest.fixture
def terms_w_closures(term_w_closure):
# term with 2 slims
term_w_two = term_w_closure.copy()
term_w_two['term_id'] = '4'
term_w_two['uuid'] = 'uuid2'
term_w_two['closure'] = term_w_closure['closure'].copy()
term_w_two['closure'].append('a_term2')
# term w closure but no slim terms
term_wo_slim = term_w_closure.copy()
term_wo_slim['term_id'] = '5'
term_wo_slim['uuid'] = 'uuid5'
term_wo_slim['closure'] = term_w_closure['closure'].copy()
term_wo_slim['closure'].pop()
# term with both 'closure' and 'closure_with_develops_from' both with the same slim
term_with_both = term_w_closure.copy()
term_with_both['term_id'] = '3'
term_with_both['uuid'] = 'uuid3'
term_with_both['closure_with_develops_from'] = ['d_term1']
print(term_with_both)
# term with 'closure_with_develops_from' slim term'
term_cwdf = term_with_both.copy()
term_cwdf['term_id'] = '2'
term_cwdf['uuid'] = 'uuid2'
del term_cwdf['closure']
# term with no closures
term_w_none = term_cwdf.copy()
term_w_none['term_id'] = '6'
term_w_none['uuid'] = 'uuid6'
del term_w_none['closure_with_develops_from']
return [term_w_closure, term_cwdf, term_with_both,
term_w_two, term_wo_slim, term_w_none]
@pytest.fixture
def terms():
return {
'a_term1': {
'term_id': 'a_term1',
'term_name': 'name1',
'all_parents': []
},
'id2': {
'term_id': 'id2',
'term_name': 'name2',
'parents': ['a_term1', 'ObsoleteClass'],
'all_parents': ['a_term1']
},
'id3': {
'term_id': 'id3',
'term_name': 'obsolete name',
'relationships': ['id2'],
'all_parents': ['id2']
},
'id4': {
'term_id': 'id4',
'term_name': 'Obsolete name',
'relationships': ['a_term1', 'id2'],
'all_parents': ['a_term11', 'id2']
},
'd_term1': {
'term_id': 'd_term1',
'term_name': '',
'all_parents': ['id4']
},
'id6': {
'term_id': 'id6',
'develops_from': ['id7'],
'parents': ['id2'],
'all_parents': []
},
'id7': {
'term_id': 'id7',
'parents': ['d_term1'],
'all_parents': ['id6']
},
'id8': {
'term_id': 'id8',
'develops_from': ['id7', 'id3'],
'all_parents': ['id7', 'id3']
},
'id9': {
'term_id': 'id9',
'has_part_inverse': ['id3'],
'develops_from': ['id3'],
'all_parents': ['id10']
}
}
@pytest.fixture
def syn_uris():
return ['http://www.ebi.ac.uk/efo/alternative_term',
'http://www.geneontology.org/formats/oboInOwl#hasExactSynonym',
'http://purl.obolibrary.org/obo/IAO_0000118']
@pytest.fixture
def syn_uris_as_URIRef(syn_uris):
return [go.convert2namespace(uri) for uri in syn_uris]
def test_get_slim_terms(connection, slim_terms_by_ont):
present = ['developmental', 'assay']
absent = ['organ', 'system', 'cell']
test_slim_terms = slim_terms_by_ont
with mock.patch('encoded.commands.generate_ontology.search_metadata',
side_effect=test_slim_terms):
terms = go.get_slim_terms(connection)
assert len(terms) == 3
for term in terms:
assert term['is_slim_for'] in present
assert term['is_slim_for'] not in absent
def test_add_slim_to_term(terms_w_closures, slim_term_list):
slim_ids = ['a_term1', 'd_term1', 'a_term2']
for i, term in enumerate(terms_w_closures):
test_term = go.add_slim_to_term(term, slim_term_list)
assert test_term['term_id'] == str(i + 1)
if i < 2:
assert len(test_term['slim_terms']) == 1
assert test_term['slim_terms'][0] == slim_ids[i]
elif i <= 3:
assert len(test_term['slim_terms']) == 2
for t in test_term['slim_terms']:
assert t in slim_ids
elif i > 3:
assert 'slim_terms' not in test_term
def test_add_slim_terms(terms, slim_term_list):
terms = go.add_slim_terms(terms, slim_term_list)
print(terms)
for tid, term in terms.items():
if tid == 'id6':
assert len(term['slim_terms']) == 2
assert 'd_term1' in term['slim_terms']
assert 'a_term1' in term['slim_terms']
elif tid == 'id9':
assert 'slim_terms' not in term
else:
assert len(term['slim_terms']) == 1
if tid in ['a_term1', 'id2', 'id3', 'id4']:
assert term['slim_terms'][0] == 'a_term1'
elif tid in ['d_term1', 'id7', 'id8']:
assert term['slim_terms'][0] == 'd_term1'
def test_remove_obsoletes_and_unnamed_obsoletes(terms):
db_terms = []
terms['id10'] = {'term_id': 'id10', 'term_name': 'new_term that is deprecated'}
ids = ['a_term1', 'id2', 'id3', 'id4', 'd_term1', 'id6', 'id7', 'id8', 'id9', 'id10']
deprecated = 'id10'
for i in ids:
assert i in terms
terms = go.remove_obsoletes_and_unnamed(terms, deprecated, db_terms)
remaining = ids.pop(0)
assert remaining in terms
for i in ids:
assert i not in terms
def check_if_URIRef(uri):
return isinstance(uri, URIRef)
def test_convert2namespace(syn_uris):
for uri in syn_uris:
ns = go.convert2namespace(uri)
assert check_if_URIRef(ns)
assert str(ns) == uri
def test_get_syndef_terms_as_uri(syn_uris):
asrdf = [True, False]
for rdf in asrdf:
uris = go.get_syndef_terms_as_uri(all_ontology[2], 'synonym_terms', rdf)
if rdf:
for uri in uris:
assert check_if_URIRef(uri)
assert str(uri) in syn_uris
else:
    for uri in uris:
        assert str(uri) in syn_uris
def test_get_synonym_term_uris_no_ontology():
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=[]):
synterms = go.get_synonym_term_uris('ontologys/FAKE')
assert not synterms
def test_get_definition_term_uris_no_ontology():
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=[]):
synterms = go.get_definition_term_uris('ontologys/FAKE')
assert not synterms
def test_get_synonym_term_uris(syn_uris, syn_uris_as_URIRef):
asrdf = [True, False]
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=syn_uris_as_URIRef):
for rdf in asrdf:
uris = go.get_synonym_term_uris('ontid', rdf)
if rdf:
for uri in uris:
assert check_if_URIRef(uri)
assert str(uri) in syn_uris
else:
    for uri in uris:
        assert str(uri) in syn_uris
def test_get_definition_term_uris(syn_uris, syn_uris_as_URIRef):
asrdf = [True, False]
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=syn_uris_as_URIRef):
for rdf in asrdf:
uris = go.get_definition_term_uris('ontid', rdf)
if rdf:
for uri in uris:
assert check_if_URIRef(uri)
assert str(uri) in syn_uris
else:
    for uri in uris:
        assert str(uri) in syn_uris
@pytest.yield_fixture
def owler():
with mock.patch.object(go, 'Owler') as mocked:
yield mocked
@pytest.fixture
def returned_synonyms():
return [
[], [],
['testsyn1'], ['testsyn1'],
['testsyn1', 'testsyn2'], ['testsyn1', 'testsyn2']
]
def test_get_synonyms_and_definitions(owler, returned_synonyms):
checks = ['testsyn1', 'testsyn2']
with mock.patch('encoded.commands.generate_ontology.getObjectLiteralsOfType',
side_effect=returned_synonyms):
class_ = 'test_class'
synonym_terms = ['1']
definition_terms = ['1']
for i in range(int(len(returned_synonyms) / 2)):
synonyms = go.get_synonyms(class_, owler, synonym_terms)
definitions = go.get_definitions(class_, owler, definition_terms)
assert synonyms == definitions
if i == 0:
assert not synonyms
else:
assert len(synonyms) == i
for syn in synonyms:
assert syn in checks
def test_iterative_parents(terms):
for tid, term in terms.items():
parents = []
oks = []
if 'all_parents' in term:
parents = go.iterative_parents(term['all_parents'], terms, 'all_parents')
if tid in ['a_term1', 'id6', 'id9']:
assert not parents
if tid == 'id2':
oks = ['a_term1']
assert len(parents) == 1
if tid in ['id3', 'id4']:
oks = ['a_term1', 'id2']
assert len(parents) == 2
if tid == 'd_term1':
oks = ['a_term1', 'id2', 'id4']
assert len(parents) == 3
if tid == 'id7':
oks = ['id6']
assert len(parents) == 1
if tid == 'id8':
oks = ['id6', 'id7', 'a_term1', 'id2', 'id3']
assert len(parents) == 5
if oks:
assert all(_id in oks for _id in parents)
def test_get_all_ancestors(terms):
for tid, term in terms.items():
term['development'] = term['all_parents'].copy() # adding development to all terms
for tid, term in terms.items():
term = go.get_all_ancestors(term, terms, 'all_parents')
term = go.get_all_ancestors(term, terms, 'development')
# check they're the same - no need to check both anymore
assert term['closure'] == term['closure_with_develops_from']
closure = term['closure']
okids = []
assert tid in closure # checks that the term id is included
if tid in ['a_term1', 'id6', 'id9']:
assert len(closure) == 1
if tid in ['id2', 'id7']:
assert len(closure) == 2
if tid == 'id2':
okids = ['a_term1']
else:
okids = ['id6']
if tid in ['id3', 'id4']:
assert len(closure) == 3
okids = ['a_term1', 'id2']
if tid == 'd_term1':
assert len(closure) == 4
okids = ['a_term1', 'id2', 'id4']
if tid == 'id8':
assert len(closure) == 6
okids = ['id6', 'id7', 'a_term1', 'id2', 'id3']
if okids:
assert all(_id in okids for _id in closure)
# -*- coding: utf-8 -*-
import json
import logging
import warnings
from itertools import combinations_with_replacement
from pathlib import Path
import numpy as np
from mff import gp, interpolation, kernels, utility, models
from .base import Model
logger = logging.getLogger(__name__)
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
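# Minimal usage sketch (illustrative, not from the original module): NpEncoder lets
# json.dumps handle numpy scalars and arrays that the default encoder rejects, e.g.
#     json.dumps({'n': np.int64(3), 'x': np.arange(2)}, cls=NpEncoder)
# would produce '{"n": 3, "x": [0, 1]}'.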
class CombinedSingleSpeciesModel(Model):
""" 2- and 3-body single species model class
Class managing the Gaussian processes and their mapped counterparts
Args:
element (int): The atomic number of the element considered
r_cut (float): The cutoff radius used to carve the atomic environments
sigma_2b (float): Lengthscale parameter of the 2-body Gaussian process
sigma_3b (float): Lengthscale parameter of the 3-body Gaussian process
theta_2b (float): decay ratio of the cutoff function in the 2-body Gaussian Process
theta_3b (float): decay ratio of the cutoff function in the 3-body Gaussian Process
noise (float): noise value associated with the training output data
Attributes:
gp_2b (method): The 2-body single species Gaussian Process
gp_3b (method): The 3-body single species Gaussian Process
grid_2b (method): The 2-body single species tabulated potential
grid_3b (method): The 3-body single species tabulated potential
grid_start (float): Minimum atomic distance for which the grids are defined (cannot be 0.0)
grid_num (int): number of points per side used to create the 2- and 3-body grid. The 3-body
grid is 3-dimensional, therefore its total number of grid points will be grid_num^3
"""
def __init__(self, element, r_cut, sigma_2b, sigma_3b, theta_2b, theta_3b, noise, rep_sig=1, **kwargs):
super().__init__()
self.element = element
self.r_cut = r_cut
self.rep_sig = rep_sig
kernel_2b = kernels.TwoBodySingleSpeciesKernel(
theta=[sigma_2b, theta_2b, r_cut])
self.gp_2b = gp.GaussianProcess(
kernel=kernel_2b, noise=noise, **kwargs)
kernel_3b = kernels.ThreeBodySingleSpeciesKernel(
theta=[sigma_3b, theta_3b, r_cut])
self.gp_3b = gp.GaussianProcess(
kernel=kernel_3b, noise=noise, **kwargs)
self.grid_2b, self.grid_3b, self.grid_start, self.grid_num = None, None, None, None
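# Hedged usage sketch (added for illustration, not part of the original API docs):
# assuming `confs` is a list of M x 5 local-environment arrays and `forces` the
# matching array of force vectors, a typical workflow would be
#     model = CombinedSingleSpeciesModel(element=29, r_cut=4.5,
#                                        sigma_2b=0.1, sigma_3b=0.3,
#                                        theta_2b=0.5, theta_3b=0.5, noise=1e-3)
#     model.fit(confs, forces, ncores=4)
#     predicted_forces = model.predict(confs)
#     model.build_grid(start=1.5, num_2b=100, num_3b=50)
# All numerical values above are illustrative placeholders, not tuned settings.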
def fit(self, confs, forces, ncores=1):
""" Fit the GP to a set of training forces using a 2- and
3-body single species force-force kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training forces and the 2-body predictions of force on the
training configurations
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypothetical_model_name = ("models/MODEL_ker_TwoBodySingleSpecies_ntr_%i.json" % (len(forces)))
try:
    model_2b = models.TwoBodySingleSpeciesModel.from_json(hypothetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded 2-body model to bootstrap training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit(confs, forces, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
self.gp_3b.fit(confs, forces - two_body_forces, ncores=ncores)
def fit_energy(self, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species energy-energy kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training energies and the 2-body predictions of energies on the
training configurations.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypothetical_model_name = "models/MODEL_ker_TwoBodySingleSpecies_ntr_%i.json" % (len(energies))
try:
    model_2b = models.TwoBodySingleSpeciesModel.from_json(hypothetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
print("Loaded 2-body model to bootstrap training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(glob_confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.gp_2b.fit_energy(glob_confs, energies, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_energy(glob_confs, energies -
two_body_energies, ncores=ncores)
def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species force-force, energy-energy, and energy-forces kernel
functions. The 2-body Gaussian process is first fitted, then the 3-body GP
is fitted to the difference between the training energies (and forces) and
the 2-body predictions of energies (and forces) on the training configurations.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypothetical_model_name = "models/MODEL_ker_TwoBodySingleSpecies_ntr_%i.json" % (len(energies)+len(forces))
try:
    model_2b = models.TwoBodySingleSpeciesModel.from_json(hypothetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded 2-body model to bootstrap training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit_force_and_energy(
confs, forces, glob_confs, energies, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_force_and_energy(
confs, forces - two_body_forces, glob_confs, energies - two_body_energies, ncores=ncores)
def predict(self, confs, return_std=False, ncores=1):
""" Predict the forces acting on the central atoms of confs using the
2- and 3-body GPs. The total force is the sum of the two predictions.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
forces (array): array of force vectors predicted by the GPs
forces_errors (array): errors associated to the force predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
force_2b, std_2b = self.gp_2b.predict(confs, return_std)
force_2b += rep_forces
else:
force_2b, std_2b = self.gp_2b.predict(
confs, return_std, ncores=ncores)
force_3b, std_3b = self.gp_3b.predict(
    confs, return_std, ncores=ncores)
return force_2b + force_3b, std_2b + std_3b
else:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
return self.gp_2b.predict(confs, return_std, ncores=ncores) + rep_forces + \
self.gp_3b.predict(confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict(confs, return_std, ncores=ncores) + \
self.gp_3b.predict(confs, return_std, ncores=ncores)
def predict_energy(self, glob_confs, return_std=False, ncores=1):
""" Predict the local energies of the central atoms of confs using the
2- and 3-body GPs. The total energy is the sum of the two predictions.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
energies (array) : Array containing the total energy of each snapshot
energies_errors (array): errors associated to the energies predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energy_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_2b += rep_energies
else:
energy_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_3b, std_3b = self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores)
return energy_2b + energy_3b, std_2b + std_3b
else:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
return self.gp_2b.predict_energy(glob_confs, return_std) + rep_energies +\
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + \
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores)
def build_grid(self, start, num_2b, num_3b, ncores=1):
""" Build the mapped 2- and 3-body potentials.
Calculates the energy predicted by the GP for two and three atoms at all possible combinations
of num distances ranging from start to r_cut. The energy for the 3-body mapped grid is
calculated only for ``valid`` triplets of atoms, i.e. sets of three distances which
form a triangle (this is checked via the triangle inequality).
The grid building exploits all the permutation invariances to reduce the number of energy
calculations needed to fill the grid.
The computed 2-body energies are stored in an array of values, and a 1D spline interpolation is created.
The computed 3-body energies are stored in a 3D cube of values, and a 3D spline interpolation is created.
device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batchsize = 2
A = random_hermitian_pd_matrix(3, batchsize, dtype=dtype, device=device)
A_triu = A.triu() # fill the lower triangular part with zero
U = torch.cholesky(A_triu, upper=True)
reconstruct_A = U.mH @ U
self.assertEqual(A, reconstruct_A)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def run_test(n, batch):
A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
expected_L = np.linalg.cholesky(A.cpu().numpy())
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
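# info == 0 for every batch element signals a successful factorization; the
# non-positive-definite case (positive info values) is exercised in
# test_cholesky_ex_non_pd below.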
actual_L, actual_info = torch.linalg.cholesky_ex(A)
# For fp32 individual entries in matrices can differ between PyTorch and NumPy
# Let's compare the norms of matrices instead
if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
# axis is specified to calculate matrix norm for batched input
expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
# Compare the norms with standard tolerances
self.assertEqual(actual_norm, expected_norm)
# and individual values with a higher tolerance
self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
else:
self.assertEqual(actual_L, expected_L)
self.assertEqual(actual_info, expected_info)
ns = (0, 3, 5)
batches = ((), (2, ), (2, 1))
for n, batch in itertools.product(ns, batches):
run_test(n, batch)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex_non_pd(self, device, dtype):
# if the input matrix is not positive definite, a positive integer is returned in info
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is singular
_, info = torch.linalg.cholesky_ex(A)
self.assertEqual(info, 3)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
torch.linalg.cholesky_ex(A, check_errors=True)
# if at least one matrix in the batch is not positive definite,
# batched info with positive integer for the corresponding matrix is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[3, -2, -2] = 0 # Now A[3] is singular
_, info = torch.linalg.cholesky_ex(A)
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
expected_info[3] = 2
self.assertEqual(info, expected_info)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The factorization could not be completed'):
torch.linalg.cholesky_ex(A, check_errors=True)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex_out_info_error(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
# dtype for info must be torch.int32
A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
L = torch.empty(A.shape, dtype=dtype, device=device)
info = torch.empty(A.shape[:-2], dtype=torch.int64, device=device)
with self.assertRaisesRegex(RuntimeError, "but got info with dtype Long"):
torch.linalg.cholesky_ex(A, out=(L, info))
def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):
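# torch.addr computes beta * m + alpha * outer(a, b); the helper below builds the
# reference result with numpy's np.outer using the same formula.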
def check(m, a, b, beta, alpha):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
m_np = m.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
m_np = m.cpu().numpy()
exact_dtype = True
if beta == 0:
expected = alpha * np.outer(a_np, b_np)
else:
expected = beta * m_np + alpha * np.outer(a_np, b_np)
res = torch.addr(m, a, b, beta=beta, alpha=alpha)
self.assertEqual(res, expected, exact_dtype=exact_dtype)
# Test out variant
out = torch.empty_like(res)
torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
self.assertEqual(out, expected, exact_dtype=exact_dtype)
m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)
a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
check(m, a, b, beta, alpha)
# test transpose
m_transpose = torch.transpose(m, 0, 1)
check(m_transpose, a, b, beta, alpha)
# test 0 strided tensor
zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)
check(m, zero_strided, b, beta, alpha)
# test scalar
m_scalar = torch.tensor(1, device=device, dtype=dtype)
check(m_scalar, a, b, beta, alpha)
# test nans and infs are not propagated to the output when beta == 0
float_and_complex_dtypes = floating_and_complex_types_and(torch.half, torch.bfloat16)
if beta == 0 and dtype in float_and_complex_dtypes:
m[0][10] = m[10][10] = m[20][20] = float('inf')
m[1][10] = m[11][10] = m[21][20] = float('nan')
check(m, a, b, 0, alpha)
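# Editor-added illustrative sketch: torch.addr computes beta * M + alpha * outer(a, b), which is
# exactly what the NumPy comparison above reproduces. Values below are arbitrary examples.
#
#   M = torch.zeros(2, 2)
#   a = torch.tensor([1., 2.])
#   b = torch.tensor([3., 4.])
#   out = torch.addr(M, a, b, beta=1, alpha=2)               # == 1 * M + 2 * outer(a, b)
#   assert torch.equal(out, 2 * torch.outer(a, b))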
@dtypes(torch.bool)
def test_addr_bool(self, device, dtype):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=True)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)
@dtypes(*integral_types())
def test_addr_integral(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'argument beta must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)
with self.assertRaisesRegex(RuntimeError,
'argument alpha must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_addr_float_and_complex(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
if dtype in complex_types():
self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))
@dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
def test_outer_type_promotion(self, device, dtypes):
a = torch.randn(5).to(device=device, dtype=dtypes[0])
b = torch.randn(5).to(device=device, dtype=dtypes[1])
for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
# don't use @dtypes decorator to avoid generating ~1700 tests per device
def test_addr_type_promotion(self, device):
for dtypes0, dtypes1, dtypes2 in product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), repeat=3):
a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)
desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1),
dtypes2)
for op in (torch.addr, torch.Tensor.addr):
result = op(m, a, b)
self.assertEqual(result.dtype, desired_dtype)
# Tests migrated from test_torch.py
# 1) test the shape of the result tensor when there is empty input tensor
# 2) test the Runtime Exception when there is scalar input tensor
def test_outer_ger_addr_legacy_tests(self, device):
for size in ((0, 0), (0, 5), (5, 0)):
a = torch.rand(size[0], device=device)
b = torch.rand(size[1], device=device)
self.assertEqual(torch.outer(a, b).shape, size)
self.assertEqual(torch.ger(a, b).shape, size)
m = torch.empty(size, device=device)
self.assertEqual(torch.addr(m, a, b).shape, size)
m = torch.randn(5, 6, device=device)
a = torch.randn(5, device=device)
b = torch.tensor(6, device=device)
self.assertRaises(RuntimeError, lambda: torch.outer(a, b))
self.assertRaises(RuntimeError, lambda: torch.outer(b, a))
self.assertRaises(RuntimeError, lambda: torch.ger(a, b))
self.assertRaises(RuntimeError, lambda: torch.ger(b, a))
self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))
self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))
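# Editor-added illustrative sketch of the legacy shape rules exercised above: empty 1D inputs
# yield an empty outer product, while a 0-dim tensor is rejected.
#
#   torch.outer(torch.rand(0), torch.rand(5)).shape          # torch.Size([0, 5])
#   torch.outer(torch.rand(5), torch.tensor(6.))             # raises RuntimeError (0-dim input)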
# Tests torch.det and its alias, torch.linalg.det, vs. NumPy
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_det(self, device, dtype):
tensors = (
torch.randn((2, 2), device=device, dtype=dtype),
torch.randn((129, 129), device=device, dtype=dtype),
torch.randn((3, 52, 52), device=device, dtype=dtype),
torch.randn((4, 2, 26, 26), device=device, dtype=dtype))
ops = (torch.det, torch.Tensor.det,
torch.linalg.det)
for t in tensors:
expected = np.linalg.det(t.cpu().numpy())
for op in ops:
actual = op(t)
self.assertEqual(actual, expected)
self.compare_with_numpy(op, np.linalg.det, t)
# NOTE: det requires a 2D+ tensor
t = torch.randn(1, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(t)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(shape, batch, uplo):
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# sign of eigenvectors is not unique and therefore absolute values are compared
self.assertEqual(abs(actual_v), abs(expected_v))
# additionally we can multiply the eigenvector with a phase factor e^{i\phi} and then compare the values
# let's choose the convention that the first element of the eigenvectors from torch and numpy be the same
# for real inputs, this phase factor is plus or minus one
if matrix.numel() > 0:
phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])
actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)
self.assertEqual(actual_v_rotated, expected_v)
# check the out= variant
out_w = torch.empty_like(actual_w)
out_v = torch.empty_like(actual_v)
ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))
self.assertEqual(ans_w, out_w)
self.assertEqual(ans_v, out_v)
self.assertEqual(ans_w, actual_w)
self.assertEqual(abs(ans_v), abs(actual_v))
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test(shape, batch, uplo)
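# Editor-added illustrative sketch: eigenvectors from torch.linalg.eigh are only defined up to a
# per-column phase (a sign for real inputs), which is why the test above compares absolute values
# and then re-phases against NumPy. A minimal, phase-insensitive check of the same property:
#
#   A = torch.tensor([[2., 1.], [1., 2.]])
#   w, v = torch.linalg.eigh(A)                              # ascending eigenvalues [1., 3.]
#   assert torch.allclose(v @ torch.diag(w) @ v.T, A)        # A == V diag(w) V^T for any sign choice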
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_lower_uplo(self, device, dtype):
def run_test(shape, batch, uplo):
# check lower case uplo
# use non-symmetric input to check whether uplo argument is working as intended
matrix = torch.randn(shape, shape, *batch, dtype=dtype, device=device)
expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
self.assertEqual(abs(actual_v), abs(expected_v))
uplos = ["u", "l"]
for uplo in uplos:
run_test(3, (2, 2), uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_eigh_errors_and_warnings(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
# eigh requires a square matrix
t = torch.randn(2, 3, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
torch.linalg.eigh(t)
# eigh requires 'uplo' parameter to be 'U' or 'L'
t = torch.randn(3, 3, device=device, dtype=dtype)
for uplo in | |
address = self.get_ip_address(ip_address)
if address and address.association_id:
self.conn.disassociate_address(ip_address, association_id=address.association_id)
else:
self.conn.disassociate_address(ip_address)
if len(ip_addresses) == 1:
msg = _(u'Successfully disassociated the IP from the instance.')
else:
prefix = _(u'Successfully sent request to disassociate the following IP addresses:')
msg = u'{0} {1}'.format(prefix, ', '.join(ip_addresses))
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=self.location)
msg = _(u'Failed to disassociate the IP address from the instance.')
self.request.session.flash(msg, queue=Notification.ERROR)
return HTTPFound(location=self.location)
class InstancesJsonView(LandingPageView, BaseInstanceView):
def __init__(self, request):
super(InstancesJsonView, self).__init__(request)
self.conn = self.get_connection()
self.vpc_conn = self.get_connection(conn_type='vpc')
self.cw_conn = self.get_connection(conn_type='cloudwatch')
self.vpcs = self.get_all_vpcs()
@view_config(route_name='instances_json', renderer='json', request_method='POST')
def instances_json(self):
if not (self.is_csrf_valid()):
return JSONResponse(status=400, message="missing CSRF token")
vpc_subnets = self.vpc_conn.get_all_subnets()
keypairs = self.get_all_keypairs()
security_groups = self.get_all_security_groups()
# Get alarms for instances and build a list of instance ids to optimize alarm status fetch
alarms = [alarm for alarm in self.cw_conn.describe_alarms() if 'InstanceId' in alarm.dimensions]
alarm_resource_ids = set(list(
chain.from_iterable([chain.from_iterable(alarm.dimensions.values()) for alarm in alarms])
))
instances = []
filters = {}
availability_zone_param = self.request.params.getall('availability_zone')
if availability_zone_param:
filters.update({'availability-zone': availability_zone_param})
instance_state_param = self.request.params.getall('state')
if instance_state_param:
filters.update({'instance-state-name': instance_state_param})
instance_type_param = self.request.params.getall('instance_type')
if instance_type_param:
filters.update({'instance-type': instance_type_param})
security_group_param = self.request.params.getall('security_group')
if security_group_param:
filters.update({'group-name': [self.unescape_braces(sg) for sg in security_group_param]})
root_device_type_param = self.request.params.getall('root_device_type')
if root_device_type_param:
filters.update({'root-device-type': root_device_type_param})
# Don't filter by these request params in Python, as they're included in the "filters" params sent to the CLC
# Note: the choices are from attributes in InstancesFiltersForm
ignore_params = [
'availability_zone', 'instance_type', 'state', 'security_group',
'scaling_group', 'root_device_type', 'roles']
filtered_items = self.filter_items(self.get_items(filters=filters), ignore=ignore_params)
if self.request.params.get('scaling_group'):
filtered_items = self.filter_by_scaling_group(filtered_items)
if self.request.params.get('roles'):
filtered_items = self.filter_by_roles(filtered_items)
transitional_states = ['pending', 'stopping', 'shutting-down']
elastic_ips = [ip.public_ip for ip in self.conn.get_all_addresses()]
for instance in filtered_items:
is_transitional = instance.state in transitional_states
security_groups_array = sorted({
'name': group.name,
'id': group.id,
'rules_count': self.get_security_group_rules_count_by_id(security_groups, group.id)
} for group in instance.groups)
security_group_names = [group.name for group in instance.groups] # Needed for sortable tables
if instance.platform is None:
instance.platform = _(u"linux")
has_elastic_ip = instance.ip_address in elastic_ips
exists_key = True if self.get_keypair_by_name(keypairs, instance.key_name) else False
sortable_ip = self.get_sortable_ip(instance.ip_address)
alarm_status = ''
if instance.id in alarm_resource_ids:
alarm_status = Alarm.get_resource_alarm_status(instance.id, alarms)
vpc_subnet_display = self.get_vpc_subnet_display(
instance.subnet_id, vpc_subnet_list=vpc_subnets) if instance.subnet_id else ''
sortable_subnet_zone = "{0}{1}{2}".format(vpc_subnet_display, instance.vpc_name, instance.placement)
instances.append(dict(
id=instance.id,
name=TaggedItemView.get_display_name(instance, escapebraces=False),
instance_type=instance.instance_type,
image_id=instance.image_id,
ip_address=instance.ip_address,
sortable_ip=sortable_ip,
has_elastic_ip=has_elastic_ip,
public_dns_name=instance.public_dns_name,
launch_time=instance.launch_time,
availability_zone=instance.placement,
platform=instance.platform,
root_device_type=instance.root_device_type,
security_groups=security_groups_array,
sortable_secgroups=','.join(security_group_names),
sortable_subnet_zone=sortable_subnet_zone,
key_name=instance.key_name,
exists_key=exists_key,
vpc_name=instance.vpc_name,
subnet_id=instance.subnet_id if instance.subnet_id else None,
vpc_subnet_display=vpc_subnet_display,
status=instance.state,
alarm_status=alarm_status,
tags=TaggedItemView.get_tags_display(instance.tags),
transitional=is_transitional,
running_create=True if instance.tags.get('ec_bundling') else False,
scaling_group=instance.tags.get('aws:autoscaling:groupName')
))
image_ids = [i['image_id'] for i in instances]
image_ids = list(set(image_ids))
images = self.conn.get_all_images(filters={'image-id': image_ids})
for instance in instances:
image = self.get_image_by_id(images, instance['image_id'])
image_name = None
if image:
image_name = u'{0}{1}'.format(
image.name if image.name else image.id,
u' ({0})'.format(image.id) if image.name else ''
)
instance['image_name'] = image_name
return dict(results=instances)
@view_config(route_name='instances_roles_json', renderer='json', request_method='GET')
def instances_roles_json(self):
instances = {}
iam_conn = self.get_connection(conn_type='iam')
result = iam_conn.list_instance_profiles()
instance_profiles_list = result.list_instance_profiles_response.list_instance_profiles_result.instance_profiles
for item in self.get_items():
if item.instance_profile:
arn = item.instance_profile['arn']
profile_name = arn[(arn.rindex('/') + 1):]
# look up profile in list
for profile in instance_profiles_list:
if profile.instance_profile_name == profile_name:
instances[item.id] = profile.roles.role_name
return dict(results=instances)
def get_items(self, filters=None):
if self.conn:
instances = []
with boto_error_handler(self.request):
for reservation in self.conn.get_all_reservations(filters=filters):
for instance in reservation.instances:
if instance.vpc_id:
vpc = self.get_vpc_by_id(instance.vpc_id)
instance.vpc_name = TaggedItemView.get_display_name(vpc)
else:
instance.vpc_name = ''
instances.append(instance)
return instances
return []
def get_all_vpcs(self):
return self.vpc_conn.get_all_vpcs() if self.vpc_conn else []
def get_vpc_by_id(self, vpc_id):
for vpc in self.vpcs:
if vpc_id == vpc.id:
return vpc
def get_all_keypairs(self):
return self.conn.get_all_key_pairs() if self.conn else []
def get_keypair_by_name(self, keypairs, keypair_name):
for keypair in keypairs:
if keypair_name == keypair.name:
return keypair
def get_all_security_groups(self):
return self.conn.get_all_security_groups() if self.conn else []
def get_security_group_by_id(self, security_groups, id):
for sgroup in security_groups:
if sgroup.id == id:
return sgroup
def get_security_group_rules_count_by_id(self, security_groups, id):
sgroup = self.get_security_group_by_id(security_groups, id)
if sgroup:
return len(sgroup.rules)
return None
@staticmethod
def get_image_by_id(images, image_id):
if images:
for image in images:
if image.id == image_id:
return image
return None
@staticmethod
def get_sortable_ip(ip_address):
if not ip_address:
return 0
return long("".join(["{0:08b}".format(int(num)) for num in ip_address.split('.')]), 2)
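# Editor-added illustrative note: get_sortable_ip() packs each octet into 8 bits so dotted-quad
# strings sort numerically. Example values (hypothetical addresses):
#
#   InstancesJsonView.get_sortable_ip('10.0.0.1')            # -> 167772161
#   InstancesJsonView.get_sortable_ip('192.168.1.1')         # -> 3232235777
#   InstancesJsonView.get_sortable_ip(None)                  # -> 0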
def filter_by_scaling_group(self, items):
filtered_items = []
for item in items:
autoscaling_tag = item.tags.get('aws:autoscaling:groupName')
if autoscaling_tag:
for scaling_group in self.request.params.getall('scaling_group'):
if autoscaling_tag == self.unescape_braces(scaling_group):
filtered_items.append(item)
return filtered_items
def filter_by_roles(self, items):
iam_conn = self.get_connection(conn_type="iam")
filtered_items = []
profiles = []
for role in self.request.params.getall('roles'):
instance_profiles_list = iam_conn.list_instance_profiles(
path_prefix='/' + role).list_instance_profiles_response.list_instance_profiles_result.instance_profiles
for profile in instance_profiles_list:
profiles.append(profile.instance_profile_id)
for item in items:
if len(item.instance_profile) > 0 and item.instance_profile['id'] in profiles:
filtered_items.append(item)
return filtered_items
class InstanceView(TaggedItemView, BaseInstanceView):
VIEW_TEMPLATE = '../templates/instances/instance_view.pt'
def __init__(self, request, instance=None, **kwargs):
super(InstanceView, self).__init__(request, **kwargs)
self.title_parts = [_(u'Instance'), request.matchdict.get('id'), _(u'General')]
self.conn = self.get_connection()
self.iam_conn = None
if BaseView.has_role_access(request):
self.iam_conn = self.get_connection(conn_type="iam")
self.instance = instance or self.get_instance()
self.image = self.get_image(self.instance)
self.scaling_group = self.get_scaling_group()
self.instance_form = InstanceForm(
self.request, instance=self.instance, conn=self.conn, formdata=self.request.params or None)
self.start_form = StartInstanceForm(self.request, formdata=self.request.params or None)
self.stop_form = StopInstanceForm(self.request, formdata=self.request.params or None)
self.reboot_form = RebootInstanceForm(self.request, formdata=self.request.params or None)
self.terminate_form = TerminateInstanceForm(self.request, formdata=self.request.params or None)
self.associate_ip_form = AssociateIpToInstanceForm(
self.request, conn=self.conn, instance=self.instance, formdata=self.request.params or None)
self.disassociate_ip_form = DisassociateIpFromInstanceForm(self.request, formdata=self.request.params or None)
self.tagged_obj = self.instance
self.location = self.get_redirect_location()
self.instance_name = TaggedItemView.get_display_name(self.instance)
self.security_groups_array = sorted(
{'name': group.name, 'id': group.id} for group in self.instance.groups) if self.instance else []
self.security_group_list = self.get_security_group_list()
self.security_group_list_string = ','.join(
[sgroup['id'] for sgroup in self.security_group_list]) if self.security_group_list else ''
self.instance_keypair = self.instance.key_name if self.instance else ''
self.has_elastic_ip = self.check_has_elastic_ip(self.instance.ip_address) if self.instance else False
self.role = None
if BaseView.has_role_access(request) and self.instance and self.instance.instance_profile:
arn = self.instance.instance_profile['arn']
profile_name = arn[(arn.rindex('/') + 1):]
inst_profile = self.iam_conn.get_instance_profile(profile_name)
self.role = inst_profile.roles.member.role_name if inst_profile.roles else None
self.running_create = False
if self.instance:
self.running_create = True if self.instance.tags.get('ec_bundling') else False
self.render_dict = dict(
instance=self.instance,
instance_name=self.instance_name,
instance_security_groups=self.security_group_list_string,
instance_keypair=self.instance_keypair,
instance_monitoring_state=self.get_monitoring_state(self.instance),
monitoring_tab_title=self.get_monitoring_tab_title(self.instance),
security_group_list=self.security_group_list,
image=self.image,
scaling_group=self.scaling_group,
instance_form=self.instance_form,
start_form=self.start_form,
stop_form=self.stop_form,
reboot_form=self.reboot_form,
terminate_form=self.terminate_form,
associate_ip_form=self.associate_ip_form,
disassociate_ip_form=self.disassociate_ip_form,
has_elastic_ip=self.has_elastic_ip,
vpc_subnet_display=self.get_vpc_subnet_display(self.instance.subnet_id) if self.instance else None,
role=self.role,
running_create=self.running_create,
controller_options_json=self.get_controller_options_json(),
is_vpc_supported=self.is_vpc_supported,
)
@view_config(route_name='instance_view', renderer=VIEW_TEMPLATE, request_method='GET')
def instance_view(self):
if self.instance is None:
raise HTTPNotFound()
return self.render_dict
@view_config(route_name='instance_update', renderer=VIEW_TEMPLATE, request_method='POST')
def instance_update(self):
if self.instance and self.instance_form.validate():
with boto_error_handler(self.request, self.location):
# Update tags
self.update_tags()
# Save Name tag
name = self.request.params.get('name', '')
self.update_name_tag(name)
# Update stopped instance
if self.instance.state == 'stopped':
instance_type = self.request.params.get('instance_type')
kernel = self.request.params.get('kernel')
ramdisk = self.request.params.get('ramdisk')
self.log_request(_(u"Updating instance {0} (type={1}, kernel={2}, ramidisk={3})").format(
self.instance.id, instance_type, kernel, ramdisk))
if self.instance.instance_type != instance_type:
self.conn.modify_instance_attribute(self.instance.id, 'instanceType', instance_type)
user_data = self.get_user_data()
if user_data:
self.conn.modify_instance_attribute(self.instance.id, 'userData', base64.b64encode(user_data))
if kernel != '' and self.instance.kernel != kernel:
self.conn.modify_instance_attribute(self.instance.id, 'kernel', kernel)
if ramdisk != '' and self.instance.ramdisk != ramdisk:
self.conn.modify_instance_attribute(self.instance.id, 'ramdisk', ramdisk)
# Start instance if desired
if self.request.params.get('start_later'):
self.log_request(_(u"Starting instance {0}").format(self.instance.id))
self.instance.start()
msg = _(u'Successfully modified instance')
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=self.location)
return self.render_dict
@view_config(route_name='instance_start', renderer=VIEW_TEMPLATE, request_method='POST')
def instance_start(self):
if self.instance and self.start_form.validate():
with boto_error_handler(self.request, self.location):
self.log_request(_(u"Starting instance {0}").format(self.instance.id))
# Can only start an instance if it has a volume attached
self.instance.start()
msg = _(u'Successfully sent start instance request. It may take a moment to start the instance.')
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=self.location)
return self.render_dict
@view_config(route_name='instance_stop', renderer=VIEW_TEMPLATE, request_method='POST')
def instance_stop(self):
if self.instance and self.stop_form.validate():
# Only EBS-backed instances can be stopped
if self.instance.root_device_type == 'ebs':
with boto_error_handler(self.request, self.location):
self.log_request(_(u"Stopping instance {0}").format(self.instance.id))
self.instance.stop()
msg = _(u'Successfully sent stop instance request. It may take a moment to stop the instance.')
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=self.location)
return self.render_dict
@view_config(route_name='instance_reboot', renderer=VIEW_TEMPLATE, request_method='POST')
def instance_reboot(self):
location = self.request.route_path('instance_view', id=self.instance.id)
if self.instance and self.reboot_form.validate():
with boto_error_handler(self.request, self.location):
self.log_request(_(u"Rebooting instance {0}").format(self.instance.id))
rebooted = self.instance.reboot()
msg = _(u'Successfully sent reboot request. It may take a moment to reboot the instance.')
self.request.session.flash(msg, queue=Notification.SUCCESS)
if not rebooted:
msg = _(u'Unable to reboot the instance.')
self.request.session.flash(msg, queue=Notification.ERROR)
return HTTPFound(location=location)
return self.render_dict
@view_config(route_name='instance_terminate', renderer=VIEW_TEMPLATE, request_method='POST')
def instance_terminate(self):
if self.instance and self.terminate_form.validate():
with boto_error_handler(self.request, self.location):
self.log_request(_(u"Terminating instance {0}").format(self.instance.id))
self.instance.terminate()
msg = _(
u'Successfully sent terminate instance request. It may take a moment to shut down the instance.')
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=self.location)
return self.render_dict
@view_config(route_name='instance_get_password', request_method='POST', renderer='json')
def instance_get_password(self):
if not self.is_csrf_valid():
return JSONResponse(status=400, message="missing CSRF token")
instance_id = self.request.matchdict.get('id')
with boto_error_handler(self.request, self.location):
try:
passwd_data = self.conn.get_password_data(instance_id)
priv_key_string = self.request.params.get('key')
priv_key_string = base64.b64decode(priv_key_string)
user_priv_key = RSA.load_key_string(priv_key_string)
string_to_decrypt = base64.b64decode(passwd_data)
ret = user_priv_key.private_decrypt(string_to_decrypt, RSA.pkcs1_padding)
return dict(results=dict(instance=instance_id, password=ret))
except RSA.RSAError: # likely, bad key
return JSONResponse(status=400, message=_(
u"There was a problem with the key, please try again, verifying the correct private key is used."))
@view_config(route_name='instance_associate', renderer=VIEW_TEMPLATE, | |
# tdameritrade API client (source repo: ka05/tdameritrade)
import pandas as pd
import os
from .session import TDASession
from .exceptions import handle_error_response, TDAAPIError
from .urls import (
# --ORDERS--
CANCEL_ORDER,
# GET_ORDER,
# GET_ORDERS_BY_PATH,
GET_ORDER_BY_QUERY,
PLACE_ORDER,
REPLACE_ORDER,
STATUS_VALUES,
# --SAVED ORDERS--
CREATE_SAVED_ORDER,
DELETE_SAVED_ORDER,
GET_SAVED_ORDER,
GET_SAVED_ORDER_BY_PATH,
REPLACE_SAVED_ORDER,
# --ACCOUNTS--
GET_ACCOUNT,
GET_ACCOUNTS,
# --AUTHENTICATION--
# ACCESS_TOKEN,
# --INSTRUMENTS--
SEARCH_INSTRUMENTS,
SEARCH_INSTRUMENT_PROJECTION,
GET_INSTRUMENT,
# --MARKET HOURS--
GET_HOURS_FOR_MULTIPLE_MARKETS,
MARKETS_VALUES,
GET_HOURS_FOR_SINGLE_MARKET,
# --MOVERS--
MOVERS,
DIRECTION_VALUES,
CHANGE_VALUES,
# --OPTION CHAINS--
GET_OPTION_CHAIN,
CONTRACT_TYPE_VALUES,
STRATEGY_VALUES,
RANGE_VALUES,
OPTION_TYPE_VALUES,
OPTION_EXPMONTH_VALUES,
# --PRICE HISTORY--
GET_PRICE_HISTORY,
PERIOD_TYPE_VALUES,
FREQUENCY_TYPE_VALUES,
# --QUOTES--
# GET_QUOTE,
GET_QUOTES,
# --TRANSACTION HISTORY--
# GET_TRANSACTION,
# GET_TRANSACTIONS,
GET_TRANSCATION_TYPE_VALUES,
# --User Info/Prefs--
GET_PREFERENCES,
# GET_STREAMER_SUBSCRIPTION_KEYS,
# GET_USER_PRINCIPALS,
# USER_PRINCIPALS_FIELDS_VALUES,
UPDATE_PREFERENCES,
# --WATCHLIST--
CREATE_WATCHLIST,
DELETE_WATCHLIST,
GET_WATCHLIST,
# GET_WATCHLISTS_MULTIPLE_ACCOUNTS,
GET_WATCHLISTS,
GET_WATCHLISTS_MULTIPLE_ACCOUNTS,
REPLACE_WATCHLIST,
UPDATE_WATCHLIST)
def response_is_valid(resp):
return resp.status_code in (200, 201)
class TDClient(object):
def __init__(self, client_id=None, refresh_token=None, account_ids=None):
self._clientId = client_id or os.environ['TDAMERITRADE_CLIENT_ID']
self._refreshToken = refresh_token or os.environ['TDAMERITRADE_REFRESH_TOKEN']
self.accountIds = account_ids or []
self.session = TDASession(self._refreshToken, self._clientId)
def _request(self, url, method="GET", params=None, *args, **kwargs):
resp = self.session.request(method, url, params=params, *args, **kwargs)
if not response_is_valid(resp):
handle_error_response(resp)
return resp
def accounts(self, positions=False, orders=False):
'''get user accounts. caches account ids in accountIds if not provided during initialization
Args:
positions (bool): include position information
orders (bool): include order information
'''
ret = {}
if positions or orders:
params = {'fields': []}
if positions:
params['fields'].append('positions')
if orders:
params['fields'].append('orders')
params['fields'] = ','.join(params['fields'])
else:
params = {}
if self.accountIds:
for acc in self.accountIds:
resp = self._request(GET_ACCOUNT.format(accountId=acc), params=params)
ret[acc] = resp.json()
else:
resp = self._request(GET_ACCOUNTS, params=params)
for account in resp.json():
ret[account['securitiesAccount']['accountId']] = account
self.accountIds = [int(accountId) for accountId in ret]
return ret
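# Editor-added illustrative sketch: typical client setup and account lookup. The credentials are
# placeholders, and the response layout follows the TD Ameritrade accounts endpoint as consumed
# by the code above (accounts keyed by account id, each holding a 'securitiesAccount' dict).
#
#   c = TDClient(client_id='MY_CLIENT_ID', refresh_token='MY_REFRESH_TOKEN')
#   accts = c.accounts(positions=True)
#   first_id = next(iter(accts))
#   positions = accts[first_id]['securitiesAccount'].get('positions', [])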
def accountsDF(self):
'''get accounts as dataframe'''
return pd.io.json.json_normalize(self.accounts())
def transactions(self, accountId=None, type=None, symbol=None, startDate=None, endDate=None):
'''get transactions by account
Args:
accountId (int): account id (defaults to client's ids)
type (str): transaction type, in ('ALL', 'TRADE', 'BUY_ONLY', 'SELL_ONLY', 'CASH_IN_OR_CASH_OUT', 'CHECKING', 'DIVIDEND', 'INTEREST', 'OTHER', 'ADVISOR_FEES')
symbol (str): transactions for given symbol
startDate (str): start date as string yyyy-MM-dd
endDate (str): end date as string yyyy-MM-dd
'''
if accountId:
accounts = [accountId]
else:
accounts = self.accountIds
if type not in GET_TRANSCATION_TYPE_VALUES:
raise TDAAPIError('Transaction type must be in {}'.format(GET_TRANSCATION_TYPE_VALUES))
ret = {}
for account in accounts:
transactions = GET_ACCOUNT.format(accountId=account) + "/transactions"
ret[account] = self._request(transactions,
params={
'type': type,
'symbol': symbol,
'startDate': startDate,
'endDate': endDate
}).json()
return ret
def transactionsDF(self, accountId=None, type=None, symbol=None, startDate=None, endDate=None):
'''get transaction information as Dataframe'''
return pd.json_normalize(self.transactions(accountId=accountId, type=type, symbol=symbol, startDate=startDate, endDate=endDate))
def search(self, symbol, projection='symbol-search'):
'''Search for a symbol
Args:
symbol (string): string to search for
projection (string): projection to use, in ('symbol-search', 'symbol-regex', 'desc-search', 'desc-regex', 'fundamental')
'''
if projection not in SEARCH_INSTRUMENT_PROJECTION:
raise TDAAPIError('Projection must be in {}'.format(SEARCH_INSTRUMENT_PROJECTION))
return self._request(SEARCH_INSTRUMENTS,
params={'symbol': symbol,
'projection': projection}).json()
def searchDF(self, symbol, projection='symbol-search'):
'''search for symbol as a dataframe'''
ret = []
dat = self.search(symbol, projection)
for symbol in dat:
ret.append(dat[symbol])
return pd.DataFrame(ret)
def fundamentalSearch(self, symbol):
'''helper to search for a symbol using fundamental projection'''
return self.search(symbol, 'fundamental')
def fundamentalSearchDF(self, symbol):
'''helper to search for a symbol using fundamental projection and return DF'''
return self.searchDF(symbol, 'fundamental')
def instrument(self, cusip):
'''get instrument info from cusip
Args:
cusip (str): the cusip to use, can find it by looking up in search
'''
return self._request(GET_INSTRUMENT.format(cusip=cusip)).json()
def instrumentDF(self, cusip):
'''get instrument info from cusip as dataframe'''
return pd.DataFrame(self.instrument(cusip))
def quote(self, symbol):
'''get quote for symbol
Args:
symbol (str): symbol to get quote for
'''
if not isinstance(symbol, list):
symbol = [symbol]
return self._request(GET_QUOTES,
params={'symbol': [s.upper() for s in symbol]}).json()
def quoteDF(self, symbol):
'''get quote, format as dataframe'''
x = self.quote(symbol)
return pd.DataFrame(x).T.reset_index(drop=True)
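# Editor-added illustrative sketch: quote() accepts a single symbol or a list and returns a dict
# keyed by the upper-cased symbols; 'c' is a TDClient instance as above. The 'lastPrice' field
# name is taken from the TD Ameritrade quotes response and is shown here as an assumption.
#
#   q = c.quote(['aapl', 'msft'])                            # {'AAPL': {...}, 'MSFT': {...}}
#   last = q['AAPL'].get('lastPrice')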
def history(self, symbol,
periodType=None, period=None,
frequencyType=None, frequency=None,
endDate=None, startDate=None,
needExtendedHoursData=True):
'''get price history
Args:
symbol (str): symbol to get price history for
periodType (str): period type to request
period (int): period to use
frequencyType (str): frequency type to use
frequency (int): frequency to use
endDate (int): End date as milliseconds since epoch. If startDate and endDate are provided, period should not be provided. Default is previous trading day.
startDate (int): Start date as milliseconds since epoch. If startDate and endDate are provided, period should not be provided.
needExtendedHoursData (bool): true to return extended hours data, false for regular market hours only. Default is true
'''
params = {}
if periodType or period:
if periodType not in PERIOD_TYPE_VALUES:
raise TDAAPIError('Period type must be in {}'.format(PERIOD_TYPE_VALUES))
params['period'] = period
params['periodType'] = periodType
if frequencyType or frequency:
if frequencyType not in FREQUENCY_TYPE_VALUES:
raise TDAAPIError('Frequency type must be in {}'.format(FREQUENCY_TYPE_VALUES))
params['frequency'] = frequency
params['frequencyType'] = frequencyType
if startDate:
params['startDate'] = startDate
if endDate:
params['endDate'] = endDate
return self._request(GET_PRICE_HISTORY.format(symbol=symbol), params=params).json()
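# Editor-added illustrative sketch: fetching one year of daily candles; 'c' is a TDClient as
# above, and the parameter values are examples only (see PERIOD_TYPE_VALUES and
# FREQUENCY_TYPE_VALUES for the accepted strings).
#
#   hist = c.history('AAPL', periodType='year', period=1, frequencyType='daily', frequency=1)
#   # hist['candles'] is a list of dicts with open/high/low/close/volume and a ms-epoch datetime
#   df = c.historyDF('AAPL', periodType='year', period=1, frequencyType='daily', frequency=1)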
def historyDF(self, symbol, **kwargs):
'''get history as dataframe'''
x = self.history(symbol, **kwargs)
df = pd.DataFrame(x['candles'])
df['datetime'] = pd.to_datetime(df['datetime'], unit='ms')
return df
def options(self,
symbol,
contractType='ALL',
strikeCount=-1,
includeQuotes=False,
strategy='SINGLE',
interval=None,
strike=None,
range='ALL',
fromDate=None,
toDate=None,
volatility=None,
underlyingPrice=None,
interestRate=None,
daysToExpiration=None,
expMonth='ALL',
optionType='ALL'):
'''request option chain information
Args:
symbol (str): Enter one symbol
contractType (str): Type of contracts to return in the chain. Can be CALL, PUT, or ALL. Default is ALL.
strikeCount (int): The number of strikes to return above and below the at-the-money price.
includeQuotes (bool): Include quotes for options in the option chain. Can be TRUE or FALSE. Default is FALSE.
strategy (str): Passing a value returns a Strategy Chain. Possible values are SINGLE, ANALYTICAL (allows use of the volatility, underlyingPrice, interestRate, and daysToExpiration params to calculate theoretical values), COVERED, VERTICAL, CALENDAR, STRANGLE, STRADDLE, BUTTERFLY, CONDOR, DIAGONAL, COLLAR, or ROLL. Default is SINGLE.
interval (int): Strike interval for spread strategy chains (see strategy param).
strike (float): Provide a strike price to return options only at that strike price.
range (str): Returns options for the given range. Possible values are:
ITM: In-the-money
NTM: Near-the-money
OTM: Out-of-the-money
SAK: Strikes Above Market
SBK: Strikes Below Market
SNK: Strikes Near Market
ALL: All Strikes
Default is ALL.
fromDate (str): Only return expirations after this date. For strategies, expiration refers to the nearest term expiration in the strategy. Valid ISO-8601 formats are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
toDate (str): Only return expirations before this date. For strategies, expiration refers to the nearest term expiration in the strategy. Valid ISO-8601 formats are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
volatility (float): Volatility to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
underlyingPrice (float): Underlying price to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
interestRate (float): Interest rate to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
daysToExpiration (int): Days to expiration to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
expMonth (str): Return only options expiring in the specified month. Month is given in the three character format.
Example: JAN
Default is ALL.
optionType (str): Type of contracts to return. Possible values are:
S: Standard contracts
NS: Non-standard contracts
ALL: All contracts
Default is ALL.
'''
params = {'symbol': symbol}
if contractType not in CONTRACT_TYPE_VALUES:
raise TDAAPIError('Contract type must be in {}'.format(CONTRACT_TYPE_VALUES))
params['contractType'] = contractType
if strikeCount:
params['strikeCount'] = strikeCount
params['includeQuotes'] = includeQuotes
if strategy not in STRATEGY_VALUES:
raise TDAAPIError('Strategy must be in {}'.format(STRATEGY_VALUES))
params['strategy'] = strategy
if interval:
params['interval'] = interval
if strike:
params['strike'] = strike
if range not in RANGE_VALUES:
raise TDAAPIError('Range must be in {}'.format(RANGE_VALUES))
params['range'] = range
if fromDate:
params['fromDate'] = fromDate
if toDate:
params['toDate'] = toDate
if strategy == 'ANALYTICAL':
if volatility:
params['volatility'] = volatility
if underlyingPrice:
params['underlyingPrice'] = underlyingPrice
if interestRate:
params['interestRate'] = interestRate
if daysToExpiration:
params['daysToExpiration'] = daysToExpiration
if expMonth not in OPTION_EXPMONTH_VALUES:
raise TDAAPIError('Expiration month must be in {}'.format(OPTION_EXPMONTH_VALUES))
params['expMonth'] = expMonth
if optionType not in OPTION_TYPE_VALUES:
raise TDAAPIError('Option type must be in {}'.format(OPTION_TYPE_VALUES))
return self._request(GET_OPTION_CHAIN, params=params).json()
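# Editor-added illustrative sketch: requesting a near-the-money call chain; 'c' is a TDClient as
# above and the symbol/filter values are placeholders.
#
#   chain = c.options('SPY', contractType='CALL', strikeCount=5, range='NTM')
#   calls = chain['callExpDateMap']                          # {expiration: {strike: [contract, ...]}}
#   df = c.optionsDF('SPY', contractType='CALL', strikeCount=5, range='NTM')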
def optionsDF(self,
symbol,
contractType='ALL',
strikeCount=-1,
includeQuotes=False,
strategy='SINGLE',
interval=None,
strike=None,
range='ALL',
fromDate=None,
toDate=None,
volatility=None,
underlyingPrice=None,
interestRate=None,
daysToExpiration=None,
expMonth='ALL',
optionType='ALL'):
'''return options chain as dataframe'''
ret = []
dat = self.options(symbol=symbol,
contractType=contractType,
strikeCount=strikeCount,
includeQuotes=includeQuotes,
strategy=strategy,
interval=interval,
strike=strike,
range=range,
fromDate=fromDate,
toDate=toDate,
volatility=volatility,
underlyingPrice=underlyingPrice,
interestRate=interestRate,
daysToExpiration=daysToExpiration,
expMonth=expMonth,
optionType=optionType)
for date in dat['callExpDateMap']:
for strike in dat['callExpDateMap'][date]:
ret.extend(dat['callExpDateMap'][date][strike])
for date in dat['putExpDateMap']:
| |
"""
@Author:
@Date: 10/01/2019
"""
import os
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from collections import namedtuple
from utility.json_utils import load_json
import functools
MemoryStats = namedtuple('MemoryStats',
'usage'
' hits'
' correct'
' correct_and_hit'
' correct_and_no_hit'
' samples'
' top_R_hits'
' avg_memory_percentage')
MemoryMetrics = namedtuple('MemoryMetrics',
'memory_usage'
' coverage'
' coverage_precision'
' recall'
' memory_precision'
' supervision_precision'
' non_memory_precision'
' top_R_coverage_precision'
' avg_memory_percentage')
def plot_attention_contour_v2(attention, name=None):
"""
Plots Memory Networks attention distribution as a contour plot.
"""
name = name or ""
samples = attention.shape[0]
memory_size = attention.shape[2]
figures = []
for hop in range(attention.shape[1]):
# for hop in range(1):
xpos = np.arange(memory_size)
ypos = np.arange(samples)
xpos, ypos = np.meshgrid(xpos, ypos)
zpos = np.reshape(attention[:, hop, :], newshape=(samples, memory_size))
fig, ax = plt.subplots(1, 1)
ax.set_title('{0} Hop {1}'.format(name, hop + 1))
CS = ax.contourf(xpos, ypos, zpos, 15, levels=np.arange(0, np.max(attention), np.max(attention) * 0.001),
cmap=cm.coolwarm)
divider = make_axes_locatable(ax)
cax = divider.new_vertical(size="5%", pad=0.5, pack_start=True)
fig.add_axes(cax)
cbar = plt.colorbar(CS, cax=cax, orientation='horizontal')
# Show grid
ax.grid(True)
ax.set_xticks(np.arange(memory_size))
# Labeling
ax.set_xlabel('Memory slots')
ax.set_ylabel('Samples')
figures.append(fig)
return figures
def memory_histogram(attention_weights, model_path, filter_unfair=False,
true_values=None, memory_labels=None, attention_mode='softmax',
show_max_only=True):
"""
Plots Memory Network attention distribution as histogram.
"""
def get_predictions(attention_values, attention_mode):
if attention_mode == 'softmax':
return np.argmax(attention_values, axis=2).ravel()[:, np.newaxis]
elif attention_mode == 'sigmoid':
if not show_max_only:
attention_values = np.round(attention_values).astype(np.int32)
return np.where(attention_values)[-1][:, np.newaxis]
else:
# attention_values = np.round(attention_values).astype(np.int32)
filtered_attention_values = []
for mem_values in attention_values:
mem_indexes = [np.argmax(values) for values in mem_values if np.max(values) >= 0.5]
if mem_indexes:
filtered_attention_values.append(mem_indexes)
return filtered_attention_values
else:
raise RuntimeError('Invalid attention mode! Got: {}'.format(attention_mode))
memory_size = None
counter = None
total_unfair = 0
for idx, weight_name in enumerate(attention_weights):
sub_path = os.path.join(model_path, weight_name)
fold_name = weight_name.split('fold')[1].split('_')[1]
loaded_weights = load_json(sub_path)
if idx == 0:
counter = {slot: 0 for slot in range(loaded_weights.shape[-1])}
if memory_size is None:
memory_size = loaded_weights.shape[2]
total_unfair += np.sum(true_values[int(fold_name)])
if filter_unfair:
loaded_weights = loaded_weights[np.argwhere(true_values[int(fold_name)]).ravel()]
# selected_memories = np.argmax(loaded_weights, axis=2)
selected_memories = get_predictions(loaded_weights, attention_mode)
if len(selected_memories) > 0:
selected_memories = [list(set(item)) for item in selected_memories]
flat_selections = [item for seq in selected_memories for item in seq]
fold_counts = Counter(flat_selections)
for key, item in fold_counts.items():
counter[key] += item
print('Fold {0} counts: {1}'.format(fold_name, fold_counts))
else:
print('Fold {0} counts: {1}'.format(fold_name, {key: 0 for key in range(loaded_weights.shape[-1])}))
print('Distinct memories uses: ', counter)
print("'Total unfair: ", total_unfair)
fig, ax = plt.subplots()
ax.set_title('Memory usage', fontsize=32)
memory_indexes = np.arange(len(counter)) + 1
used_memories = np.array(list(counter.keys()))
if memory_labels is not None:
used_memories = [memory_labels[item] for item in used_memories]
counter_values = [counter[key] for key in memory_indexes - 1]
ax.bar(memory_indexes, counter_values, align='center')
ax.set_xlabel('Memory slots', fontsize=32)
ax.set_ylabel('Selections amount', fontsize=32)
ax.set_xticks(memory_indexes)
ax.set_xticklabels(used_memories)
ax.tick_params(axis='both', labelsize=24)
for idx, value in enumerate(counter_values):
if value > 0:
ax.text(idx + 0.88, value + 3, str(value), color='k', fontweight='bold', fontsize=24)
if memory_labels is not None:
for tick in ax.get_xticklabels():
tick.set_rotation(75)
return fig
def compute_trajectories_distribution(attention):
"""
Counts memory selection (hard-attention) trajectories for Memory Network models.
"""
attention = np.argmax(attention, axis=2)
# Convert to string by concatenating
str_operation = lambda seq: '-'.join(seq.astype(np.str))
attention_str = np.apply_along_axis(func1d=str_operation, axis=1, arr=attention)
counter = Counter(attention_str)
return counter
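# Editor-added illustrative sketch: with hard attention, each sample's per-hop argmax indices are
# joined into a string key, so the Counter tallies how many samples follow each memory-selection
# path. Hypothetical shapes: 4 samples, 3 hops, 5 memory slots.
#
#   att = np.random.rand(4, 3, 5)
#   compute_trajectories_distribution(att)
#   # e.g. Counter({'2-0-3': 2, '1-1-4': 1, '0-3-3': 1})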
def visualize_unfair_trajectories(unfair_info, fold_name, key, aggregate=False):
"""
Counts memory selection (hard-attention) trajectories for Memory Network models.
Memory trajectories are restricted to positive samples only.
"""
str_op = lambda seq: '-'.join(seq.astype(np.str))
if not aggregate:
info_values = [item['attention'] for _, item in unfair_info.items()]
info_values = np.array(info_values)
info_values_str = np.apply_along_axis(func1d=str_op, axis=1, arr=info_values)
else:
info_values = [np.unique(item['attention']) for _, item in unfair_info.items()]
info_values_str = [str_op(item) if len(item) > 1 else str(item[0]) for item in info_values]
counter = Counter(info_values_str)
print('{0} trajectories distribution fold {1}: {2}'.format(key, fold_name, counter))
def plot_attention_trajectories(unfair_info, fold_name, key):
"""
Shows memory trajectories graphs, where nodes are selected memories (hard attention) along with
their count (number of samples that use that memory for a given memory lookup operation, i.e. hop)
"""
info_values = [item['attention'] for _, item in unfair_info.items()]
info_values = np.array(info_values)
info_values = info_values + 1
fig, ax = plt.subplots()
ax.set_title('{0}_trajectories_fold_{1}'.format(key, fold_name))
hops = np.arange(len(info_values[0])) + 1
for traj in info_values:
ax.plot(traj, hops, linewidth=1, marker='D', markersize=6, linestyle='dashed', c='r')
ax.set_xlabel('Memory slots')
ax.set_ylabel('Memory iterations')
ax.set_xticks(np.arange(np.max(info_values)) + 1)
# Annotate points
for hop in hops:
hop_counter = Counter(info_values[:, hop - 1])
for value, count in hop_counter.items():
ax.annotate(count, (value + 0.2, hop), size=16)
def plot_memory_selections(attention, width=0.35, name=None):
"""
Plots Memory Networks attention distribution as a bar plot.
"""
name = name or ""
# [#samples, #hops, memory_size]
assert len(attention.shape) == 3
memory_size = attention.shape[2]
hops = attention.shape[1]
# [#samples, #hops]
attention = np.argmax(attention, axis=2)
hop_counts = []
for hop in range(hops):
c = Counter(attention[:, hop])
hop_values = list(c.values())
hop_keys = list(c.keys())
adjusted = np.zeros(memory_size, dtype=np.int32)
for key, value in zip(hop_keys, hop_values):
adjusted[key] = value
hop_counts.append(adjusted)
hop_counts = np.array(hop_counts)
memory_indexes = np.arange(memory_size) + 1
fig, ax = plt.subplots(1, 1)
ax.set_title('{0}'.format(name))
for hop in range(hops):
if hop == 0:
ax.bar(memory_indexes, hop_counts[hop], width=width)
else:
ax.bar(memory_indexes, hop_counts[hop], width=width, bottom=np.sum(hop_counts[:hop], axis=0))
ax.set_xlabel('Memory slots')
ax.set_ylabel('Selections amount')
ax.set_xticks(memory_indexes)
return fig
def show_target_coverage(attention, test_df, category, fold_name, predictions, attention_mode='softmax',
verbose=0, R=3):
"""
Given memory targets, computes the memory target coverage. In particular, for each sample, the method
shows: (i) selected memories (hard attention), (ii) target memories, (iii) predicted label, (iv) true label.
"""
def get_hits(target, predicted):
target = set(target)
predicted = set(predicted)
intersection = predicted.intersection(target)
hits = len(intersection)
missed = target.difference(intersection)
others = predicted.difference(intersection)
return hits, missed, others
def get_predictions(attention_values, attention_mode):
if attention_mode == 'softmax':
return np.argmax(attention_values, axis=1).ravel().tolist()
elif attention_mode == 'sigmoid':
return np.where(attention_values >= 0.5)[-1].tolist()
else:
raise RuntimeError('Invalid attention mode! Got: {}'.format(attention_mode))
def get_top_K_predictions(attention_values, attention_mode, K):
if attention_mode == 'softmax':
best_indexes = np.argsort(attention_values, axis=1)[:, ::-1]
best_indexes = best_indexes[:, :K]
return best_indexes.ravel().tolist()
elif attention_mode == 'sigmoid':
best_indexes = np.argsort(attention_values, axis=1)[:, ::-1]
valid_mask = attention_values < 0.5
sorted_valid_mask = np.take_along_axis(valid_mask, best_indexes, axis=1)
best_indexes[sorted_valid_mask] = -1
best_indexes = best_indexes[:, :K]
return best_indexes.ravel().tolist()
else:
raise RuntimeError('Invalid attention mode! Got: {}'.format(attention_mode))
unfair_data = test_df[test_df[category] == 1]
# Get targets attention weights
total_usage = 0
total_hits = 0
total_top_R_hits = [0] * R
total_correct = 0
total_correct_and_hit = 0
total_correct_and_no_hit = 0
avg_memory_percentage = 0
for row_id, row in unfair_data.iterrows():
target_id = row_id
target_values = row['{}_targets'.format(category)]
target_values = [int(item) for item in target_values[1:-1].split(',')]
predicted_label = predictions[target_id]
true_label = 1
predicted_values = get_predictions(attention[target_id], attention_mode)
hits, missed, others = get_hits(target=target_values, predicted=predicted_values)
if len(predicted_values) > 0:
total_usage += 1
top_R_predictions = get_top_K_predictions(attention[target_id], attention_mode=attention_mode, K=R)
total_top_R_hits = [count + 1 if len(set(top_R_predictions[:idx + 1]).intersection(set(target_values)))
else count
for idx, count in enumerate(total_top_R_hits)]
avg_memory_percentage += len(set(predicted_values)) / attention.shape[-1]
if hits > 0:
total_hits += 1
total_correct_and_hit += int(predicted_label == true_label)
else:
total_correct_and_no_hit += int(predicted_label == true_label)
total_correct += int(predicted_label == true_label)
if verbose:
print('*' * 20)
print('Sample ', target_id, ': ', row.text)
print('Hits: ', hits,
' Missed: ', missed,
' Others: ', others,
'Predicted Label: ', predicted_label,
'True Label: ', true_label)
print('*' * 20)
total_top_R_hits = np.array(total_top_R_hits)
if verbose:
memory_usage = total_usage / unfair_data.shape[0]
coverage = total_hits / unfair_data.shape[0]
accuracy = total_correct / unfair_data.shape[0]
supervision_accuracy = total_correct_and_hit / unfair_data.shape[0]
non_memory_accuracy = (total_correct - total_correct_and_hit - total_correct_and_no_hit) / \
unfair_data.shape[0]
try:
coverage_precision = total_hits / total_usage
top_R_coverage_precision = total_top_R_hits / total_hits
memory_accuracy = total_correct_and_hit / total_usage
fold_memory_percentage = avg_memory_percentage / total_usage
except ZeroDivisionError:
coverage_precision = None
top_R_coverage_precision = None
memory_accuracy = None
fold_memory_percentage = None
print('Fold {0} stats:\n'
' Memory usage: {1}/{2} ({3})\n'
' Coverage (correct memory over all samples): {4}/{2} ({5})\n'
' Coverage precision (correct memory over memory usage): {4}/{1} ({6})\n'
' Top R coverage precision (correct memory and no other memories over memory usage): {14}/{4} ({15})\n'
' Precision (correct over all samples): {7}/{2} ({8})\n'
' Memory precision (correct and correct memory over memory usage: {9}/{1} ({10})\n'
' Supervision precision (correct and correct memory over all samples): {9}/{2} ({11})\n'
' Non-memory precision (correct and no memory over all samples): {12}/{2} ({13})\n'
' Average memory selection amount (percentage): {16}\n'
.format(fold_name,
total_usage,
unfair_data.shape[0],
memory_usage,
total_hits,
coverage,
coverage_precision,
total_correct,
accuracy,
total_correct_and_hit,
memory_accuracy,
supervision_accuracy,
total_correct - total_correct_and_hit - total_correct_and_no_hit,
non_memory_accuracy,
total_top_R_hits,
top_R_coverage_precision,
fold_memory_percentage))
stats = MemoryStats(total_usage, total_hits, total_correct, total_correct_and_hit, total_correct_and_no_hit,
unfair_data.shape[0], total_top_R_hits, avg_memory_percentage)
return stats
def show_voting_coverage(K_attentions, test_df, category, fold_name, K_predictions,
attention_mode='softmax', verbose=0, R=3):
# K_attention: [K, #samples, hops, mem_size]
# | |
# pySPACE/tests/utils/data/test_data_generation.py (source repo: pyspace/pyspace)
""" Data generation facilities to test algorithms or node chains e.g. in unittests """
import numpy
import pylab
import scipy
import abc
import warnings
from pySPACE.resources.data_types.time_series import TimeSeries
###################################################################
class DataGenerator(object):
""" Abstract base class for data generation for different test data patterns
To implement an arbitrary data generation class,
subclass from this class
and override the method generate() .
This can be sine waves, different types of noise, etc.
"""
def __init__(self,sampling_frequency=1.,
*args,**kwargs):
self.__sampling_frequency = sampling_frequency
def set_sampling_frequency(self,sampling_frequency):
self.__sampling_frequency = sampling_frequency
def get_sampling_frequency(self):
return self.__sampling_frequency
sampling_frequency = property(get_sampling_frequency, set_sampling_frequency)
__metaclass__ = abc.ABCMeta
def __call__(self):
""" Helper function that returns, how often it was called"""
try:
self.__getattribute__("index")
except AttributeError:
self.index = 0
temp = self.generate()
self.index += 1
return temp
def next_channel(self):
""" Goes to the next channel"""
pass
@abc.abstractmethod
def generate(self):
pass
# several different test data generation functions
class Zero(DataGenerator):
""" Helper function for data generation that simply returns zero"""
def __init__(self,*args,**kwargs):
super(Zero,self).__init__(*args,**kwargs)
def generate(self):
return 0.0
class One(DataGenerator):
""" Helper function for data generation that simply returns one"""
def __init__(self,*args,**kwargs):
super(One,self).__init__(*args,**kwargs)
def generate(self):
return 1.0
class Constant(DataGenerator):
""" Helper function for data generation that simply returns one"""
def __init__(self,value,*args,**kwargs):
self.value = value
super(Constant,self).__init__(*args,**kwargs)
def generate(self):
return self.value
class Counter(DataGenerator):
""" Counts the number of calls and returns the value"""
def __init__(self,start=0,*args,**kwargs):
self.index = start
super(Counter,self).__init__(*args,**kwargs)
def generate(self):
return self.index
class Channel(DataGenerator):
""" Generated the number of the actual channel"""
def __init__(self,
num_channels,
num_time_pts,
*args,
**kwargs):
self.num_channels = num_channels
self.num_time_pts = num_time_pts
super(Channel,self).__init__(*args,**kwargs)
def generate(self):
return self.index / self.num_time_pts
class TimePoint(DataGenerator):
""" Generated the index of the actual time point"""
def __init__(self,
num_channels,
num_time_pts,
*args,
**kwargs):
self.num_channels = num_channels
self.num_time_pts = num_time_pts
super(TimePoint,self).__init__(*args,**kwargs)
def generate(self):
return self.index % self.num_channels
class Triangle(DataGenerator):
""" Generates a triangle with a given width and height
"""
def __init__(self,width,height,*args,**kwargs):
self.width = numpy.double(width)
self.height = numpy.double(height)
super(Triangle,self).__init__(*args,**kwargs)
def generate(self):
buffer = numpy.mod(self.index,self.width)
if buffer <= self.width/2.:
buffer /= self.width / 2
else:
buffer = (self.width - buffer)/(self.width/2)
return self.height * buffer
class GaussianNoise(DataGenerator):
""" Generates normal distributed noise"""
def __init__(self, mean=0., std=1.,
seed = None, *args, **kwargs):
self.mean = numpy.double(mean)
self.std = numpy.double(std)
if seed is not None:
numpy.random.seed(seed)
super(GaussianNoise,self).__init__(*args,**kwargs)
def generate(self):
return scipy.randn() * self.std + self.mean
class Sine(DataGenerator):
""" Generates a sine wave """
def __init__(self,phase=0.0,frequency=1.,amplitude=1.,sampling_frequency=1.,*args,**kwargs):
self.phase = phase
self.frequency = frequency
self.amplitude = amplitude
super(Sine,self).__init__(sampling_frequency=sampling_frequency,
*args,**kwargs)
def generate(self):
t = 2.0 * numpy.pi * self.index * self.frequency / self.sampling_frequency + self.phase
return self.amplitude * numpy.sin(t)
class ChannelDependentSine(Sine):
""" Generates a sine wave with channel scaled frequency"""
def __init__(self,*args,**kwargs):
self.channel_index = 1
super(ChannelDependentSine, self).__init__(*args,**kwargs)
def next_channel(self):
""" Goes to the next channel"""
self.channel_index += 1
self.frequency = self.channel_index
class Cosine(DataGenerator):
""" Generates a cosine wave """
def __init__(self,phase=0.0,frequency=1.,amplitude=1.,sampling_frequency=1.,*args,**kwargs):
self.phase = phase
self.frequency = frequency
self.amplitude = amplitude
super(Cosine, self).__init__(sampling_frequency=sampling_frequency,
*args,**kwargs)
def generate(self):
t = 2.0 * numpy.pi * self.index * self.frequency / self.sampling_frequency + self.phase
return self.amplitude * numpy.cos(t)
class ChannelDependentCosine(Cosine):
""" Generates a cosine wave with channel scaled frequency"""
def __init__(self,*args,**kwargs):
self.channel_index = 1
super(ChannelDependentCosine, self).__init__(*args,**kwargs)
def next_channel(self):
""" Goes to the next channel"""
self.channel_index += 1
self.frequency = self.channel_index
class Delta(Sine):
""" Generates a delta impulse, i.e. 1 if t==-k, 0 else """
def __init__(self, k=0, *args, **kwargs):
self.k = k
super(Delta, self).__init__(*args,**kwargs)
def generate(self):
if self.index == -self.k:
return 1
else:
return 0
class ChannelDependentDelta(Delta):
""" Generates a sine wave with channel scaled frequency"""
def __init__(self,*args,**kwargs):
self.channel_index = 1
super(ChannelDependentDelta, self).__init__(k=0, *args, **kwargs)
def next_channel(self):
""" Goes to the next channel"""
self.k -= 2 # to have difference between channels and
self.index = 0
self.channel_index += 1
def generate(self):
if self.index == -self.k:
return self.channel_index
else:
return 0
class Combiner(DataGenerator):
""" Combines several generators"""
def __init__(self,generator_list=[],*args,**kwargs):
self.generator_list = generator_list
super(Combiner, self).__init__(*args,**kwargs)
def add_generator(self,generator):
self.generator_list.append(generator)
def set_sampling_frequency(self,sampling_frequency):
self.__sampling_frequency = sampling_frequency
for gen in self.generator_list:
gen.sampling_frequency = sampling_frequency
def get_sampling_frequency(self):
return self.__sampling_frequency
sampling_frequency = property(get_sampling_frequency, set_sampling_frequency)
class Adder(Combiner):
""" Combines several signal by adding them together"""
def __init__(self,generator_list=[],*args,**kwargs):
super(Adder, self).__init__(generator_list, *args,**kwargs)
def generate(self):
datum = 0
for generator in self.generator_list:
datum += generator()
return datum
class Multiplier(Combiner):
""" Combines several signal by adding them together"""
def __init__(self,generator_list=[],*args,**kwargs):
super(Multiplier, self).__init__(generator_list, *args,**kwargs)
def generate(self):
datum = 1
for generator in self.generator_list:
datum *= generator()
return datum
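# Illustrative sketch of combining generators (hypothetical values; assumes each
# generator is callable as in the Adder/Multiplier loops above): amplitude-modulate
# a sine carrier with slowly varying noise.
#
#   carrier = Sine(frequency=10., sampling_frequency=100.)
#   envelope = GaussianNoise(mean=1., std=0.05, sampling_frequency=100.)
#   am_signal = Multiplier(generator_list=[carrier, envelope])
#   samples = [am_signal() for _ in range(200)]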
class TestTimeSeriesGenerator(object):
""" Helper class to generate time series objects e.g. by DataGenerator classes
.. todo:: Documentation is wrong.
.. todo:: Fix dependencies and function names.
Why no inheritance from DataGenerator?
Why use of generate_test_data instead of generate?
"""
def init(self,**kwargs):
pass
def generate_test_data(self,
channels=1,
time_points=100,
function=Sine(phase=0.0, frequency=2., amplitude=1.),
sampling_frequency=1000,
channel_order=True,
channel_names=None,
dtype=numpy.float64):
"""
A method which generates a signal for testing, with
the specified number of "channels" which are all generated using
the given function.
**Keyword arguments**
:channels: number of channels
:time_points: number of time points
:function: the function used for sample generation
:sampling_frequency: the frequency which is used for sampling,
e.g. the signal corresponds to a time frame of
time_points/sampling frequency
:channel_names: the names of the channels (alternative to the
channels parameter, if not None, it also specifies
the number of channels)
:channel_order: if True, all time points of one channel are computed
before moving on to the next channel; use False to iterate
over the channels within each time point instead
:dtype: data type of the array
"""
if channel_names:
if len(channel_names) != channels:
channels = len(channel_names)
warnings.warn("Ambiguous number of channels in TestTimeSeriesGenerator")
else:
channel_names = [("test_channel_%s" % i) for i in range(channels)]
#Generate an empty ndarray
data = numpy.zeros((time_points, channels),dtype=dtype)
if channel_order:
#Compute the values for all channels
for channel_index in xrange(channels):
for time_index in xrange(time_points):
data[time_index, channel_index] = function()
function.next_channel()
else:
for time_index in xrange(time_points):
for channel_index in xrange(channels):
data[time_index, channel_index] = function()
#Generate a time series build out of the data
test_data = TimeSeries(input_array = data,
channel_names = channel_names,
sampling_frequency = sampling_frequency,
start_time = 0,
end_time = float(time_points) / sampling_frequency )
return test_data
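# Illustrative usage (a sketch; assumes TimeSeries is importable in this module and
# that DataGenerator instances are callable as above):
#
#   generator = TestTimeSeriesGenerator()
#   ts = generator.generate_test_data(channels=4, time_points=500,
#                                     function=ChannelDependentSine(sampling_frequency=250.),
#                                     sampling_frequency=250.)
#   # ts has shape (500, 4) and spans 2 seconds of data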
def generate_test_data_simple(self,
channels,
time_points,
function,
sampling_frequency,
initial_phase = 0.0):
"""
A method which generates a signal by using function for testing, with
the specified number of "channels" which are all generated using
the given function.
**Keyword arguments**
:channels: number of channels
:time_points: number of time points
:function: the function used for sample generation
:sampling_frequency: the frequency which is used for sampling,
e.g. the signal corresponds to a time frame
of time_points/sampling frequency
"""
#Generate an empty ndarray
data = numpy.zeros((time_points, channels))
#Compute the values for all channels
for channel_index in range(channels):
for time_index in range(time_points):
data[time_index, channel_index] = function(time_index / sampling_frequency + initial_phase)
#Generate a time series build out of the data
test_data = TimeSeries(
input_array=data,
channel_names=[("test_channel_%s" % i) for i in range(channels)],
sampling_frequency=sampling_frequency,
start_time=initial_phase,
end_time=float(time_points) / sampling_frequency + initial_phase)
return test_data
def add_to_test_data_single_channel(self, time_series, channel_index, function):
(num_time_points,num_channels) = time_series.shape
sampling_frequency = time_series.sampling_frequency
for time_index in range(num_time_points):
time_series[time_index, channel_index] = function( time_index/sampling_frequency )
def add_to_test_data(self, time_series, function):
"""
Function to add an additional signal generated by function to an existing time series
**Keyword arguments**
:timeSeries: the time series object
:function: function to generate signal
"""
(num_time_points,num_channels) = time_series.shape
for channel_index in range(num_channels):
self.add_to_test_data_single_channel(time_series, channel_index, function)
def generate_normalized_test_data(self,
channels,
time_points,
function,
sampling_frequency,
initial_phase=0.0):
"""
A method which generates a normalized (mu = 0, sigma =1) signal for testing, with
the specified number of "channels" which are all generated using the given function
"""
#Generate an empty ndarray
data = numpy.zeros((time_points, channels))
#Compute the values for all channels
for channel_index in range(channels):
for time_index in range(time_points):
data[time_index, channel_index] = function(2.0 * numpy.pi * (channel_index + 1) * (time_index / sampling_frequency + initial_phase))
# joint_monophyler.py
import numpy as np
from scipy.special import comb
from collections import Counter
from scipy.linalg import expm
from numpy.linalg import solve
from ete3 import Tree
from discreteMarkovChain import markovChain
import time
import sys
import pandas
import csv
# This function takes an input state and generates all possible output states
# that result from a single coalescence event.
def generate_onestep_descendents(state):
onestep_descendents = []
# Perform all possible single coalescences
for i in np.nonzero(state)[0]: # These are the "free" lineages
if(i == 0):
if(state[0] > 1):
newstate = list(state)
newstate[0] = newstate[0] - 1
newstate = tuple(newstate)
onestep_descendents.append(newstate)
else: # these are the intraspecies coalescences
newstate = list(state)
newstate[i] = newstate[i] - 1
newstate[i-1] = newstate[i-1] + 1
newstate = tuple(newstate)
onestep_descendents.append(newstate)
return(tuple(onestep_descendents))
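# Worked example (states appear to be tuples where entry i counts groups of i+1
# lineages from the same species, and entry 0 counts fully coalesced "free" lineages):
# from (2, 1) a single event either merges two free lineages, giving (1, 1), or
# coalesces the pair of conspecific lineages, giving (3, 0).
#
#   generate_onestep_descendents((2, 1))  # -> ((1, 1), (3, 0))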
# This function generates all possible output states from a set of input states
def generate_output_states(total_states, states_to_loop_through):
# Loop through all newly generated states
to_add = []
for state in states_to_loop_through:
all_onestep = generate_onestep_descendents(state)
# If there's only one descendent of this state, we must untuple it
if(len(all_onestep) > 0):
if(len(all_onestep) == 1):
to_add.append(all_onestep[0])
else:
to_add.extend(all_onestep)
# Then we add all of these one-step descendents to the total_states
# set and recurse on any states that have not been seen before:
new_states = [st for st in to_add if st not in total_states]
if(len(new_states) == 0):
return
else:
total_states.update(new_states) # new_states is guaranteed to be non-empty here
generate_output_states(total_states, new_states)
return
# This function checks to see if state2 can be obtained from an interspecies
# coalescence in state1
def check_interspecies(state1, state2):
checkstate = list(state1)
checkstate[0] = checkstate[0] - 1
if(checkstate == list(state2)):
return(True)
else:
return(False)
# This function checks to see if state2 can be obtained from an
# intraspecies coalescence in state1, returns the index if so
def check_intraspecies(state1, state2):
diff = np.subtract(state2, state1)
# This diff should be 0 until a 1 immediately followed by a -1 and then more
# zeros
counted = Counter(diff)
negones = counted[-1]
zeros = counted[0]
ones = counted[1]
tot = zeros + ones + negones
if(tot == len(state1) and ones == 1 and negones == 1 and np.where(diff == 1)[0] == np.where(diff == -1)[0] - 1):
return(np.where(diff == -1)[0])
else:
return(-1)
# This function generates the Q matrix (by first generating the transition matrix) for a set of states
def generate_transition_matrix(states):
numstates = len(states)
statesize = len(states[0])
tm = np.zeros((numstates+1, numstates+1)) # including the failure state
failure_index = numstates # noting that we start at 0
tm[failure_index, failure_index] = 0 # This is an absorbing state
for i in range(numstates):
state1 = states[i]
N = sum([(m+1)*state1[m] for m in range(statesize)])
for j in range(numstates):
state2 = states[j]
if state1 != state2:
if(check_interspecies(state1, state2)):
tm[i, j] = comb(state1[0], 2)
else:
index = check_intraspecies(state1, state2)
if(index != -1):
index = index[0]
if(index > -1):
tm[i, j] = state1[index]*comb(index + 1, 2)
if(state1[0] != sum(state1)): # This excluded case is a success state and cannot fail.
tm[i, failure_index] = comb(N, 2)-comb(state1[0], 2)-sum([state1[k]*comb(k+1, 2) for k in range(1, statesize)])
if(state1[0] == sum(state1) and state1[0] == 1):
tm[i, i] = 1 # This is an absorbing state
# Now we create the diagonal elements. subtract the rowsums
tm[i,i] = tm[i,i] - sum(tm[i]) # Turns this into a Q matrix
return(tm)
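# Sanity-check sketch (hypothetical state list): every row of the returned Q matrix
# should sum to zero, because the diagonal is set to minus the off-diagonal row sum.
#
#   states = [(2, 1), (1, 1), (3, 0), (2, 0), (0, 1), (1, 0)]
#   Q = generate_transition_matrix(states)
#   assert np.allclose(Q.sum(axis=1), 0)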
# This function maps the input state probabilities to their corresponding
# positions in the output state vector, which is what is used with the transition
# matrix
def map_input_state_probabilities(input_states, input_state_probabilities, output_states_list):
mapped_input_state_probabilities = [0] * len(output_states_list)
for oind in range(len(output_states_list)):
os = output_states_list[oind]
for iind in range(len(input_states)):
iis = input_states[iind]
if(iis == os):
isp = input_state_probabilities[iind]
mapped_input_state_probabilities[oind] = isp
return(mapped_input_state_probabilities)
# This function takes a list of input states (as a list of tuples) and a list of input state probabilities and
# computes the output state distribution through the transition matrix
# the list of input states DOES NOT INCLUDE the failure probability, which we track separately for simplicity
def compute_output_state_distribution(input_states, input_state_probabilities, branch_length, failure_prob):
output_states_set = set(input_states)
generate_output_states(output_states_set, input_states)
output_states_list = list(output_states_set)
mat = generate_transition_matrix(output_states_list)
mapped_input_state_probabilities = map_input_state_probabilities(input_states, input_state_probabilities, output_states_list)
mapped_input_state_probabilities.append(failure_prob)
if(branch_length == 'root'):
# # First we make this back into a transition matrix I'm not sure this whole block is at all needed
# mata = np.array(mat)
# np.fill_diagonal(mata, 0)
# rowsums = np.sum(mata, 1)
# for r in np.where(rowsums == 0):
# mata[r,r] = 1
# for row in range(mata.shape[0]):
# mata[row] /= np.sum(mata[row])
pi1 = obtain_steady_state_with_matrix_exponential(mat, mapped_input_state_probabilities)
output_state_probabilities = pi1
else:
output_state_probabilities = np.dot(mapped_input_state_probabilities, expm(mat*branch_length)) # Note that this includes the failure probability at the end
output_state_probabilities_nofail = output_state_probabilities[0:(len(output_state_probabilities)-1)]
new_failure_prob = output_state_probabilities[-1]
#print(output_states_list)
#print(output_state_probabilities_nofail)
#print(new_failure_prob)
#print(sum(input_state_probabilities)+failure_prob, failure_prob, sum(mapped_input_state_probabilities), sum(output_state_probabilities))
return([output_states_list, output_state_probabilities_nofail, new_failure_prob])
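# Hypothetical usage: evolve two lineages sampled from a single species (state (0, 1))
# along a branch of length 1.0, starting with zero accumulated failure probability.
#
#   states, probs, fail = compute_output_state_distribution([(0, 1)], [1.0], 1.0, 0.0)
#   # probs gives the probability of still having the pair vs. a single coalesced lineage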
# This function takes two sets of input states and their corresponding probabilities and stitches them together.
def stitch_input_states(left_input_states, left_input_state_probabilities, right_input_states, right_input_state_probabilities, left_failure_prob, right_failure_prob):
# Note that because the labels are species-specific, no non-mixed labels are shared between the daughter nodes
# And the free mixed labels are each unique. So we can just add the states together
stitched_states_precombine = []
stitched_probs_precombine = []
for lind in range(len(left_input_states)):
for rind in range(len(right_input_states)):
# I think the easiest way to do this is to clunkily do all of them and then combine like states
stitched_state = map(sum, zip(left_input_states[lind], right_input_states[rind]))
stitched_prob = left_input_state_probabilities[lind] * right_input_state_probabilities[rind]
stitched_states_precombine.append(stitched_state)
stitched_probs_precombine.append(stitched_prob)
# OK so we're just going to have to brute force it
stitched_states = []
stitched_probs = []
for stindex in range(len(stitched_states_precombine)):
stst = stitched_states_precombine[stindex]
ststt = tuple(stst)
stprob = stitched_probs_precombine[stindex]
if(ststt not in stitched_states):
stitched_states.append(ststt)
stitched_probs.append(stprob)
else:
match_index = [x for x in range(len(stitched_states)) if stitched_states[x] == ststt][0]
stitched_probs[match_index] = stitched_probs[match_index] + stprob
failure_prob = left_failure_prob*(1-right_failure_prob) + right_failure_prob*(1-left_failure_prob) + right_failure_prob*left_failure_prob
return([stitched_states, stitched_probs, failure_prob])
# This function adds sample sizes to a species tree structure
def add_samples(t, samplenames, samples):
for node in t.traverse('postorder'):
if node.name in samplenames:
node.add_features(samples = [samples[x] for x in range(len(samples)) if samplenames[x] == node.name])
# This function is from Vince Knight: https://vknight.org/blog/posts/continuous-time-markov-chains/
def is_steady_state(state, Q):
"""
Returns a boolean as to whether a given state is a steady
state of the Markov chain corresponding to the matrix Q
"""
return np.allclose((state @ Q), 0)
# This function is from Vince Knight: https://vknight.org/blog/posts/continuous-time-markov-chains/
def obtain_steady_state_with_matrix_exponential(Q, start, max_t=100):
"""
Solve the defining differential equation until it converges.
- Q: the transition matrix
- max_t: the maximum time for which the differential equation is solved at each attempt.
"""
state = start
while not is_steady_state(state=state, Q=Q):
state = state @ expm(Q * max_t)
return state
# This function performs the recursion
def get_node_output(node, maxsamples):
if node.is_leaf():
input_state = [0]*max(samples)
input_state[node.samples[0]-1] = 1
input_state = tuple(input_state)
input_state_list = [input_state]
input_state_probabilities = [1]
branch_length = node.dist
input_failure_prob = 0
output = compute_output_state_distribution(input_state_list, input_state_probabilities, branch_length, input_failure_prob)
return(output)
else:
left_input_structure = get_node_output(node.children[0], maxsamples)
right_input_structure = get_node_output(node.children[1], maxsamples)
left_input_states = left_input_structure[0]
right_input_states = right_input_structure[0]
left_input_probs = left_input_structure[1]
right_input_probs = right_input_structure[1]
left_failure_prob = left_input_structure[2]
right_failure_prob = right_input_structure[2]
inputs = stitch_input_states(left_input_states, left_input_probs, right_input_states, right_input_probs, left_failure_prob, right_failure_prob)
input_state_list = inputs[0]
input_state_probabilities = inputs[1]
input_failure_prob = inputs[2]
if node.is_root():
branch_length = 'root' # note that this just runs until convergence. there should be a better way
else:
branch_length = node.dist
output = compute_output_state_distribution(input_state_list, input_state_probabilities, branch_length, input_failure_prob)
return(output)
inputstring = sys.argv[1]
if(inputstring == '-m' ):
t = Tree(sys.argv[2])
tsamplenames = sys.argv[3]
tsamples = sys.argv[4]
#t = Tree("(A:0.0019,(B:0.0004,C:0.00035):0.0016);") # example to test
samplenames = tsamplenames.split(',')
csamples = tsamples.split(',')
samples = [int(s) for s in csamples]
maxsamples = max(samples)
add_samples(t, samplenames, samples)
finalout = get_node_output(t, maxsamples)
print(1-finalout[2])
elif(inputstring == '-f'):
treefilename = sys.argv[2]
samplenamefilename = sys.argv[3]
samplefilename = sys.argv[4]
outputfilename = sys.argv[5]
treelist = []
samplenamelist = []
samplelist = []
with open(treefilename, newline='') as f1:
reader = csv.reader(f1, delimiter = ' ')
for row in reader:
treelist.append(row)
with open(samplenamefilename, newline='') as f2:
reader = csv.reader(f2, delimiter = ' ')
for row in reader:
samplenamelist.append(row)
with open(samplefilename, newline='') as f3:
reader = csv.reader(f3, delimiter = ' ')
for row in reader:
samplelist.append(row)
f1.close()
f2.close()
f3.close()
with open(outputfilename,'w', newline='') as f4:
writer = csv.writer(f4)
# The copyright in this software is being made available under the BSD License,
# included below. This software may be subject to other third party and contributor
# rights, including patent rights, and no such rights are granted under this license.
#
# Copyright (c) 2015, Dash Industry Forum.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of Dash Industry Forum nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from dash_test_util import *
from dashlivesim.dashlib import dash_proxy
from dashlivesim.dashlib import mpdprocessor
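# A minimal sketch for running this module's tests directly (assumes it is executed
# as a script rather than through a larger test runner):
#
#   if __name__ == "__main__":
#       unittest.main(verbosity=2)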
class TestMPDProcessing(unittest.TestCase):
"Test of MPD parsing"
def setUp(self):
self.oldBaseUrlState = mpdprocessor.SET_BASEURL
mpdprocessor.SET_BASEURL = False
def tearDown(self):
mpdprocessor.SET_BASEURL = self.oldBaseUrlState
def testMPDhandling(self):
mpdprocessor.SET_BASEURL = True
urlParts = ['pdash', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=0)
d = dp.handle_request()
self.assertTrue(d.find("<BaseURL>http://streamtest.eu/pdash/testpic/</BaseURL>") > 0)
def testMPDwithChangedAST(self):
"Put AST to 1200s later than epoch start. There should be no PTO and startNumber=0 still."
testOutputFile = "start.mpd"
rm_outfile(testOutputFile)
urlParts = ['pdash', 'start_1200', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=0)
d = dp.handle_request()
write_data_to_outfile(d, testOutputFile)
self.assertTrue(d.find('availabilityStartTime="1970-01-01T00:20:00Z"') > 0)
self.assertTrue(d.find('startNumber="0"') > 0)
self.assertTrue(d.find('presentationTimeOffset') < 0)
def testMPDwithStartandDur(self):
urlParts = ['pdash', 'start_1200', 'dur_600', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=900)
d = dp.handle_request()
if dash_proxy.PUBLISH_TIME:
self.assertTrue(d.find('publishTime="1970-01-01T00:15:00Z"') > 0)
self.assertTrue(d.find('availabilityEndTime="1970-01-01T00:30:00Z"') > 0)
def testMPDwithStartand2Durations(self):
urlParts = ['pdash', 'start_1200', 'dur_600', 'dur_300', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=900)
d = dp.handle_request()
if dash_proxy.PUBLISH_TIME:
self.assertTrue(d.find('publishTime="1970-01-01T00:15:00Z"') > 0)
self.assertTrue(d.find('availabilityEndTime="1970-01-01T00:30:00Z"') > 0)
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=1795)
d = dp.handle_request()
if dash_proxy.PUBLISH_TIME:
self.assertTrue(d.find('publishTime="1970-01-01T00:29:00Z"') > 0)
self.assertTrue(d.find('availabilityEndTime="1970-01-01T00:35:00Z"') > 0)
def testHttpsBaseURL(self):
"Check that protocol is set to https if signalled to DashProvider."
mpdprocessor.SET_BASEURL = True
urlParts = ['pdash', 'testpic', 'Manifest.mpd']
is_https = 1
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=0,
is_https=is_https)
d = dp.handle_request()
self.assertTrue(d.find("<BaseURL>https://streamtest.eu/pdash/testpic/</BaseURL>") > 0)
def test_location_for_rel_times(self):
mpdprocessor.SET_BASEURL = True
urlParts = ['pdash', 'startrel_-20', 'stoprel_40', 'testpic',
'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now=1000)
d = dp.handle_request()
self.assertTrue(d.find(
'availabilityStartTime="1970-01-01T00:16:18Z"') > 0)
self.assertTrue(d.find('startNumber="0"') > 0)
self.assertTrue(d.find("<BaseURL>") < 0)
self.assertTrue(
d.find('<Location>http://streamtest.eu/pdash/start_978/stop_1044/'
'testpic/Manifest.mpd</Location>') > 0)
def test_location_for_rel_times_zero_offset(self):
mpdprocessor.SET_BASEURL = True
urlParts = ['pdash', 'startrel_-20', 'stoprel_40', 'timeoffset_0',
'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now=1000)
d = dp.handle_request()
self.assertTrue(d.find(
'availabilityStartTime="1970-01-01T00:16:18Z"') > 0)
self.assertTrue(d.find('startNumber="163"') > 0)
self.assertTrue(d.find('presentationTimeOffset="978"') > 0)
self.assertTrue(d.find("<BaseURL>") < 0)
self.assertTrue(
d.find('<Location>http://streamtest.eu/pdash/start_978/stop_1044/'
'timeoffset_0/testpic/Manifest.mpd</Location>') > 0)
def test_absolute_times(self):
mpdprocessor.SET_BASEURL = True
urlParts = ['pdash', 'start_978', 'stop_1044', 'testpic',
'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now=1000)
d = dp.handle_request()
self.assertTrue(d.find(
'availabilityStartTime="1970-01-01T00:16:18Z"') > 0)
self.assertTrue(d.find("<BaseURL>") > 0)
self.assertTrue(d.find('<Location>') < 0)
self.assertTrue(d.find('type="dynamic"') > 0)
self.assertTrue(d.find('mediaPresentationDuration="PT1M6S') > 0)
self.assertTrue(d.find('minimumUpdatePeriod') > 0)
def test_absolute_times_after_stop(self):
mpdprocessor.SET_BASEURL = True
urlParts = ['pdash', 'start_978', 'stop_1044', 'testpic',
'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now=1046)
d = dp.handle_request()
self.assertTrue(d.find(
'availabilityStartTime="1970-01-01T00:16:18Z"') > 0)
self.assertTrue(d.find('type="static"') > 0)
self.assertTrue(d.find('mediaPresentationDuration="PT1M6S') > 0)
self.assertTrue(d.find('minimumUpdatePeriod') < 0)
class TestInitSegmentProcessing(unittest.TestCase):
def testInit(self):
urlParts = ['pdash', 'testpic', 'A1', 'init.mp4']
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=0)
d = dp.handle_request()
self.assertEqual(len(d), 651)
class TestMediaSegments(unittest.TestCase):
def testMediaSegmentForTfdt32(self):
testOutputFile = "t1.m4s"
rm_outfile(testOutputFile)
now = 2101 # 1s after start of segment 350
segment = "349.m4s"
urlParts = ['pdash', 'tfdt_32', 'testpic', 'A1', segment]
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=now)
d = dp.handle_request()
write_data_to_outfile(d, testOutputFile)
self.assertEqual(len(d), 39517)
def testMediaSegmentTooEarly(self):
urlParts = ['pdash', 'testpic', 'A1', '5.m4s'] # Should be available after 36s
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=34)
d = dp.handle_request()
self.assertEqual(d['ok'], False)
def testMediaSegmentTooEarlyWithAST(self):
urlParts = ['pdash', 'start_6', 'testpic', 'A1', '0.m4s'] # Should be available after 12s
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=10)
d = dp.handle_request()
self.assertEqual(d['ok'], False)
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=14)
d = dp.handle_request()
self.assertEqual(len(d), 40346) # A full media segment
def testMediaSegmentBeforeTimeShiftBufferDepth(self):
now = 1356999060
segment = "%d.m4s" % ((now-330)/6)
urlParts = ['pdash', 'testpic', 'A1', segment]
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=now)
d = dp.handle_request()
self.assertEqual(d['ok'], False)
def testLastMediaSegment(self):
"""With total duration of 2100, the last segment shall be 349
(independent of start) and available at 4101 start+dur_1800+dur_300."""
urlParts = ['pdash', 'start_2000', 'dur_1800', 'dur_300', 'testpic', 'A1', '349.m4s']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now=4101)
d = dp.handle_request()
#print "LMSG at %d" % d.find("lmsg")
self.assertEqual(d.find("lmsg"), 24)
def testMultiPeriod(self):
testOutputFile = "multiperiod.mpd"
rm_outfile(testOutputFile)
urlParts = ['pdash', 'periods_10', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=3602)
d = dp.handle_request()
write_data_to_outfile(d, testOutputFile)
periodPositions = findAllIndexes("<Period", d)
self.assertEqual(len(periodPositions), 2)
def testContinuous(self):
testOutputFile = "ContMultiperiod.mpd"
rm_outfile(testOutputFile)
urlParts = ['pdash', 'continuous_1', 'periods_10', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=3602)
d = dp.handle_request()
write_data_to_outfile(d, testOutputFile)
periodPositions = findAllIndexes("urn:mpeg:dash:period_continuity:2014", d)
self.assertGreater(len(periodPositions), 1)
def testUtcTiming(self):
"Test that direct and head works."
urlParts = ['pdash', 'utc_direct-head', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=0)
d = dp.handle_request()
head_pos = d.find('<UTCTiming schemeIdUri="urn:mpeg:dash:utc:http-head:2014" '
'value="http://streamtest.eu/dash/time.txt" />')
direct_pos = d.find('<UTCTiming schemeIdUri="urn:mpeg:dash:utc:direct:2014"')
self.assertLess(direct_pos, head_pos)
def testMediaSegmentInIntervalWithoutOffset(self):
"Check that segment 5 is available after an hour."
urlParts = ['pdash', 'start_3600', 'stop_3660',
'testpic', 'A1', '5.m4s']
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now=3650)
d = dp.handle_request()
self.assertEqual(d[4:8], 'styp')
def testMediaSegmentInIntervalWithOffset(self):
"Check that segment 605 is available after an hour."
urlParts = ['pdash', 'start_3540', 'stop_3660',
'timeoffset_0', 'testpic', 'A1', '605.m4s']
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now=3650)
d = dp.handle_request()
self.assertEqual(d[4:8], 'styp')
def testMediaSegmentWithSidx(self):
testOutputFile = "t1_sidx.m4s"
rm_outfile(testOutputFile)
now = 2101 # 1s after start of segment 350
segment = "349.m4s"
urlParts = ['pdash', 'sidx_1', 'testpic', 'A1', segment]
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=now)
d = dp.handle_request()
write_data_to_outfile(d, testOutputFile)
def testMediaIntervalWithOffset(self):
"Test that only segments 590-609 are available"
def get_segment(nr, now):
urlParts = ['pdash', 'start_3540', 'stop_3660',
'timeoffset_0', 'testpic', 'A1', '%d.m4s' % nr]
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None,
VOD_CONFIG_DIR, CONTENT_ROOT, now)
return dp.handle_request()
res = get_segment(589, 3600)
self.assertEqual(res['ok'], False)
res = get_segment(590, 3600)
self.assertEqual(res[4:8], 'styp')
res = get_segment(609, 3670)
self.assertEqual(res[4:8], 'styp')
res = get_segment(610, 3670)
self.assertEqual(res['ok'], False)
class TestMorePathLevels(unittest.TestCase):
"Test when representations are further down in"
def setUp(self):
self.oldBaseUrlState = mpdprocessor.SET_BASEURL
mpdprocessor.SET_BASEURL = False
def tearDown(self):
mpdprocessor.SET_BASEURL = self.oldBaseUrlState
def testMPDGet(self):
mpdprocessor.SET_BASEURL = True
urlParts = ['pdash', 'testpic', 'Manifest.mpd']
dp = dash_proxy.DashProvider("streamtest.eu", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=0)
d = dp.handle_request()
self.assertGreater(d.find("<BaseURL>http://streamtest.eu/pdash/testpic/</BaseURL>"), 0)
def testInit(self):
urlParts = ['pdash', 'testpic', 'en', 'A1', 'init.mp4']
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=0)
d = dp.handle_request()
self.assertEqual(len(d), 617)
def testMediaSegment(self):
testOutputFile = "t2.m4s"
rm_outfile(testOutputFile)
now = 1356998460
segment = "%d.m4s" % ((now-60)/6)
urlParts = ['pdash', 'testpic', 'en', 'A1', segment]
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=now)
d = dp.handle_request()
write_data_to_outfile(d, testOutputFile)
class TestTfdt(unittest.TestCase):
"Test that the tfdt rewrite is working correctly"
def testMediaSegment(self):
testOutputFile = "tfdt.m4s"
rm_outfile(testOutputFile)
now = 1356998460
segment = "%d.m4s" % ((now-60)/6)
urlParts = ['pdash', 'testpic', 'A1', segment]
dp = dash_proxy.DashProvider("127.0.0.1", urlParts, None, VOD_CONFIG_DIR, CONTENT_ROOT, now=now)
d = dp.handle_request()
write_data_to_outfile(d, testOutputFile)
def testTfdtValueFromZero(self):
"Tfdt value = mediaPresentationTime which corresponds to segmentNr*duration"
now = 1393936560
segNr = 232322749
segment = "%d.m4s" % segNr
# -*- coding: utf-8 -*-
import os
import json
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import glob2
import argparse
import numpy as np
import copy
import sys
import random
import pandas as pd
import math
from pathlib import Path
import albumentations.pytorch
from pytorchcv.model_provider import get_model as ptcv_get_model
args = {
"gpu_num" : 0,
"fps" : 5,
"img_row": 1280,
"yolo_weight" : "weights/yolov5/yolov5x_human_detection.pt",
"human_cls_weight" : "",
"falldown_cls_weight": "weights/falldown_classification/efficientnetb4b_fall_detection.pth",
"yolo_conf_thres" : 0.05,
"yolo_max_det":300,
"calibrate_confidence_l1":1.0,
"calibrate_confidence_l2":2.0,
"yolo_threshold" :0.05,
"human_threshold" : 0.0,
"falldown_threshold" : 0.25,
"dataset_dir" : "./example/",
"result" : "./answer.json",
"create_main_bbox_threshold1" : 0.8,
"create_main_bbox_threshold2" : 0.2,
"create_main_bbox_length" : 24,
"tracking_iou_threshold" : 0.7,
"tracking_frame_length" : 12,
"tracking_time_threshold" : 12,
"before_falldown_threshold" : 0.8,
"before_falldown_remove_length" : 10,
"concat_iou_threshold":0.2,
"concat_intersection_threshold":0.5,
"concat_human_threshold":0.7,
"concat_falldown_threshold":0.6,
"isBrighterOn" : False,
"bri":100,
"clip_th":50,
"inp_boundary_iou_threshold" : 0.5,
"inp_boundary_intersection_threshold" : 0.5,
"inp_boundary_time_threshold": 12
}
sys.path.append("." + '/yolov5/')
class EfficientNet_model(nn.Module):
def __init__(self, net):
super(EfficientNet_model, self).__init__()
self.backbone = net.features
self.drop = nn.Dropout(0.3)
self.out = nn.Linear(1792, 1)
def forward(self, input):
x = self.backbone(input)
x = torch.flatten(x, 1)
x = self.drop(x)
output = self.out(x)
return output
# local path for model weights
yolov5_weight = args["yolo_weight"]
human_cls_weight = args["human_cls_weight"]
falldown_cls_weight = args["falldown_cls_weight"]
# model
from yolov5 import models
# yolo image row and col
img_row = args["img_row"]
# resize row and col ratio
img_col = int(img_row * 1080 / 1920)
# Variable as dictionary
file_names = {
"result" : args["result"],
}
# torch initializaton
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
random.seed(0)
np.random.seed(0)
torch.cuda.empty_cache()
# change GPU allocation
GPU_NUM = args["gpu_num"]
device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(device) # change allocation of current GPU
# load yolo model
from yolov5.models.experimental import attempt_load
from yolov5.utils.general import non_max_suppression, scale_coords
from yolov5.utils.augmentations import letterbox
model_y_new = attempt_load(yolov5_weight, map_location = device, inplace = True, fuse = True)
model_y_new.classes = [0]
model_y_new.to(device)
model_y_new.eval()
# falldown classification model
net_f = ptcv_get_model('efficientnet_b4b', pretrained=False)
Net_F = EfficientNet_model(net_f).to(device)
Net_F.load_state_dict(torch.load(falldown_cls_weight, map_location=device))
Net_F.requires_grad_(False)
Net_F.eval()
classifier_list = {
#"person_classification" : Net_P,
"falldown_classification": Net_F
}
def img_estim(image, threshold=50):
if np.mean(image) > threshold:
mode = 'light'
elif np.mean(image) > threshold // 2 and np.mean(image) <= threshold:
mode = 'dark'
else:
mode = 'verydark'
return mode, np.mean(image)
# image processor
# image bright / contrast / filter application
def sharpening(image):
kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
img = cv2.filter2D(image, -1, kernel)
return img
def contrast_img(image, clip_th=3.0, tileGridSize = (8,8)):
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=clip_th, tileGridSize=tileGridSize)
cl = clahe.apply(l)
limg = cv2.merge((cl, a, b))
img = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
return img
def brighter(image, bri=args["bri"]):
M = np.ones(image.shape, dtype="uint8") * bri
img = cv2.add(image, M)
return img
def brighter_revised(image, mul = 2):
img = cv2.multiply(image, (mul,mul,mul,0))
return img
def preprocess_Dark(image, bri = args["bri"], clip_th = args["clip_th"]):
mode, mean = img_estim(image)
if mode == 'dark':
#img = contrast_img(image, clip_th=3.0+(clip_th-mean)/1)
img = brighter_revised(image, mul = 2)
elif mode == 'verydark':
#img = brighter(image, bri=int((bri-mean)))
#img = contrast_img(img, clip_th=3.0+(clip_th-mean)/1)
img = brighter_revised(image, mul = 3)
else:
img = image
return img, mode
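# Illustrative sketch (hypothetical image path): brighten a dark frame before cropping.
#
#   frame = cv2.imread("example/dark_frame.jpg")
#   bright_frame, mode = preprocess_Dark(frame)  # mode is 'light', 'dark' or 'verydark'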
# image crop
def crop(image, label, isBrighterOn = args["isBrighterOn"]):
images = []
img = cv2.imread(image, cv2.IMREAD_COLOR)[..., ::-1]
ys, xs, _ = img.shape
for i in range(len(label)):
lbl = label[i]
y1 = max(0, lbl[1] - 3) # why pad the box by +/- 3 px on each side?
x1 = max(0, lbl[0] - 3)
if x1 > xs:
x1 = xs - 1
if y1 > ys:
y1 = ys-1
y2 = min(ys, lbl[3]+3)
x2 = min(xs, lbl[2]+3)
imgs = img[int(y1):int(y2) + 1, int(x1):int(x2) + 1]
# preprocess dark
if isBrighterOn:
imgs, _ = preprocess_Dark(imgs)
if len(imgs) > 0:
images.append(imgs)
return images
class CLSDataset(torch.utils.data.Dataset):
def __init__(self, img_ids, transform):
self.img_ids = img_ids
self.transform = transform
def __len__(self):
return len(self.img_ids)
def __getitem__(self, idx):
image = self.img_ids[idx]
if self.transform:
augmented = self.transform(image=image)
image = augmented['image']
return image
# albumentation
# image modifying, resize -> normalize -> convert to tensor
val_transform = albumentations.Compose([
albumentations.Resize(224, 224),
albumentations.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
albumentations.pytorch.transforms.ToTensorV2()
])
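# Illustrative sketch (hypothetical frame path and bounding box): crop a detection
# from one frame and score it with the fall-down classifier in batches.
#
#   crops = crop("example/frame_0001.jpg", [[100, 50, 300, 400]])
#   dset = CLSDataset(crops, val_transform)
#   loader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=False)
#   with torch.no_grad():
#       scores = [torch.sigmoid(Net_F(batch.to(device))) for batch in loader]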
def compute_ratio(bbox):
return (bbox[3] - bbox[1]) / (bbox[2] - bbox[0])
def compute_iou2(box_a, box_b):
# compute iou with method 2 : just multiply the difference of x and y
max_x = min(box_a[2], box_b[2])
max_y = min(box_a[3], box_b[3])
min_x = max(box_a[0], box_b[0])
min_y = max(box_a[1], box_b[1])
intersection = max(max_x-min_x, 0) * max(max_y-min_y, 0)
area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
union = area_a + area_b - intersection
return intersection / union
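# Worked example: two 10x10 boxes overlapping in a 5x5 region give
# intersection 25, union 175, and an IoU of 25/175 ~ 0.143.
#
#   compute_iou2([0, 0, 10, 10], [5, 5, 15, 15])  # ~0.143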
def make_int(bbox):
if len(bbox) >0:
return([int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])])
else:
return []
def yolo_find_allbbox_ctrl_size(model_y, device, img_fld, mode = "light", **kwargs):
detection = []
confidence = []
img_row = kwargs["img_row"]
conf_thres = kwargs["conf_thres"]
max_det = kwargs["max_det"]
transform = albumentations.Compose([
albumentations.CLAHE(clip_limit=(8,8), tile_grid_size=(8, 8), p=1.0),
])
for img in img_fld:
img = cv2.imread(img)
img0s = img.copy()
if mode != "light":
img = transform(image = img)['image']
img = letterbox(img, img_row, stride = int(model_y.stride.max()), auto = True)[0]
img = img.transpose((2,0,1))[::-1]
img = np.ascontiguousarray(img)
img = torch.from_numpy(img).to(device).float()
img = img[None]
img = img / 255.0
pred = model_y(img, augment = False, visualize = False)
pred = non_max_suppression(pred[0], conf_thres = conf_thres, max_det = max_det)
if device == "cpu":
result = pred[0].numpy()
else:
result = pred[0].detach().cpu().numpy()
dets = []
confs = []
for comp in result:
bbox = comp[0:4].tolist()
p = comp[4].tolist()
dets.append(bbox)
confs.append(p)
if len(dets) >= 1:
dets = torch.Tensor(dets)
dets = scale_coords(img.shape[2:], dets, img0s.shape).round()
dets = dets.numpy().tolist()
else:
dets = []
detection.append(dets)
confidence.append(confs)
return detection, confidence
def yolo_process(folder, model_y_new, device, **kwargs):
fps = kwargs["fps"]
img_row = kwargs["img_row"]
conf_thres = kwargs["conf_thres"]
max_det = kwargs["max_det"]
data = dict()
model_y_new.to(device)
for i in range(len(folder)):
img_fld = []
fld = sorted(glob2.glob(folder[i] + '/*.jpg'))
image = cv2.imread(fld[0])
mode, mean = img_estim(image)
for j in range(len(fld)):
if (j+1)%fps == 0:
img_fld.append(fld[j])
fld_name = folder[i]#.split('/')[-1]
fld_data1 = yolo_find_allbbox_ctrl_size(model_y_new,
device,
img_fld,
mode = mode,
img_row = img_row,
conf_thres = conf_thres,
max_det = max_det)
fld_detect = []
fld_confidence = []
for ii in range(len(fld_data1[0])):
Det = []
Conf = []
for idx, bbox in enumerate(fld_data1[0][ii]):
width = abs(bbox[3] - bbox[1])
height = abs(bbox[2] - bbox[0])
if min(width, height) > 32:
Det.append(bbox)
Conf.append(fld_data1[1][ii][idx])
fld_detect.append(Det)
fld_confidence.append(Conf)
data[fld_name] = dict()
data[fld_name]['detect'] = fld_detect
data[fld_name]['confidence'] = fld_confidence
data[fld_name]['mode'] = fps
data[fld_name]['M'] = mode
data[fld_name]['mean'] = mean
ans = {}
for j in range(len(fld_detect)):
ans[fps * (j + 1)] = fld_detect[j]
data[fld_name]['answer'] = ans
model_y_new.cpu()
torch.cuda.empty_cache()
return data
def person_cls_new(detect, fld, fps, mode, classifier, bri = None):
human_confidence = []
person_set = []
crop_imgs_set = []
for kk in range(len(fld)):
i = fps * (kk + 1)
a = detect[i]
crop_imgs_per_frame = crop(fld[kk], a)
crop_imgs_set.extend(crop_imgs_per_frame)
if len(crop_imgs_per_frame) == 0:
person_set.extend([-1])
else:
person_set.extend([i for i in range(len(crop_imgs_per_frame))])
dset = CLSDataset(crop_imgs_set, val_transform)
dloader = torch.utils.data.DataLoader(dset, shuffle = False, batch_size = 32, num_workers = 4)
score_list = []
for j, d in enumerate(dloader):
with torch.no_grad():
score = torch.sigmoid(classifier(d.cuda())).cpu().numpy()
score_list.extend(score.tolist())
score_list_per_frame = []
idx_score = 0
for idx, obj in enumerate(person_set):
if idx!=0 and obj != 0:
if obj == -1:
if len(score_list_per_frame) != 0:
human_confidence.append(score_list_per_frame)
score_list_per_frame = []
human_confidence.append([])
continue
score_list_per_frame.extend(score_list[idx_score])
if idx == len(person_set) - 1:
human_confidence.append(score_list_per_frame)
idx_score += 1
elif idx != 0 and obj == 0:
if len(score_list_per_frame) != 0:
human_confidence.append(score_list_per_frame)
score_list_per_frame = []
score_list_per_frame.extend(score_list[idx_score])
if idx == len(person_set) - 1:
human_confidence.append(score_list_per_frame)
idx_score += 1
elif idx ==0 and obj != 0:
human_confidence.append([])
elif idx == 0 and obj == 0:
score_list_per_frame = []
score_list_per_frame.extend(score_list[idx_score])
idx_score += 1
return human_confidence
def falldown_cls_new(detect, fld, fps, mode, classifier):
falldown_confidence = []
person_set = []
crop_imgs_set = []
for kk in range(len(fld)):
i = fps * (kk + 1)
a = detect[i]
crop_imgs_per_frame = crop(fld[kk], a)
crop_imgs_set.extend(crop_imgs_per_frame)
if len(crop_imgs_per_frame) == 0:
person_set.extend([-1])
else:
person_set.extend([i for i in range(len(crop_imgs_per_frame))])
dset = CLSDataset(crop_imgs_set, val_transform)
dloader = torch.utils.data.DataLoader(dset, shuffle = False, batch_size = 32, num_workers = 4)
score_list = []
for j, d in enumerate(dloader):
with torch.no_grad():
score = torch.sigmoid(classifier(d.cuda())).cpu().numpy()
score_list.extend(score.tolist())
score_list_per_frame = []
falldown_confidence = []
idx_score = 0
for idx, obj in enumerate(person_set):
if idx!=0 and obj != 0:
if obj == -1:
if sascore > 10.: sascore = 10.0
elif sascore < 1.: sascore = 1.0
return sascore
"""Scores based on an ECFP classifier for activity."""
# clf_model = None
def load_drd2_model():
name = 'oracle/drd2.pkl'
try:
with open(name, "rb") as f:
clf_model = pickle.load(f)
except EOFError:
import sys
sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
return clf_model
def fingerprints_from_mol(mol):
fp = AllChem.GetMorganFingerprint(mol, 3, useCounts=True, useFeatures=True)
size = 2048
nfp = np.zeros((1, size), np.int32)
for idx,v in fp.GetNonzeroElements().items():
nidx = idx%size
nfp[0, nidx] += int(v)
return nfp
def drd2(smile):
"""Evaluate DRD2 score of a SMILES string
Args:
smiles: str
Returns:
drd_score: float
"""
if 'drd2_model' not in globals().keys():
global drd2_model
drd2_model = load_drd2_model()
mol = Chem.MolFromSmiles(smile)
if mol:
fp = fingerprints_from_mol(mol)
score = drd2_model.predict_proba(fp)[:, 1]
drd_score = float(score)
return drd_score
return 0.0
def load_cyp3a4_veith():
oracle_file = "oracle/cyp3a4_veith.pkl"
try:
with open(oracle_file, "rb") as f:
cyp3a4_veith_model = pickle.load(f)
except EOFError:
import sys
sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
return cyp3a4_veith_model
def cyp3a4_veith(smiles):
try:
from DeepPurpose import utils
except:
raise ImportError("Please install DeepPurpose by 'pip install DeepPurpose'")
import os
os.environ["CUDA_VISIBLE_DEVICES"]='-1'
if 'cyp3a4_veith_model' not in globals().keys():
global cyp3a4_veith_model
cyp3a4_veith_model = load_cyp3a4_veith()
import warnings, os
warnings.filterwarnings("ignore")
X_drug = [smiles]
drug_encoding = 'CNN'
y = [1]
X_pred = utils.data_process(X_drug = X_drug, y = y, drug_encoding = drug_encoding, split_method='no_split')
# cyp3a4_veith_model = cyp3a4_veith_model.to("cuda:0")
y_pred = cyp3a4_veith_model.predict(X_pred)
return y_pred[0]
## from https://github.com/wengong-jin/iclr19-graph2graph/blob/master/props/properties.py
## from https://github.com/wengong-jin/multiobj-rationale/blob/master/properties.py
def similarity(smiles_a, smiles_b):
"""Evaluate Tanimoto similarity between 2 SMILES strings
Args:
smiles_a: str, SMILES string
smiles_b: str, SMILES string
Returns:
similarity score: float, between 0 and 1.
"""
if smiles_a is None or smiles_b is None:
return 0.0
amol = Chem.MolFromSmiles(smiles_a)
bmol = Chem.MolFromSmiles(smiles_b)
if amol is None or bmol is None:
return 0.0
fp1 = AllChem.GetMorganFingerprintAsBitVect(amol, 2, nBits=2048, useChirality=False)
fp2 = AllChem.GetMorganFingerprintAsBitVect(bmol, 2, nBits=2048, useChirality=False)
return DataStructs.TanimotoSimilarity(fp1, fp2)
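# Illustrative usage: a molecule compared with itself has identical fingerprints and
# therefore a Tanimoto similarity of 1.0, while unrelated molecules score near 0.
#
#   similarity('CC(=O)OC1=CC=CC=C1C(=O)O', 'CC(=O)OC1=CC=CC=C1C(=O)O')  # 1.0 (aspirin vs aspirin)
#   similarity('CC(=O)OC1=CC=CC=C1C(=O)O', 'CCO')                       # a small value (aspirin vs ethanol)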
def qed(smiles):
"""Evaluate QED score of a SMILES string
Args:
smiles: str
Returns:
qed_score: float, between 0 and 1.
"""
if smiles is None:
return 0.0
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return 0.0
return QED.qed(mol)
def penalized_logp(s):
"""Evaluate LogP score of a SMILES string
Args:
smiles: str
Returns:
logp_score: float, between - infinity and + infinity
"""
if s is None:
return -100.0
mol = Chem.MolFromSmiles(s)
if mol is None:
return -100.0
logP_mean = 2.4570953396190123
logP_std = 1.434324401111988
SA_mean = -3.0525811293166134
SA_std = 0.8335207024513095
cycle_mean = -0.0485696876403053
cycle_std = 0.2860212110245455
log_p = Descriptors.MolLogP(mol)
# SA = -sascorer.calculateScore(mol)
SA = -calculateScore(mol)
# cycle score
cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
cycle_score = -cycle_length
normalized_log_p = (log_p - logP_mean) / logP_std
normalized_SA = (SA - SA_mean) / SA_std
normalized_cycle = (cycle_score - cycle_mean) / cycle_std
return normalized_log_p + normalized_SA + normalized_cycle
def SA(s):
"""Evaluate SA score of a SMILES string
Args:
smiles: str
Returns:
SAscore: float
"""
if s is None:
return 100
mol = Chem.MolFromSmiles(s)
if mol is None:
return 100
SAscore = calculateScore(mol)
return SAscore
def load_gsk3b_model():
gsk3_model_path = 'oracle/gsk3b.pkl'
#print_sys('==== load gsk3b oracle =====')
try:
with open(gsk3_model_path, 'rb') as f:
gsk3_model = pickle.load(f)
except EOFError:
import sys
sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
return gsk3_model
def gsk3b(smiles):
"""Evaluate GSK3B score of a SMILES string
Args:
smiles: str
Returns:
gsk3_score: float, between 0 and 1.
"""
if 'gsk3_model' not in globals().keys():
global gsk3_model
gsk3_model = load_gsk3b_model()
molecule = smiles_to_rdkit_mol(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=2048)
features = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, features)
fp = features.reshape(1, -1)
gsk3_score = gsk3_model.predict_proba(fp)[0,1]
return gsk3_score
class jnk3:
"""Evaluate JSK3 score of a SMILES string
Args:
smiles: str
Returns:
jnk3_score: float , between 0 and 1.
"""
def __init__(self):
jnk3_model_path = 'oracle/jnk3.pkl'
try:
with open(jnk3_model_path, 'rb') as f:
self.jnk3_model = pickle.load(f)
except EOFError:
import sys
sys.exit("TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.")
def __call__(self, smiles):
molecule = smiles_to_rdkit_mol(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=2048)
features = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp, features)
fp = features.reshape(1, -1)
jnk3_score = self.jnk3_model.predict_proba(fp)[0,1]
return jnk3_score
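# Illustrative usage (requires the pickled oracle file under oracle/; the SMILES
# string is just an arbitrary valid example):
#
#   jnk3_oracle = jnk3()
#   score = jnk3_oracle('CCOc1ccc2nc(S(N)(=O)=O)sc2c1')  # a float in [0, 1]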
class AtomCounter:
def __init__(self, element):
"""
Args:
element: element to count within a molecule
"""
self.element = element
def __call__(self, mol):
"""
Count the number of atoms of a given type.
Args:
mol: molecule
Returns:
The number of atoms of the given type.
"""
# if the molecule contains H atoms, they may be implicit, so add them
if self.element == 'H':
mol = Chem.AddHs(mol)
return sum(1 for a in mol.GetAtoms() if a.GetSymbol() == self.element)
def parse_molecular_formula(formula):
"""
Parse a molecular formula to get the element types and counts.
Args:
formula: molecular formula, f.i. "C8H3F3Br"
Returns:
A list of tuples containing element types and number of occurrences.
"""
import re
matches = re.findall(r'([A-Z][a-z]*)(\d*)', formula)
# Convert matches to the required format
results = []
for match in matches:
# convert count to an integer, and set it to 1 if the count is not visible in the molecular formula
count = 1 if not match[1] else int(match[1])
results.append((match[0], count))
return results
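# Worked example:
#
#   parse_molecular_formula('C7H8N2O2')  # -> [('C', 7), ('H', 8), ('N', 2), ('O', 2)]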
class Isomer_scoring:
def __init__(self, target_smiles, means = 'geometric'):
assert means in ['geometric', 'arithmetic']
if means == 'geometric':
self.mean_func = gmean
else:
self.mean_func = np.mean
atom2cnt_lst = parse_molecular_formula(target_smiles)
total_atom_num = sum([cnt for atom,cnt in atom2cnt_lst])
self.total_atom_modifier = GaussianModifier(mu=total_atom_num, sigma=2.0)
self.AtomCounter_Modifier_lst = [((AtomCounter(atom)), GaussianModifier(mu=cnt,sigma=1.0)) for atom,cnt in atom2cnt_lst]
def __call__(self, test_smiles):
molecule = smiles_to_rdkit_mol(test_smiles)
all_scores = []
for atom_counter, modifier_func in self.AtomCounter_Modifier_lst:
all_scores.append(modifier_func(atom_counter(molecule)))
### total atom number
atom2cnt_lst = parse_molecular_formula(test_smiles)
## todo add Hs
total_atom_num = sum([cnt for atom,cnt in atom2cnt_lst])
all_scores.append(self.total_atom_modifier(total_atom_num))
return self.mean_func(all_scores)
def isomer_meta(target_smiles, means = 'geometric'):
return Isomer_scoring(target_smiles, means = means)
isomers_c7h8n2o2 = isomer_meta(target_smiles = 'C7H8N2O2', means = 'geometric')
isomers_c9h10n2o2pf2cl = isomer_meta(target_smiles = 'C9H10N2O2PF2Cl', means = 'geometric')
class rediscovery_meta:
def __init__(self, target_smiles, fp = 'ECFP4'):
self.similarity_func = fp2fpfunc[fp]
self.target_fp = self.similarity_func(target_smiles)
def __call__(self, test_smiles):
test_fp = self.similarity_func(test_smiles)
similarity_value = DataStructs.TanimotoSimilarity(self.target_fp, test_fp)
return similarity_value
class similarity_meta:
def __init__(self, target_smiles, fp = 'FCFP4', modifier_func = None):
self.similarity_func = fp2fpfunc[fp]
self.target_fp = self.similarity_func(target_smiles)
self.modifier_func = modifier_func
def __call__(self, test_smiles):
test_fp = self.similarity_func(test_smiles)
similarity_value = DataStructs.TanimotoSimilarity(self.target_fp, test_fp)
if self.modifier_func is None:
modifier_score = similarity_value
else:
modifier_score = self.modifier_func(similarity_value)
return modifier_score
celecoxib_rediscovery = rediscovery_meta(target_smiles = 'CC1=CC=C(C=C1)C1=CC(=NN1C1=CC=C(C=C1)S(N)(=O)=O)C(F)(F)F', fp = 'ECFP4')
troglitazone_rediscovery = rediscovery_meta(target_smiles = 'Cc1c(C)c2OC(C)(COc3ccc(CC4SC(=O)NC4=O)cc3)CCc2c(C)c1O', fp = 'ECFP4')
thiothixene_rediscovery = rediscovery_meta(target_smiles = 'CN(C)S(=O)(=O)c1ccc2Sc3ccccc3C(=CCCN4CCN(C)CC4)c2c1', fp = 'ECFP4')
similarity_modifier = ClippedScoreModifier(upper_x=0.75)
aripiprazole_similarity = similarity_meta(target_smiles = 'Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl',
fp = 'FCFP4',
modifier_func = similarity_modifier)
albuterol_similarity = similarity_meta(target_smiles = 'CC(C)(C)NCC(O)c1ccc(O)c(CO)c1',
fp = 'FCFP4',
modifier_func = similarity_modifier)
mestranol_similarity = similarity_meta(target_smiles = 'COc1ccc2[C@H]3CC[C@@]4(C)[C@@H](CC[C@@]4(O)C#C)[C@@H]3CCc2c1',
fp = 'AP',
modifier_func = similarity_modifier)
class median_meta:
def __init__(self, target_smiles_1, target_smiles_2, fp1 = 'ECFP6', fp2 = 'ECFP6', modifier_func1 = None, modifier_func2 = None, means = 'geometric'):
self.similarity_func1 = fp2fpfunc[fp1]
self.similarity_func2 = fp2fpfunc[fp2]
self.target_fp1 = self.similarity_func1(target_smiles_1)
self.target_fp2 = self.similarity_func2(target_smiles_2)
self.modifier_func1 = modifier_func1
self.modifier_func2 = modifier_func2
assert means in ['geometric', 'arithmetic']
self.mean_func = mean2func[means]
def __call__(self, test_smiles):
test_fp1 = self.similarity_func1(test_smiles)
test_fp2 = test_fp1 if self.similarity_func2 == self.similarity_func1 else self.similarity_func2(test_smiles)
similarity_value1 = DataStructs.TanimotoSimilarity(self.target_fp1, test_fp1)
similarity_value2 = DataStructs.TanimotoSimilarity(self.target_fp2, test_fp2)
if self.modifier_func1 is None:
modifier_score1 = similarity_value1
else:
modifier_score1 = self.modifier_func1(similarity_value1)
if self.modifier_func2 is None:
modifier_score2 = similarity_value2
else:
modifier_score2 = self.modifier_func2(similarity_value2)
final_score = self.mean_func([modifier_score1 , modifier_score2])
return final_score
camphor_smiles = 'CC1(C)C2CCC1(C)C(=O)C2'
menthol_smiles = 'CC(C)C1CCC(C)CC1O'
median1 = median_meta(target_smiles_1 = camphor_smiles,
target_smiles_2 = menthol_smiles,
fp1 = 'ECFP4',
fp2 = 'ECFP4',
modifier_func1 = None,
modifier_func2 = None,
means = 'geometric')
tadalafil_smiles = 'O=C1N(CC(N2C1CC3=C(C2C4=CC5=C(OCO5)C=C4)NC6=C3C=CC=C6)=O)C'
sildenafil_smiles = 'CCCC1=NN(C2=C1N=C(NC2=O)C3=C(C=CC(=C3)S(=O)(=O)N4CCN(CC4)C)OCC)C'
median2 = median_meta(target_smiles_1 = tadalafil_smiles,
target_smiles_2 = sildenafil_smiles,
fp1 = 'ECFP6',
fp2 = 'ECFP6',
modifier_func1 = None,
modifier_func2 = None,
means = 'geometric')
###################################################################################
# #
# This is a comprehensive set of analysis methods and tools #
# for the standard output of the Neutron Star Merger simulations #
# done with WhiskyTHC code. #
# #
###################################################################################
from __future__ import division
import os.path
import click
import h5py
from math import log10
import sys
from argparse import ArgumentParser
import time
from scidata.carpet.interp import Interpolator
# from py.path import local
# path.append('modules/')
# import matplotlib
# matplotlib.use("Agg")
# import matplotlib.pyplot as plt
# from matplotlib import rc
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# import warnings
# import click
# warnings.filterwarnings("ignore",category=matplotlib.mplDeprecation)
from preanalysis import SIM_STATUS, PRINT_SIM_STATUS, LOAD_ITTIME
from plotting_methods import PLOT_MANY_TASKS
from utils import *
""" ==============================================| SETTINGS |====================================================== """
__rootoutdir__ = "profiles/"
__addprofdir__ = "3d/"
__profile__ = {"tasklist": ["all", "corr", "hist", "slice", "mass", "densmode", "vtk", "densmode",
"densmodeint", "mjenclosed",
"plotall", "plotcorr", "plotslicecorr", "plothist", "plotslice", "plotmass", "slicecorr",
"plotdensmode", "plotcenterofmass", "plotdensmodephase"]}
__masks__ = ["disk", "remnant"]#, "rl" ,"rl_Ye04", "rl_theta60", "rl_hu0"]
__d3slicesvns__ = ["x", "y", "z", "rho", "w_lorentz", "vol", "press", "entr", "eps", "lapse", "velx", "vely", "velz",
"gxx", "gxy", "gxz", "gyy", "gyz", "gzz", "betax", "betay", "betaz", 'temp', 'Ye'] + \
["u_0", "density", "enthalpy", "vphi", "vr", "dens_unb_geo", "dens_unb_bern", "dens_unb_garch",
"ang_mom", "ang_mom_flux", "theta", "r", "phi", "hu_0"]
__d3corrs__ = ["rho_r", "rho_Ye", "r_Ye", "temp_Ye", "rho_temp", "rho_theta", "velz_theta", "rho_ang_mom", "velz_Ye",
"rho_ang_mom_flux", "rho_dens_unb_bern", "ang_mom_flux_theta",
"ang_mom_flux_dens_unb_bern", "inv_ang_mom_flux_dens_unb_bern",
"velz_dens_unb_bern", "Ye_dens_unb_bern", "theta_dens_unb_bern",
"hu_0_ang_mom", "hu_0_ang_mom_flux", "hu_0_Ye", "hu_0_temp", "hu_0_entr", "Ye_entr" #"hu_0_pressure"
]
__d2corrs__ = [ "Q_eff_nua_u_0", "Q_eff_nua_hu_0", "Q_eff_nua_dens_unb_bern",
"Q_eff_nua_over_density_hu_0", "Q_eff_nua_over_density_theta", "Q_eff_nua_over_density_Ye",
"Q_eff_nua_Ye", "velz_Ye"]
__d3histvns__ = ["r", "theta", "Ye", "entr", "temp", "velz", "rho", "dens_unb_bern", "press"]
__d3slicesplanes__ = ["xy", "xz"]
__d3diskmass__ = "disk_mass.txt"
__d3remnantmass__ = "remnant_mass.txt"
__d3intmjfname__ = "MJ_encl.txt"
__d3densitymodesfame__ = "density_modes_lap15.h5"
__center_of_mass_plotname__ = "center_of_mass.png"
# --- ploting ---
__d3sliceplotvns__ = ["Ye", "velx", "rho", "ang_mom_flux","ang_mom","dens_unb_garch",
"dens_unb_bern","dens_unb_geo","vr","vphi","enthalpy",
"density","temp","velz","vely","lapse","entr","eps","press","vol","w_lorentz",
"Q_eff_nua", "Q_eff_nue", "Q_eff_nux"]
__d3sliceplotrls__ = [0, 1, 2, 3, 4, 5, 6]
""" ==========================================| GRID CLASSES |====================================================== """
class POLAR_GRID:
"""
Creates a stretched cylindrical grid and allows
to interpolate any data from carpet grid onto it.
Stretched means, that the grid consists of 2 parts:
1) linear distribution in terms of radius (0-15)
2) logarithmic dist. in terms of radius (15-512)
Class stores the grid information in its own variables
that can be accessed directly or through
`get_int_grid(v_n)` method
Requirements:
> dictionary grid_info{} that describes the grid:
> class `carpet_grid` from scidata
Usage:
to access the new grid mesh arrays use:
get_new_grid(v_n)
to do the interpolation of arr, use
get_int_arr(arr)
"""
def __init__(self):
self.grid_info = {'type': 'pol', 'n_r': 150, 'n_phi': 150}
self.grid_type = self.grid_info['type']
# self.carpet_grid = carpet_grid
self.list_int_grid_v_ns = ["x_pol", "y_pol",
"r_pol", "phi_pol",
"dr_pol", "dphi_pol"]
print('-' * 25 + 'INITIALIZING POLAR GRID' + '-' * 25)
phi_pol, r_pol, self.dphi_pol_2d, self.dr_pol_2d = self.get_phi_r_grid()
self.r_pol_2d, self.phi_pol_2d = np.meshgrid(r_pol, phi_pol, indexing='ij')
self.x_pol_2d = self.r_pol_2d * np.cos(self.phi_pol_2d)
self.y_pol_2d = self.r_pol_2d * np.sin(self.phi_pol_2d)
print("\t GRID: [phi:r] = [{}:{}]".format(len(phi_pol), len(r_pol)))
print("\t GRID: [x_pol_2d: ({},{})] {} pints".format(self.x_pol_2d.min(), self.x_pol_2d.max(), len(self.x_pol_2d[:,0])))
print("\t GRID: [y_pol_2d: ({},{})] {} pints".format(self.y_pol_2d.min(), self.y_pol_2d.max(), len(self.y_pol_2d[0,:])))
print('-' * 30 + '------DONE-----' + '-' * 30)
print('\n')
    # polar grid
@staticmethod
def make_stretched_grid(x0, x1, x2, nlin, nlog):
assert x1 > 0
assert x2 > 0
x_lin_f = np.linspace(x0, x1, nlin)
x_log_f = 10.0 ** np.linspace(log10(x1), log10(x2), nlog)
return np.concatenate((x_lin_f, x_log_f))
def get_phi_r_grid(self):
# extracting grid info
n_r = self.grid_info["n_r"]
n_phi = self.grid_info["n_phi"]
        # constructing the grid
r_cyl_f = self.make_stretched_grid(0., 15., 512., n_r, n_phi)
phi_cyl_f = np.linspace(0, 2 * np.pi, n_phi)
# edges -> bins (cells)
r_cyl = 0.5 * (r_cyl_f[1:] + r_cyl_f[:-1])
phi_cyl = 0.5 * (phi_cyl_f[1:] + phi_cyl_f[:-1])
        # 1D grid -> 2D grid (to mimic the r, phi structure)
dr_cyl = np.diff(r_cyl_f)[:, np.newaxis]
dphi_cyl = np.diff(phi_cyl_f)[np.newaxis, :]
return phi_cyl, r_cyl, dphi_cyl, dr_cyl
# generic methods to be present in all INTERPOLATION CLASSES
# def get_int_arr(self, arr_3d):
#
# # if not self.x_cyl_3d.shape == arr_3d.shape:
# # raise ValueError("Passed for interpolation 3d array has wrong shape:\n"
# # "{} Expected {}".format(arr_3d.shape, self.x_cyl_3d.shape))
# xi = np.column_stack([self.x_cyl_3d.flatten(),
# self.y_cyl_3d.flatten(),
# self.z_cyl_3d.flatten()])
# F = Interpolator(self.carpet_grid, arr_3d, interp=1)
# res_arr_3d = F(xi).reshape(self.x_cyl_3d.shape)
# return res_arr_3d
def get_xi(self):
return np.column_stack([self.x_pol_2d.flatten(),
self.y_pol_2d.flatten()])
def get_shape(self):
return self.x_pol_2d.shape
def get_int_grid(self, v_n):
if v_n == "x_pol":
return self.x_pol_2d
elif v_n == "y_pol":
return self.y_pol_2d
elif v_n == "r_pol":
return self.r_pol_2d
elif v_n == "phi_pol":
return self.phi_pol_2d
elif v_n == "dr_pol":
return self.dr_pol_2d
elif v_n == "dphi_pol":
return self.dphi_pol_2d
else:
raise NameError("v_n: {} not recogized in grid. Available:{}"
.format(v_n, self.list_int_grid_v_ns))
def save_grid(self, sim, outdir="profiles/"):
        path = Paths.ppr_sims + sim + '/' + outdir
        if not os.path.exists(path):
            os.makedirs(path)
        outfile = h5py.File(path + str(self.grid_type) + '_grid.h5', "w")
# print("Saving grid...")
for v_n in self.list_int_grid_v_ns:
outfile.create_dataset(v_n, data=self.get_int_grid(v_n))
outfile.close()
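# --- Illustrative usage sketch (not part of the original pipeline) ---
# `carpet_grid` and `rho_xy` below are placeholders: `carpet_grid` is assumed to
# be a scidata carpet grid object and `rho_xy` a 2D array defined on it; the
# Interpolator call mirrors the commented-out `get_int_arr` pattern above.
def _example_interpolate_onto_polar_grid(carpet_grid, rho_xy):
    """Sketch: map a carpet-grid array onto the stretched polar mesh."""
    pol_grid = POLAR_GRID()
    xi = pol_grid.get_xi()  # (N, 2) target points [x, y]
    F = Interpolator(carpet_grid, rho_xy, interp=1)
    rho_pol = F(xi).reshape(pol_grid.get_shape())
    return (pol_grid.get_int_grid("r_pol"),
            pol_grid.get_int_grid("phi_pol"),
            rho_pol)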
class CYLINDRICAL_GRID:
"""
    Creates a stretched cylindrical grid and allows
    any data from the carpet grid to be interpolated onto it.
    Stretched means that the radial and vertical coordinates consist of 2 parts:
    1) linear distribution in terms of radius (0-15)
    2) logarithmic distribution in terms of radius (15-512)
    The class stores the grid information in its own variables
    that can be accessed directly or through the
    `get_int_grid(v_n)` method
    Requirements:
    > dictionary grid_info{} that describes the grid
    > class `carpet_grid` from scidata
    Usage:
    to access the new grid mesh arrays use:
    get_int_grid(v_n)
    to interpolate an array onto this grid, build the target points with
    get_xi(), interpolate, and reshape the result with get_shape()
"""
def __init__(self, grid_info = None):
        if grid_info is None:
self.grid_info = {'type': 'cyl', 'n_r': 150, 'n_phi': 150, 'n_z': 100}
else:
self.grid_info = grid_info
self.grid_type = self.grid_info['type']
# self.carpet_grid = carpet_grid
self.list_int_grid_v_ns = ["x_cyl", "y_cyl", "z_cyl",
"r_cyl", "phi_cyl",
"dr_cyl", "dphi_cyl", "dz_cyl"]
print('-' * 25 + 'INITIALIZING CYLINDRICAL GRID' + '-' * 25)
phi_cyl, r_cyl, z_cyl, \
self.dphi_cyl_3d, self.dr_cyl_3d, self.dz_cyl_3d = self.get_phi_r_z_grid()
self.r_cyl_3d, self.phi_cyl_3d, self.z_cyl_3d \
= np.meshgrid(r_cyl, phi_cyl, z_cyl, indexing='ij')
self.x_cyl_3d = self.r_cyl_3d * np.cos(self.phi_cyl_3d)
self.y_cyl_3d = self.r_cyl_3d * np.sin(self.phi_cyl_3d)
print("\t GRID: [phi:r:z] = [{}:{}:{}]".format(len(phi_cyl), len(r_cyl), len(z_cyl)))
print("\t GRID: [x_sph_3d: ({},{})] {} pints".format(self.x_cyl_3d.min(), self.x_cyl_3d.max(), len(self.x_cyl_3d[:,0,0])))
print("\t GRID: [y_sph_3d: ({},{})] {} pints".format(self.y_cyl_3d.min(), self.y_cyl_3d.max(), len(self.y_cyl_3d[0,:,0])))
print("\t GRID: [z_sph_3d: ({},{})] {} pints".format(self.z_cyl_3d.min(), self.z_cyl_3d.max(), len(self.z_cyl_3d[0,0,:])))
print('-' * 30 + '------DONE-----' + '-' * 30)
print('\n')
# cylindrical grid
@staticmethod
def make_stretched_grid(x0, x1, x2, nlin, nlog):
assert x1 > 0
assert x2 > 0
x_lin_f = np.linspace(x0, x1, nlin)
x_log_f = 10.0 ** np.linspace(log10(x1), log10(x2), nlog)
return np.concatenate((x_lin_f, x_log_f))
def get_phi_r_z_grid(self):
# extracting grid info
n_r = self.grid_info["n_r"]
n_phi = self.grid_info["n_phi"]
n_z = self.grid_info["n_z"]
        # constructing the grid
r_cyl_f = self.make_stretched_grid(0., 15., 512., n_r, n_phi)
z_cyl_f = self.make_stretched_grid(0., 15., 512., n_r, n_phi)
phi_cyl_f = np.linspace(0, 2 * np.pi, n_phi)
# edges -> bins (cells)
r_cyl = 0.5 * (r_cyl_f[1:] + r_cyl_f[:-1])
z_cyl = 0.5 * (z_cyl_f[1:] + z_cyl_f[:-1])
phi_cyl = 0.5 * (phi_cyl_f[1:] + phi_cyl_f[:-1])
        # 1D grid -> 3D grid (to mimic the r, z, phi structure)
dr_cyl = np.diff(r_cyl_f)[:, np.newaxis, np.newaxis]
dphi_cyl = np.diff(phi_cyl_f)[np.newaxis, :, np.newaxis]
dz_cyl = np.diff(z_cyl_f)[np.newaxis, np.newaxis, :]
return phi_cyl, r_cyl, z_cyl, dphi_cyl, dr_cyl, dz_cyl
# generic methods to be present in all INTERPOLATION CLASSES
# def get_int_arr(self, arr_3d):
#
# # if not self.x_cyl_3d.shape == arr_3d.shape:
# # raise ValueError("Passed for interpolation 3d array has wrong shape:\n"
# # "{} Expected {}".format(arr_3d.shape, self.x_cyl_3d.shape))
# xi = np.column_stack([self.x_cyl_3d.flatten(),
# self.y_cyl_3d.flatten(),
# self.z_cyl_3d.flatten()])
# F = Interpolator(self.carpet_grid, arr_3d, interp=1)
# res_arr_3d = F(xi).reshape(self.x_cyl_3d.shape)
# return res_arr_3d
def get_xi(self):
return np.column_stack([self.x_cyl_3d.flatten(),
self.y_cyl_3d.flatten(),
self.z_cyl_3d.flatten()])
def get_shape(self):
return self.x_cyl_3d.shape
def get_int_grid(self, v_n):
if v_n == "x_cyl":
return self.x_cyl_3d
elif v_n == "y_cyl":
return self.y_cyl_3d
elif v_n == "z_cyl":
return self.z_cyl_3d
elif v_n == "r_cyl":
return self.r_cyl_3d
elif v_n == "phi_cyl":
return self.phi_cyl_3d
elif v_n == "dr_cyl":
return self.dr_cyl_3d
elif v_n == "dphi_cyl":
return self.dphi_cyl_3d
elif v_n == "dz_cyl":
return self.dz_cyl_3d
else:
raise NameError("v_n: {} not recogized in grid. Available:{}"
.format(v_n, self.list_int_grid_v_ns))
def save_grid(self, sim, outdir="profiles/"):
        path = Paths.ppr_sims + sim + '/' + outdir
        if not os.path.exists(path):
            os.makedirs(path)
        outfile = h5py.File(path + str(self.grid_type) + '_grid.h5', "w")
# print("Saving grid...")
for v_n in self.list_int_grid_v_ns:
outfile.create_dataset(v_n, data=self.get_int_grid(v_n))
outfile.close()
class SPHERICAL_GRID:
"""
    Creates a stretched spherical grid and allows
    any data from the carpet grid to be interpolated onto it.
    Stretched means that the grid consists of 2 parts:
1) linear distribution in terms of radius (0-15)
2) logarithmic dist. in terms of radius (15-512)
Class stores the grid information in its own variables
that can be accessed directly or through
`get_new_grid(v_n)` method
Requirements:
> dictionary grid_info{} that describes the grid:
> class `carpet_grid` from scidata
Usage:
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
_
/ | | __ _ __ _
/ | / |_||_|| ||
/ | / | |\ | ||_
/____ |__/\ . | | \|_|\_|
__________________________ .
███████╗██████╗ ██████╗ ██████╗██╗ ██╗
██╔════╝██╔══██╗██╔═══██╗██╔════╝██║ ██║
█████╗ ██████╔╝██║ ██║██║ ███████║
██╔══╝ ██╔═══╝ ██║ ██║██║ ██╔══██║
███████╗██║ ╚██████╔╝╚██████╗██║ ██║
╚══════╝╚═╝ ╚═════╝ ╚═════╝╚═╝ ╚═╝
Created on Thu May 10 09:58:18 2018
@author: chrisunderwood
A file to be run on scarf
A new file to do analysis on the EPOCH output files
reads in the folder path by parsing the command line options.
creates the distribution function evolution for the whole simulation
creates the final electron spectrum
creates a number density plot with the laser pulse on top
There are several cmd line options when running the file:
"--folder", "-f", type=str, required=True
Folder path of simulation, and then duplicate dir in home folder
"--vmin", "-l", type=str, required=False
min density on num dens plot
"--vmax", "-u", type=str, required=False
max dens on num dens plot
Currently either both vmin and vmax must be supplied, or neither.
"--append", "-a", type=str, required=False
append onto previously saved files
(see the illustrative argparse sketch after the imports below)
"""
import argparse
import numpy as np
import os
import matplotlib.pyplot as plt
import sdf_helper as sh
import matplotlib as mpl
#from matplotlib.colors import colorConverter
import matplotlib.colors as colors
import matplotlib.gridspec as gridspec
from matplotlib import ticker
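# --- Illustrative argparse sketch ---
# This mirrors the options listed in the module docstring above; the real parser
# is built further down in this script, so the helper below is purely
# documentary (the description and help strings are assumptions, not original code).
def build_example_arg_parser():
    parser = argparse.ArgumentParser(description="EPOCH post-processing (sketch)")
    parser.add_argument("--folder", "-f", type=str, required=True,
                        help="Folder path of the simulation")
    parser.add_argument("--vmin", "-l", type=str, required=False,
                        help="min density on the number density plot")
    parser.add_argument("--vmax", "-u", type=str, required=False,
                        help="max density on the number density plot (give both vmin and vmax, or neither)")
    parser.add_argument("--append", "-a", type=str, required=False,
                        help="append onto previously saved files")
    return parser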
def setup_figure_fontAttributes(size = 12, family = 'normal', weight = 'normal'):
font = {'family' : family,
'weight' : weight,
'size' : size}
mpl.rc('font', **font)
#==============================================================================
# Define global constants
#==============================================================================
q_e = 1.6e-19
m_e = 9.11e-31
def nearposn(array,value):
posn = (abs(array-value)).argmin()
return posn
def FilesInFolder(DirectoryPath, splice):
files = os.listdir(DirectoryPath)
shots = []
inputDeck = []
for i in files:
if not i.startswith('.') and i.endswith('.sdf'):
shots.append(i)
if not i.startswith('.') and i.endswith('.deck'):
inputDeck.append(i)
# Sort
timeStep = []
for i in range(len(shots)):
# print shots[i], '\t',shots[i][splice[0]:splice[1]]
timeStep.append(int(shots[i][splice[0]:splice[-1]]))
timeStep = np.asarray(timeStep)
sorted_points = sorted(zip(timeStep, shots))
timeStep = [point[0] for point in sorted_points]
sdf_list = [point[1] for point in sorted_points]
return sdf_list, timeStep, inputDeck
def savedFiles(DirectoryPath):
files = os.listdir(DirectoryPath + 'NumberDensity_with_L/')
shots = []
for i in files:
if not i.startswith('.') and i.endswith('.png'):
shots.append(i)
# Sort
timeStep = []
for i in range(len(shots)):
print shots[i], '\t',shots[i][14:-4]
timeStep.append(int(shots[i][14:-4]))
timeStep = np.asarray(timeStep)
sorted_points = sorted(zip(timeStep, shots))
timeStep = [point[0] for point in sorted_points]
png_list = [point[1] for point in sorted_points]
return png_list, timeStep
def createTicks(x, nos, exp = False):
#==============================================================================
# function creates tick labels at positions, so imshow can be used
# this massively increases the run speed over pcolor
#==============================================================================
xlen = len(x)
x_sep = int(xlen / nos)
xpos = []
xval = []
start = 0
while(start < xlen):
xpos.append(start)
if exp:
xval.append("{0:.2e}".format(x[start]))
else:
if x[start] > 100:
xval.append("{0:.2e}".format(x[start]))
else:
xval.append("{0:.2f}".format(x[start]))
start += x_sep
return xpos, xval
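# Illustrative use of createTicks (sketch): relabel an imshow axis with physical
# coordinates instead of pixel indices, exactly as done in the plotting
# functions below, e.g.
#   xpos, xval = createTicks(x, 6)
#   plt.xticks(xpos, xval, rotation=-90)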
def normaliseArr(arr):
arr = np.array(arr)
arr = arr - arr.min()
return arr / arr.max()
def indivual_numDens(inData,time, inpath, savepath, vMin, vMax):
plt.close()
plt.figure(figsize=(8,7))
numDens = inData.Derived_Number_Density.data
grid = inData.Grid_Grid_mid.data
x = grid[0] / 1e-3
y = grid[1] / 1e-3
Actual_time = inData.Header.values()[9]
xpos, xval = createTicks(x,6)
ypos, yval = createTicks(y,8)
# if vMin is not None and vMax is not None:
# plt.imshow(numDens.T, aspect = 'auto', vmin = vMin, vmax = vMax)
# else:
# plt.imshow(numDens.T, aspect = 'auto')
# plt.colorbar()
#Sum in each direction for lineouts
sumX = []
for im in numDens:
sumX.append(sum(im))
sumY = []
for im in numDens.T:
sumY.append(sum(im))
# print 'Len of x, y sum: ', len(sumX), ' ', len(sumY)
#==============================================================================
# Create fig with subplots in
#==============================================================================
fig = plt.figure(figsize=(8,6))
# 3 Plots with one major one and two extra ones for each axes.
gs = gridspec.GridSpec(4, 4, height_ratios=(1,1,1,1), width_ratios=(0.5,1,1,1))
gs.update(wspace=0.025, hspace=0.025)
# Create all axis, including an additional one for the cbar
ax1 = plt.subplot(gs[0:3, 1:-1]) # Image
ax1.axis('off')
ax2 = plt.subplot(gs[0:3, 0] ) # right hand side plot
ax3 = plt.subplot(gs[-1, 1:-1] ) # below plot
cax4 = fig.add_axes([0.7, 0.35, 0.05, 0.5])
    # Make the axes look as I want them to
# Modify the ticks so they are easily visible
xticks = ticker.MaxNLocator(5)
ax2.yaxis.set_major_locator(xticks)
xticks = ticker.MaxNLocator(3)
ax2.xaxis.set_major_locator(xticks)
ax3.yaxis.tick_right()
ax2.ticklabel_format(style='sci', axis='x', scilimits=(0,0),useOffset=False)
ax3.ticklabel_format(style='sci', axis='y', scilimits=(0,0),useOffset=False)
im = ax1.imshow(numDens.T, aspect = 'auto')
ax2.plot(sumY, y )
ax3.plot(x, sumX)
# Set the axis limits for the plots
image_shape = np.shape(numDens.T)
ax1.set_xlim(0,image_shape[1])
ax1.set_ylim(0, image_shape[0])
ax2.set_ylim(y[0], y[-1])
ax3.set_xlim(x[0], x[-1])
ax3.set_xlabel('(mm)')
ax2.set_ylabel('(mm)')
plt.colorbar(im, cax = cax4)
plt.suptitle('Simulation Time: {0:.4f} (ps)'.format(Actual_time * 1e12))
#Create folder to save into
    if not os.path.exists(savepath + 'NumberDensity/'):
        os.makedirs(savepath + 'NumberDensity/')
plt.savefig(savepath + 'NumberDensity/' +'nd_' + str(time) + '.png', dpi = 150)
# plt.show()
def indivual_numDens_with_laser(inData, intTime, inpath, savepath, cmap1, vMin, vMax):
plt.close()
plt.figure(figsize=(10,7))
numDens = inData.Derived_Number_Density.data
ez = inData.Electric_Field_Ez.data
grid = inData.Grid_Grid_mid.data
x = grid[0] / 1e-6
y = grid[1] / 1e-6
Actual_time = inData.Header.values()[9]
thres2 = np.median(abs(ez)) * 10
mask = abs(ez) > thres2
xpos, xval = createTicks(x,6)
ypos, yval = createTicks(y,8)
if vMin is not None and vMax is not None:
plt.imshow(numDens.T, aspect = 'auto', vmin = vMin, vmax = vMax)
else:
minVal_cm = numDens.T.max() * 0.001
if minVal_cm < numDens.T.min():
minVal_cm = numDens.T.min()
plt.imshow(numDens.T, aspect = 'auto', vmin = minVal_cm)
cbar = plt.colorbar()
cbar.set_label(inData.Derived_Number_Density.units)
eField_masked = abs(ez) * mask
plt.imshow(eField_masked.T, cmap=cmap1, aspect = 'auto')
cbar = plt.colorbar()
cbar.set_label(inData.Electric_Field_Ez.units)
plt.xticks(xpos,xval, rotation=-90)
plt.yticks(ypos,yval)
plt.xlabel('(mu m)')
plt.ylabel('(mu m)')
plt.title('Simulation Time: {0:.4f} (ps)'.format(Actual_time * 1e12))
plt.tight_layout()
#Create folder to save into
    if not os.path.exists(savepath + 'NumberDensity_with_L/'):
        os.makedirs(savepath + 'NumberDensity_with_L/')
plt.savefig(savepath + 'NumberDensity_with_L/' +'nd_with_laser_' + str(intTime) + '.png', dpi = 150)
def indivual_distF(inData, intTime, inpath, savepath):
plt.close()
x = inData.dist_fn_x_px_electron.grid.data[0] * 1e6
y = inData.dist_fn_x_px_electron.grid.data[1]
cmap = plt.cm.gist_rainbow
# cmap.set_under(color = 'white')
print inData.dist_fn_x_px_electron.data.min(), inData.dist_fn_x_px_electron.data.max()
plt.imshow(inData.dist_fn_x_px_electron.data.T, aspect = 'auto', cmap = cmap) # , vmin = 1e-27 )
plt.colorbar()
xmin = x[0]; xmax=x[- 1]; ymin= y[0] ; ymax=y[- 1]
xpos, xval = createTicks(x,5)
ypos, yval = createTicks(y,6, True)
plt.xticks(xpos,xval)
plt.yticks(ypos,yval)
plt.xlabel('X(mu m)')
plt.ylabel('P_x (kg m s^-1)')
Actual_time = inData.Header.values()[9] * 1e12
plt.title('x - P_x @ t = {0:.5g} (ps)'.format(Actual_time))
plt.axis([xmin,xmax,ymin, ymax])
#Create folder to save into
    if not os.path.exists(savepath + 'DistFnc/'):
        os.makedirs(savepath + 'DistFnc/')
plt.savefig(savepath + 'DistFnc/' + 'distFnc' + str(intTime) + '.png', dpi = 150)
def createPlot_dist_evo(allPx_integrated, all_xaxis, yaxis, savepath, xAxis = 0):
plt.close()
cmap = plt.cm.jet
cmap.set_under(color='white')
minDensityToPlot = 1e4
maxDensityToPlot = 5e11
if xAxis == 1:
all_xaxis = all_xaxis * 1e12 #Turn into picoseconds
plt.pcolormesh(all_xaxis, yaxis, allPx_integrated.T,
norm=colors.LogNorm(), cmap = cmap,
vmin = minDensityToPlot, vmax = maxDensityToPlot)
xmin = all_xaxis[0]; xmax=all_xaxis[-1];
plt.xlabel("Time (ps)")
elif xAxis == 2:
all_xaxis = all_xaxis * 1e6 #Turn into mu m
plt.pcolormesh(all_xaxis, yaxis, allPx_integrated.T,
norm=colors.LogNorm(), cmap = cmap, vmin = minDensityToPlot)
xmin = all_xaxis[0]; xmax=all_xaxis[-1];
plt.xlabel("Distance (um)")
else:
plt.pcolormesh(all_xaxis, yaxis, allPx_integrated.T,
norm=colors.LogNorm(), cmap = cmap, vmin = minDensityToPlot)
xmin = all_xaxis[0]; xmax=all_xaxis[-1];
plt.xlabel("SDF Number")
ymin= yaxis[0] ; ymax=yaxis[- 1]
xmin = all_xaxis[0]; xmax = all_xaxis[-1]
cbar = plt.colorbar()
cbar.set_label("Density (nparticles/cell)", rotation=270)
plt.axis([xmin,xmax,ymin, ymax])
plt.ylabel("Momentum (kg.ms^-1)")
if xAxis == 1:
plt.savefig(savepath + 'Dist_evo/' + folderPath.split('/')[-2] + '_DistPx_Vs_Time.png',
dpi = 300,
bbox_inches='tight')
elif xAxis == 2:
plt.savefig(savepath + 'Dist_evo/' + folderPath.split('/')[-2] + '_DistPx_Vs_Distance.png',
dpi = 300,
bbox_inches='tight')
elif xAxis == 0:
plt.savefig(savepath + 'Dist_evo/' + folderPath.split('/')[-2] + '_DistPx_Vs_SDF_ts.png',
dpi = 300,
bbox_inches='tight')
print 'Distribution plot min, max and ave: '
print allPx_integrated.min(), allPx_integrated.max(), np.average(allPx_integrated)
#plt.show()
plt.close()
intensity = allPx_integrated[-1,:]
# intensity = (intensity - min(intensity)) / max(intensity)
plt.plot(yaxis, intensity)
np.savetxt(savepath + 'Dist_evo/' + 'Electron_Spectrum.txt', np.c_[yaxis, intensity])
plt.yscale('log')
# xmin = px_GeV[0]; xmax = 1.0; ymin = 0; ymax = 1.;
# plt.axis([xmin, xmax, ymin, ymax])
plt.xlabel(r"Energy ()")
# plt.ylabel("Intensity (Normalised)")
plt.ylabel("Intensity ()")
plt.title("Electron Bunch Spectrum")
plt.savefig(savepath + 'Dist_evo/' + 'Electron Spectrum.png',
dpi = 300)
def distFunc_pxX_plot(filelist, timesteps, inpath, savepath):
#==============================================================================
# Create a dist function for all timesteps
#==============================================================================
for f, time in zip(filelist, timesteps):
inData = sh.getdata(inpath + f)
indivual_distF(inData, time, inpath, savepath)
def numberDens(filelist, timesteps, inpath, savepath, vMin, vMax):
#==============================================================================
# Creates the plot of the number density,
#==============================================================================
setup_figure_fontAttributes(size = 11)
#For each time step create plot and then save that with its simstep position
for f, time in zip(filelist, timesteps):
        inData = sh.getdata(inpath + f)
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from pathlib import Path
import random
from datetime import datetime
from model import VPTREnc, VPTRDec, VPTRDisc, init_weights, VPTRFormerNAR, VPTRFormerFAR
from model import GDL, MSELoss, L1Loss, GANLoss, BiPatchNCE
from utils import KTHDataset, BAIRDataset, MovingMNISTDataset
from utils import get_dataloader
from utils import visualize_batch_clips, save_ckpt, load_ckpt, set_seed, AverageMeters, init_loss_dict, write_summary, resume_training
from utils import set_seed, gather_AverageMeters
import logging
import os
import argparse
parser = argparse.ArgumentParser(description='Data-distributed training')
parser.add_argument('--port', default='12355', type=str, help='master port for torch.distributed init')
def NAR_show_samples(VPTR_Enc, VPTR_Dec, VPTR_Transformer, sample, save_dir, device, renorm_transform):
VPTR_Transformer = VPTR_Transformer.eval()
with torch.no_grad():
past_frames, future_frames = sample
past_frames = past_frames.to(device)
future_frames = future_frames.to(device)
past_gt_feats = VPTR_Enc(past_frames)
future_gt_feats = VPTR_Enc(future_frames)
rec_past_frames = VPTR_Dec(past_gt_feats)
rec_future_frames = VPTR_Dec(future_gt_feats)
pred_future_feats = VPTR_Transformer(past_gt_feats)
pred_future_frames = VPTR_Dec(pred_future_feats)
N = pred_future_frames.shape[0]
idx = min(N, 4)
visualize_batch_clips(past_frames[0:idx, :, ...], future_frames[0:idx, :, ...], pred_future_frames[0:idx, :, ...], save_dir, renorm_transform, desc = 'pred')
visualize_batch_clips(past_frames[0:idx, :, ...], rec_future_frames[0:idx, :, ...], rec_past_frames[0:idx, :, ...], save_dir, renorm_transform, desc = 'ae')
def cal_lossD(VPTR_Disc, fake_imgs, real_imgs, lam_gan):
pred_fake = VPTR_Disc(fake_imgs.detach().flatten(0, 1))
loss_D_fake = gan_loss(pred_fake, False)
# Real
pred_real = VPTR_Disc(real_imgs.flatten(0,1))
loss_D_real = gan_loss(pred_real, True)
# combine loss and calculate gradients
loss_D = (loss_D_fake + loss_D_real) * 0.5 * lam_gan
return loss_D, loss_D_fake, loss_D_real
def cal_lossT(VPTR_Disc, fake_imgs, real_imgs, fake_feats, real_feats, mse_loss, gdl_loss, bpnce, lam_pc, lam_gan):
T_MSE_loss = mse_loss(fake_imgs, real_imgs)
T_GDL_loss = gdl_loss(real_imgs, fake_imgs)
#T_PC_loss = bpnce(F.normalize(real_feats, p=2.0, dim=2), F.normalize(fake_feats, p=2.0, dim=2))
T_PC_loss = torch.zeros(1)
if VPTR_Disc is not None:
assert lam_gan is not None, "Please input lam_gan"
pred_fake = VPTR_Disc(fake_imgs.flatten(0, 1))
loss_T_gan = gan_loss(pred_fake, True)
loss_T = T_GDL_loss + T_MSE_loss + lam_pc * T_PC_loss + lam_gan * loss_T_gan
else:
loss_T_gan = torch.zeros(1)
loss_T = T_GDL_loss + T_MSE_loss# + lam_pc * T_PC_loss
return loss_T, T_GDL_loss, T_MSE_loss, T_PC_loss, loss_T_gan
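# For reference (read directly off cal_lossT above): with a discriminator the
# Transformer objective is
#   loss_T = T_GDL + T_MSE + lam_pc * T_PC + lam_gan * T_gan
# and without one it reduces to loss_T = T_GDL + T_MSE; the BiPatchNCE term is
# currently disabled, so T_PC is a zero placeholder in both cases.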
def init_models(img_channels, encC, encH, encW, dropout, out_layer, rpe, TSLMA_flag, rank, batch_size, world_size,
Transformer_lr, resume_AE_ckpt,
resume_Transformer_ckpt = None, num_encoder_layers = 4, num_decoder_layers = 8,
num_past_frames = 10, num_future_frames = 10, init_Disc = False, train_Disc = False):
VPTR_Enc = VPTREnc(img_channels, feat_dim = encC, n_downsampling = 3).to(rank)
VPTR_Dec = VPTRDec(img_channels, feat_dim = encC, n_downsampling = 3, out_layer = out_layer).to(rank)
    # Load the trained autoencoder; the discriminator is initialized from scratch for a balanced training
start_epoch, history_loss_dict = resume_training({'VPTR_Enc': VPTR_Enc, 'VPTR_Dec': VPTR_Dec}, {}, resume_AE_ckpt, map_location = f'cuda:{rank}')
loss_name_list = ['T_MSE', 'T_GDL', 'T_gan', 'T_total', 'T_bpc', 'Dtotal', 'Dfake', 'Dreal']
loss_dict = init_loss_dict(loss_name_list, history_loss_dict)
VPTR_Enc = DDP(VPTR_Enc, device_ids=[rank])
VPTR_Dec = DDP(VPTR_Dec, device_ids=[rank])
VPTR_Enc = VPTR_Enc.eval()
VPTR_Dec = VPTR_Dec.eval()
VPTR_Disc = None
if init_Disc:
VPTR_Disc = VPTRDisc(img_channels, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d).to(rank)
init_weights(VPTR_Disc)
if not train_Disc:
_, _ = resume_training({'VPTR_Disc': VPTR_Disc}, {}, resume_AE_ckpt, map_location = f'cuda:{rank}')
VPTR_Disc = DDP(VPTR_Disc, device_ids=[rank])
if not train_Disc:
VPTR_Disc = VPTR_Disc.eval()
VPTR_Transformer = VPTRFormerNAR(num_past_frames, num_future_frames, encH=encH, encW = encW, d_model=encC,
nhead=8, num_encoder_layers=num_encoder_layers, num_decoder_layers=num_decoder_layers, dropout=dropout,
window_size=4, Spatial_FFN_hidden_ratio=4, TSLMA_flag = TSLMA_flag, rpe = rpe).to(rank)
optimizer_T = torch.optim.AdamW(params = VPTR_Transformer.parameters(), lr = Transformer_lr)
if resume_Transformer_ckpt is not None:
start_epoch, history_loss_dict = resume_training({'VPTR_Transformer': VPTR_Transformer}, {'optimizer_T':optimizer_T}, resume_Transformer_ckpt, map_location = f'cuda:{rank}')
loss_dict = init_loss_dict(loss_name_list, history_loss_dict)
VPTR_Transformer = DDP(VPTR_Transformer, device_ids=[rank])
optimizer_D = None
gan_loss = None
if train_Disc:
optimizer_D = torch.optim.Adam(params = VPTR_Disc.parameters(), lr = Transformer_lr, betas = (0.5, 0.999))
gan_loss = GANLoss('vanilla', target_real_label=1.0, target_fake_label=0.0).to(rank)
mse_loss = MSELoss()
gdl_loss = GDL(alpha = 1)
bpnce = BiPatchNCE(batch_size//world_size, num_future_frames, encH, encW, 1.0).to(rank)
return VPTR_Enc, VPTR_Dec, VPTR_Disc, VPTR_Transformer, optimizer_D, optimizer_T, start_epoch, loss_dict, mse_loss, gdl_loss, bpnce, gan_loss, loss_name_list
def single_iter(VPTR_Enc, VPTR_Dec, VPTR_Disc, VPTR_Transformer, optimizer_T, optimizer_D, sample, rank, mse_loss, gdl_loss, bpnce, lam_pc, gan_loss, lam_gan, max_grad_norm, train_flag = True):
past_frames, future_frames = sample
past_frames = past_frames.to(rank)
future_frames = future_frames.to(rank)
with torch.no_grad():
past_gt_feats = VPTR_Enc(past_frames)
future_gt_feats = VPTR_Enc(future_frames)
if train_flag:
VPTR_Transformer = VPTR_Transformer.train()
VPTR_Transformer.zero_grad(set_to_none=True)
VPTR_Dec.zero_grad(set_to_none=True)
pred_future_feats = VPTR_Transformer(past_gt_feats)
pred_frames = VPTR_Dec(pred_future_feats)
if optimizer_D is not None:
assert lam_gan is not None, "Input lam_gan"
#update discriminator
VPTR_Disc = VPTR_Disc.train()
for p in VPTR_Disc.parameters():
p.requires_grad_(True)
VPTR_Disc.zero_grad(set_to_none=True)
loss_D, loss_D_fake, loss_D_real = cal_lossD(VPTR_Disc, pred_frames, future_frames, lam_gan)
loss_D.backward()
optimizer_D.step()
for p in VPTR_Disc.parameters():
p.requires_grad_(False)
pred_future_feats = VPTR_Transformer.module.NCE_projector(pred_future_feats.permute(0, 1, 3, 4, 2)).permute(0, 1, 4, 2, 3)
future_gt_feats = VPTR_Transformer.module.NCE_projector(future_gt_feats.permute(0, 1, 3, 4, 2)).permute(0, 1, 4, 2, 3)
#update Transformer (generator)
loss_T, T_GDL_loss, T_MSE_loss, T_PC_loss, loss_T_gan = cal_lossT(VPTR_Disc, pred_frames, future_frames, pred_future_feats, future_gt_feats, mse_loss, gdl_loss, bpnce, lam_pc, lam_gan)
loss_T.backward()
nn.utils.clip_grad_norm_(VPTR_Transformer.parameters(), max_norm=max_grad_norm, norm_type=2)
optimizer_T.step()
else:
if optimizer_D is not None:
VPTR_Disc = VPTR_Disc.eval()
VPTR_Transformer = VPTR_Transformer.eval()
with torch.no_grad():
pred_future_feats = VPTR_Transformer(past_gt_feats)
pred_frames = VPTR_Dec(pred_future_feats)
if optimizer_D is not None:
loss_D, loss_D_fake, loss_D_real = cal_lossD(VPTR_Disc, pred_frames, future_frames, lam_gan)
pred_future_feats = VPTR_Transformer.module.NCE_projector(pred_future_feats.permute(0, 1, 3, 4, 2)).permute(0, 1, 4, 2, 3)
future_gt_feats = VPTR_Transformer.module.NCE_projector(future_gt_feats.permute(0, 1, 3, 4, 2)).permute(0, 1, 4, 2, 3)
loss_T, T_GDL_loss, T_MSE_loss, T_PC_loss, loss_T_gan = cal_lossT(VPTR_Disc, pred_frames, future_frames, pred_future_feats, future_gt_feats, mse_loss, gdl_loss, bpnce, lam_pc, lam_gan)
if optimizer_D is None:
loss_D, loss_D_fake, loss_D_real = torch.zeros(1), torch.zeros(1), torch.zeros(1)
iter_loss_dict = {'T_total': loss_T.item(), 'T_MSE': T_MSE_loss.item(), 'T_gan': loss_T_gan.item(), 'T_GDL': T_GDL_loss.item(), 'T_bpc':T_PC_loss.item(), 'Dtotal': loss_D.item(), 'Dfake':loss_D_fake.item(), 'Dreal':loss_D_real.item()}
return iter_loss_dict
def setup(rank, world_size, args):
# initialize the process group
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = args.port
dist.init_process_group("nccl", rank=rank, world_size=world_size)
def cleanup():
dist.destroy_process_group()
def main_worker(rank, args, world_size, img_channels, encC, encH, encW, dropout, out_layer, TSLMA_flag,
rpe, Transformer_lr, max_grad_norm, lam_pc, lam_gan, resume_AE_ckpt,
data_set_name, batch_size, data_set_dir, dev_set_size, epochs, ckpt_save_dir, tensorboard_save_dir,
resume_Transformer_ckpt = None, num_encoder_layers = 4, num_decoder_layers = 8, num_past_frames = 10,
num_future_frames = 10, init_Disc = False, train_Disc = False,
num_workers = 8, show_example_epochs = 10, save_ckpt_epochs = 2):
setup(rank, world_size, args)
torch.cuda.set_device(rank)
if rank == 0:
#############Set the logger#########
if not Path(ckpt_save_dir).exists():
Path(ckpt_save_dir).mkdir(parents=True, exist_ok=True)
logging.basicConfig(level=logging.INFO,
datefmt='%a, %d %b %Y %H:%M:%S',
format='%(asctime)s - %(message)s',
filename=ckpt_save_dir.joinpath('train_log.log').absolute().as_posix(),
filemode='w')
summary_writer = SummaryWriter(tensorboard_save_dir.absolute().as_posix())
VPTR_Enc, VPTR_Dec, VPTR_Disc, VPTR_Transformer, \
optimizer_D, optimizer_T, start_epoch, loss_dict, \
mse_loss, gdl_loss, bpnce, gan_loss, loss_name_list = init_models(img_channels, encC, encH, encW, dropout, out_layer, rpe,
TSLMA_flag, rank, batch_size, world_size, Transformer_lr, resume_AE_ckpt,
resume_Transformer_ckpt, num_encoder_layers, num_decoder_layers,
num_past_frames, num_future_frames, init_Disc, train_Disc)
train_loader, val_loader, _, renorm_transform = get_dataloader(data_set_name, batch_size, data_set_dir, ngpus = world_size, num_workers = num_workers)
for epoch in range(start_epoch+1, start_epoch + epochs+1):
epoch_st = datetime.now()
#Train
print(f'train rank {rank} epoch {epoch}')
train_EpochAveMeter = AverageMeters(loss_name_list)
for idx, sample in enumerate(train_loader, 0):
iter_loss_dict = single_iter(VPTR_Enc, VPTR_Dec, VPTR_Disc, VPTR_Transformer, optimizer_T, optimizer_D,
sample, rank, mse_loss, gdl_loss, bpnce, lam_pc, gan_loss, lam_gan, max_grad_norm, train_flag = True)
train_EpochAveMeter.iter_update(iter_loss_dict)
print(f'rank{rank}, finish train')
train_ave_meters = [None for i in range(world_size)]
dist.all_gather_object(train_ave_meters, train_EpochAveMeter)
print(f'rank{rank}, gather')
if rank == 0:
train_meter = gather_AverageMeters(train_ave_meters)
loss_dict = train_meter.epoch_update(loss_dict, epoch, train_flag = True)
write_summary(summary_writer, loss_dict, train_flag = True)
if epoch % show_example_epochs == 0 or epoch == 1:
NAR_show_samples(VPTR_Enc, VPTR_Dec, VPTR_Transformer, sample, ckpt_save_dir.joinpath(f'train_gifs_epoch{epoch}'), rank, renorm_transform)
print(f'valid rank {rank} epoch {epoch}')
#validation
val_EpochAveMeter = AverageMeters(loss_name_list)
for idx, sample in enumerate(val_loader, 0):
iter_loss_dict = single_iter(VPTR_Enc, VPTR_Dec, VPTR_Disc, VPTR_Transformer, optimizer_T, optimizer_D,
sample, rank, mse_loss, gdl_loss, bpnce, lam_pc, gan_loss, lam_gan, max_grad_norm, train_flag = False)
val_EpochAveMeter.iter_update(iter_loss_dict)
val_ave_meters = [None for i in range(world_size)]
print(f'valid rank{rank}, gather')
dist.all_gather_object(val_ave_meters, val_EpochAveMeter)
print(f'valid rank{rank}, finish gather')
if rank == 0:
val_meter = gather_AverageMeters(val_ave_meters)
loss_dict = val_meter.epoch_update(loss_dict, epoch, train_flag = False)
write_summary(summary_writer, loss_dict, train_flag = False)
if epoch % show_example_epochs == 0 or epoch == 1:
NAR_show_samples(VPTR_Enc, VPTR_Dec, VPTR_Transformer, sample, ckpt_save_dir.joinpath(f'test_gifs_epoch{epoch}'), rank, renorm_transform)
if epoch % save_ckpt_epochs == 0:
save_ckpt({'VPTR_Transformer': VPTR_Transformer},
{'optimizer_T': optimizer_T}, epoch, loss_dict, ckpt_save_dir)
epoch_time = datetime.now() - epoch_st
logging.info(f"epoch {epoch}, {val_meter.meters['T_total'].avg}")
logging.info(f"Estimated remaining training time: {epoch_time.total_seconds()/3600. * (start_epoch + epochs - epoch)} Hours")
print(f'rank {rank} epoch {epoch}')
cleanup()
if __name__ == '__main__':
set_seed(3407)
args = parser.parse_args()
ckpt_save_dir = Path('/home/travail/xiyex/VPTR_ckpts/MNIST_TLMA_aug_BPNCE01_MSEGDL_NAR_mp_ckpt')
tensorboard_save_dir = Path('/home/travail/xiyex/VPTR_ckpts/MNIST_TLMA_aug_BPNCE01_MSEGDL_NAR_mp_tensorboard')
resume_AE_ckpt = Path('/home/travail/xiyex/VPTR_ckpts/MNIST_ResNetAE_MSEGDLgan001_ckpt').joinpath('epoch_93.tar')
#resume_Transformer_ckpt = ckpt_save_dir.joinpath('epoch_90.tar')
resume_Transformer_ckpt = None
data_set_name = 'MNIST'
out_layer = 'Sigmoid'
data_set_dir = '/home/travail/xiyex/MovingMNIST'
dev_set_size = 50
num_past_frames = 10
num_future_frames = 10
encH, encW, encC = 8, 8, 528
img_channels = 1
epochs = 150
batch_size = 2
num_encoder_layers = 4
num_decoder_layers = 8
Transformer_lr = 1e-4
max_grad_norm = 1.0
TSLMA_flag = False
rpe = False
lam_gan = None #0.001
lam_pc = 0.1
dropout = 0.1
init_Disc = False
train_Disc = False
num_workers = 1
world_size = 4
show_example_epochs = 1
save_ckpt_epochs = 1
print("Start training....")
mp.spawn(main_worker,
             args=(args, world_size, img_channels, encC, encH, encW, dropout, out_layer, TSLMA_flag,
                   rpe, Transformer_lr, max_grad_norm, lam_pc, lam_gan, resume_AE_ckpt,
                   data_set_name, batch_size, data_set_dir, dev_set_size, epochs, ckpt_save_dir, tensorboard_save_dir,
                   resume_Transformer_ckpt, num_encoder_layers, num_decoder_layers, num_past_frames,
                   num_future_frames, init_Disc, train_Disc, num_workers, show_example_epochs, save_ckpt_epochs),
             nprocs=world_size,
             join=True)
0.0029 -0.04173 -1.23600 0.2529 7.5 -0.5096 0.00000 0.0483 6.75 1000 750 2.5 3.2 -0.72744 -0.46341 0.6651 0.3792
0.260 2.48747 0.0029 -0.04768 -1.21882 0.2529 7.5 -0.5096 0.00000 0.0478 6.75 1000 750 2.5 3.2 -0.77335 -0.48705 0.6650 0.3754
0.280 2.38739 0.0029 -0.05178 -1.19543 0.2529 7.5 -0.5096 0.00000 0.0474 6.75 1000 750 2.5 3.2 -0.80508 -0.47334 0.6590 0.3757
0.300 2.30150 0.0029 -0.05672 -1.17072 0.2529 7.5 -0.5096 0.00000 0.0469 6.75 1000 750 2.5 3.2 -0.82609 -0.45730 0.6599 0.3816
0.320 2.17298 0.0029 -0.06015 -1.13847 0.2529 7.5 -0.5096 0.00000 0.0464 6.75 1000 750 2.5 3.2 -0.84080 -0.44267 0.6654 0.3866
0.340 2.07474 0.0029 -0.06508 -1.11131 0.2529 7.5 -0.5096 0.00000 0.0459 6.75 1000 750 2.5 3.2 -0.86251 -0.43888 0.6651 0.3881
0.360 2.01953 0.0029 -0.06974 -1.09484 0.2529 7.5 -0.5096 0.00000 0.0459 6.75 1000 750 2.5 3.2 -0.87479 -0.43820 0.6662 0.3924
0.380 1.95078 0.0029 -0.07346 -1.07812 0.2529 7.5 -0.5096 0.00000 0.0429 6.75 1000 750 2.5 3.2 -0.88522 -0.43678 0.6698 0.3945
0.400 1.89372 0.0029 -0.07684 -1.06530 0.2529 7.5 -0.5096 0.00000 0.0400 6.75 1000 750 2.5 3.2 -0.89517 -0.43008 0.6697 0.3962
0.420 1.83717 0.0029 -0.08010 -1.05451 0.2529 7.5 -0.5096 0.00000 0.0374 6.75 1000 750 2.5 3.2 -0.90875 -0.42190 0.6696 0.389
0.440 1.77528 0.0029 -0.08296 -1.04332 0.2529 7.5 -0.5096 0.00000 0.0349 6.75 1000 750 2.5 3.2 -0.91922 -0.40903 0.6641 0.3929
0.460 1.73155 0.0029 -0.08623 -1.03572 0.2529 7.5 -0.5096 0.00000 0.0323 6.75 1000 750 2.5 3.2 -0.92670 -0.39442 0.6575 0.4009
0.480 1.70132 0.0029 -0.09070 -1.02724 0.2529 7.5 -0.5096 0.00000 0.0297 6.75 1000 750 2.5 3.2 -0.93720 -0.38462 0.6540 0.4022
0.500 1.67127 0.0029 -0.09490 -1.01909 0.2529 7.5 -0.5096 0.00000 0.0271 6.75 1000 750 2.5 3.2 -0.94614 -0.37408 0.6512 0.4021
0.550 1.53838 0.0029 -0.10275 -0.99351 0.2529 7.5 -0.5096 0.00000 0.0245 6.75 1000 750 2.5 3.2 -0.96564 -0.35582 0.6570 0.4057
0.600 1.37505 0.0029 -0.10747 -0.96429 0.2529 7.5 -0.5096 0.00000 0.0219 6.75 1000 750 2.5 3.2 -0.98499 -0.34053 0.6630 0.406
0.650 1.21156 0.0029 -0.11262 -0.93347 0.2529 7.5 -0.5096 0.00000 0.0193 6.75 1000 750 2.5 3.2 -0.99733 -0.30949 0.6652 0.4124
0.700 1.09262 0.0029 -0.11835 -0.91162 0.2529 7.5 -0.5096 0.00000 0.0167 6.75 1000 750 2.5 3.2 -1.00469 -0.28772 0.6696 0.4135
0.750 0.95211 0.0029 -0.12347 -0.88393 0.2529 7.5 -0.5096 0.00000 0.0141 6.75 1000 750 2.5 3.2 -1.00786 -0.28957 0.6744 0.4043
0.800 0.85227 0.0029 -0.12678 -0.86884 0.2529 7.5 -0.5096 0.00000 0.0115 6.75 1000 750 2.5 3.2 -1.00606 -0.28555 0.6716 0.3974
0.850 0.76564 0.0029 -0.13133 -0.85442 0.2529 7.5 -0.5096 0.00000 0.0089 6.75 1000 750 2.5 3.2 -1.01093 -0.28364 0.6713 0.3971
0.900 0.66856 0.0029 -0.13551 -0.83929 0.2529 7.5 -0.5096 0.00000 0.0062 6.75 1000 750 2.5 3.2 -1.01576 -0.28037 0.6738 0.3986
0.950 0.58739 0.0029 -0.13957 -0.82668 0.2529 7.5 -0.5096 0.00000 0.0016 6.75 1000 750 2.5 3.2 -1.01353 -0.28390 0.6767 0.3949
1.000 0.52349 0.0029 -0.14345 -0.81838 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -1.01331 -0.28702 0.6787 0.3943
1.100 0.37680 0.0029 -0.15051 -0.79691 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -1.01240 -0.27669 0.6912 0.3806
1.200 0.23251 0.0029 -0.15527 -0.77813 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -1.00489 -0.27538 0.7015 0.3802
1.300 0.10481 0.0029 -0.16106 -0.75888 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -0.98876 -0.25008 0.7017 0.3803
1.400 0.00887 0.0029 -0.16654 -0.74871 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -0.97760 -0.23508 0.7141 0.3766
1.500 -0.01867 0.0029 -0.17187 -0.75751 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -0.98071 -0.24695 0.7164 0.3799
1.600 -0.09960 0.0029 -0.17728 -0.74823 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -0.96369 -0.22870 0.7198 0.3817
1.700 -0.21166 0.0029 -0.17908 -0.73766 0.2529 7.5 -0.5096 0.00000 0.0000 6.75 1000 750 2.5 3.2 -0.94634 -0.21655 0.7226 0.3724
1.800 -0.27300 0.0029 -0.18438 -0.72996 0.2529 7.5 -0.5096 0.00000 -0.003 6.75 1000 750 2.5 3.2 -0.93606 -0.20302 0.7241 0.371
1.900 -0.35366 0.0029 -0.18741 -0.72279 0.2529 7.5 -0.5096 0.00000 -0.006 6.75 1000 750 2.5 3.2 -0.91408 -0.18228 0.7266 0.3745
2.000 -0.42891 0.0029 -0.19029 -0.72033 0.2529 7.5 -0.5096 0.00000 -0.009 6.75 1000 750 2.5 3.2 -0.91007 -0.17336 0.7254 0.3717
2.200 -0.55307 0.0029 -0.19683 -0.71662 0.2529 7.5 -0.5096 0.00000 -0.0141 6.75 1000 750 2.5 3.2 -0.89376 -0.15463 0.7207 0.3758
2.400 -0.67806 0.0029 -0.20339 -0.70452 0.2529 7.5 -0.5096 0.00000 -0.0284 6.75 1000 750 2.5 3.2 -0.87052 -0.13181 0.7144 0.3973
2.600 -0.80494 0.0029 -0.20703 -0.69691 0.2529 7.5 -0.5096 0.00000 -0.0408 6.75 1000 750 2.5 3.2 -0.85889 -0.14066 0.7122 0.4001
2.800 -0.91278 0.0029 -0.21074 -0.69560 0.2529 7.5 -0.5096 0.00000 -0.0534 6.75 1000 750 2.5 3.2 -0.86106 -0.13882 0.7129 0.4025
3.000 -1.05642 0.0029 -0.21392 -0.69085 0.2529 7.5 -0.5096 0.00000 -0.0683 6.75 1000 750 2.5 3.2 -0.85793 -0.13336 0.6997 0.4046
3.200 -1.17715 0.0029 -0.21361 -0.67711 0.2529 7.5 -0.5096 0.00000 -0.078 6.75 1000 750 2.5 3.2 -0.82094 -0.13770 0.6820 0.4194
3.400 -1.22091 0.0029 -0.21951 -0.68177 0.2529 7.5 -0.5096 0.00000 -0.0943 6.75 1000 750 2.5 3.2 -0.84449 -0.15337 0.6682 0.3971
3.600 -1.34547 0.0029 -0.22724 -0.65918 0.2529 7.5 -0.5096 0.00000 -0.1278 6.75 1000 750 2.5 3.2 -0.83216 -0.10884 0.6508 0.4211
3.800 -1.39790 0.0029 -0.23180 -0.65298 0.2529 7.5 -0.5096 0.00000 -0.1744 6.75 1000 750 2.5 3.2 -0.79216 -0.08884 0.6389 0.415
4.000 -1.37536 0.0029 -0.23848 -0.66482 0.2529 7.5 -0.5096 0.00000 -0.2231 6.75 1000 750 2.5 3.2 -0.75645 -0.07749 0.6196 0.3566
pgv 5.61201 0.0029 -0.09980 -0.98388 0.2529 7.5 -0.5096 -0.0616 0.0630 6.75 1000 750 2.5 3.2 -0.72057 -0.19688 0.6014 0.3311
""")
class AkkarEtAlRepi2014(AkkarEtAlRjb2014):
"""
Implements GMPE developed by <NAME>, <NAME>, and <NAME>
as published in "Empirical Ground-Motion Models for Point- and Extended-
Source Crustal Earthquake Scenarios in Europe and the Middle East",
    Bulletin of Earthquake Engineering (2014).
    The class implements the equations for epicentral distance and is based on
    the manuscript provided by the original authors.
"""
REQUIRES_DISTANCES = set(('repi', ))
def _compute_logarithmic_distance_term(self, C, mag, dists):
"""
Compute and return fourth term in equations (2a)
and (2b), page 20.
"""
return (
(C['a4'] + C['a5'] * (mag - self.c1)) *
np.log(np.sqrt(dists.repi ** 2 + C['a6'] ** 2))
)
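    # In the notation of Akkar et al. (2014), the expression above is
    #   (a4 + a5 * (M - c1)) * ln( sqrt(Repi**2 + a6**2) )
    # i.e. the distance-scaling term of equations (2a)/(2b) evaluated with
    # epicentral distance instead of Joyner-Boore distance (cf. the parent class).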
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT a1 a2 a3 a4 a5 a6 a7 a8 a9 c1 Vcon Vref c n b1 b2 sigma tau
pga 2.52977 0.0029 -0.05496 -1.31001 0.2529 7.5 -0.5096 -0.1091 0.0937 6.75 1000 750 2.5 3.2 -0.41997 -0.28846 0.6375 0.3581
pgv 6.13498 0.0029 -0.12091 -1.04013 0.2529 7.5 -0.5096 -0.0616 0.0630 6.75 1000 750 2.5 3.2 -0.72057 -0.19688 0.6143 0.3485
0.010 2.54832 0.0029 -0.05434 -1.31268 0.2529 7.5 -0.5096 -0.1115 0.0953 6.75 1000 750 2.5 3.2 -0.41729 -0.28685 0.6389 0.3607
0.020 2.64420 0.0029 -0.05452 -1.33135 0.2529 7.5 -0.5096 -0.1040 0.1029 6.75 1000 750 2.5 3.2 -0.39998 -0.28241 0.6434 0.3615
0.030 2.77723 0.0029 -0.05196 -1.35509 0.2529 7.5 -0.5096 -0.0973 0.1148 6.75 1000 750 2.5 3.2 -0.34799 -0.26842 0.6569 0.3617
0.040 2.92666 0.0029 -0.04657 -1.38259 0.2529 7.5 -0.5096 -0.0884 0.1073 6.75 1000 750 2.5 3.2 -0.27572 -0.24759 0.6693 0.3530
0.050 3.09355 0.0029 -0.04168 -1.41008 0.2529 7.5 -0.5096 -0.0853 0.1052 6.75 1000 750 2.5 3.2 -0.21231 -0.22385 0.6773 0.3612
0.075 3.38462 0.0029 -0.03506 -1.44268 0.2529 7.5 -0.5096 -0.0779 0.0837 6.75 1000 750 2.5 3.2 -0.14427 -0.17525 0.6791 0.3853
0.100 3.61906 0.0029 -0.03936 -1.46870 0.2529 7.5 -0.5096 -0.0749 0.0761 6.75 1000 750 2.5 3.2 -0.27064 -0.29293 0.6851 0.4160
0.110 3.66537 0.0029 -0.04081 -1.47079 0.2529 7.5 -0.5096 -0.0704 0.0707 6.75 1000 750 2.5 3.2 -0.31025 -0.31837 0.6884 0.4163
0.120 3.68544 0.0029 -0.04295 -1.46520 0.2529 7.5 -0.5096 -0.0604 0.0653 6.75 1000 750 2.5 3.2 -0.34796 -0.33860 0.6960 0.4118
0.130 3.70155 0.0029 -0.04581 -1.45986 0.2529 7.5 -0.5096 -0.0490 0.0617 6.75 1000 750 2.5 3.2 -0.39668 -0.36646 0.6997 0.4102
0.140 3.70871 0.0029 -0.04848 -1.45433 0.2529 7.5 -0.5096 -0.0377 0.0581 6.75 1000 750 2.5 3.2 -0.43996 -0.38417 0.7032 0.4028
0.150 3.70477 0.0029 -0.05156 -1.44613 0.2529 7.5 -0.5096 -0.0265 0.0545 6.75 1000 750 2.5 3.2 -0.48313 -0.39551 0.7011 0.3978
0.160 3.65565 0.0029 -0.05350 -1.42989 0.2529 7.5 -0.5096 -0.0194 0.0509 6.75 1000 750 2.5 3.2 -0.52431 -0.40869 0.6997 0.3989
0.170 3.59764 0.0029 -0.05583 -1.41110 0.2529 7.5 -0.5096 -0.0125 0.0507 6.75 1000 750 2.5 3.2 -0.55680 -0.41528 0.6970 0.4030
0.180 3.53732 0.0029 -0.05830 -1.39329 | |
import sys
import os
import decimal
import string
import logger
# template configuration: in theory this stuff could be
# modified at runtime, though in practice that seems unlikely
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_DIR = os.path.abspath(THIS_DIR + '/templates')
TEMPLATES = [ '01_header.c_template', '02_errors.c_template', '03_prototypes.c_template', '05_functions.c_template' ]
# generate the C code from a parsed model
# much of the code is unchanging boilerplate, and much of that
# is simply copied direct from several template files
# where the code depends on the model, it is constructed
# in a bunch of subsidiary functions below -- at present
# these are a rather ugly mix of literal C strings and
# variable substitutions -- maybe look into making this
# less horrible in future?
def generateSource(model, config, template_dir=TEMPLATE_DIR):
    with open(template_dir + '/' + TEMPLATES[0]) as f:
        src = f.read()
    with open(template_dir + '/' + TEMPLATES[1]) as f:
        src = src + f.read()
    src = src + generateModelVars(model, config)
    with open(template_dir + '/' + TEMPLATES[2]) as f:
        src = src + f.read()
    src = src + generateEmbeds(model)
    src = src + generateModelFuncs(model, config)
    with open(template_dir + '/' + TEMPLATES[3]) as f:
        src = src + f.read()
    return src
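# --- Illustrative usage (sketch): how `model` and `config` are produced is up
# to the surrounding tool (its parser is not shown here), so this helper only
# demonstrates the call into generateSource ---
def write_model_source(model, config, out_path, template_dir=TEMPLATE_DIR):
    """Generate the C source for a parsed model and write it to out_path."""
    src = generateSource(model, config, template_dir)
    with open(out_path, 'w') as f:
        f.write(src)
    return out_path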
# generate the model variables segment
def generateModelVars(model, config):
diffcount = len(model['diffs'])
algcount = len(model['algs'])
symcount = len(model['symlist'])
varcount = diffcount + algcount
src = '/* Model-specific constants and statics */\n'
src = src + 'const char* MODEL_NAME = "' + config['name'] + '";\n'
if model['version']:
src = src + 'const char* MODEL_VERSION = "' + model['version'] + '";\n'
else:
src = src + 'const char* MODEL_VERSION = "(version not specified)";\n'
if model['diagonal']:
src = src + 'const int DIAGONAL = 1;\n'
src = src + 'const int REQUIRE_MASS = 0;\n'
else:
src = src + 'const int DIAGONAL = 0;\n'
src = src + 'const int REQUIRE_MASS = 1;\n'
src = src + 'const unsigned int DIFF_EQ_COUNT = ' + str(diffcount) + ';\n'
src = src + 'const unsigned int ALGEBRAIC_COUNT = ' + str(algcount) + ';\n'
src = src + 'const unsigned int VAR_COUNT = ' + str(diffcount + algcount) + ';\n'
src = src + 'const unsigned int SYMBOL_COUNT = ' + str(symcount) + ';\n\n'
src = src + 'static char* SYMBOLS[' + str(symcount) + '] = \n{\n'
src = src + formatArray(model['symlist'])
src = src + '};\n\n'
src = src + 'static char* ROOTS[' + str(varcount) + '] = \n{\n'
src = src + formatArray(model['diffs'] + model['algs'])
src = src + '};\n\n'
if model['intermeds']:
src = src + 'static double INTERMEDIATES[' + str(len(model['intermeds'])) + '] = {0};\n\n'
indices = [0]
for name in model['outputs']:
indices.append(model['symbols'][name]['index'])
src = src + 'static int DEFAULT_FIELDS[' + str(len(indices)) + '] = \n{\n'
src = src + formatArray(indices, width=10, quote='')
src = src + '};\n'
src = src + 'static OutputSpec DEFAULT_OUTSPEC = { ' + str(len(indices)) + ', DEFAULT_FIELDS };\n\n'
return src
# generate segment for embedded C chunks
# (by just pasting them all together -- this stuff is not checked)
def generateEmbeds(model):
return '/* Embedded C code from the model, if any */\n\n' + '\n'.join(model['embeds']) + '\n\n'
# generate the model functions segment
def generateModelFuncs(model, config):
if config['unused']:
targets = model['assigned']
else:
targets = list(model['assigned'] - model['unused'])
src = '/* Model-specific functions */\n'
src = src + generateModelInit(model, config, targets)
src = src + generateParamUpdate(model, config, targets)
src = src + generateSaveY(model, config)
src = src + generateSaveIntermediates(model, config)
src = src + generateCarryForward(model, config)
src = src + generateRHS(model, config, targets)
src = src + generateConstraints(model, config)
return src
# generate the model initialisation code
def generateModelInit(model, config, targets):
src = '''
/* Initialise parameters with any values known at compile time.
(NB: these may be overwritten by runtime values) */
void model_init()
{
'''
if not model['diagonal']:
src = src + ' double* mass = radau5_getMassMatrix();\n\n'
if config['debug']: src = src + ' fprintf(stderr, "# Initialising parameters\\n");\n\n'
independent = model['assignments']['independent']
for ii in range(len(independent['names'])):
name = independent['names'][ii]
if name in targets:
expr = independent['exprs'][ii]
idx = model['symbols'][name]['index']
src = src + ' RPAR[' + str(idx) + '] = ' + str_i_expr(expr['i_expr'], model) + ';'
src = src + '\t\t/* ' + name + '=' + expr['expr'] + ' */\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", RPAR[' + str(idx) + ']);\n'
dependent = model['assignments']['dependent']
for ii in range(len(dependent['names'])):
name = dependent['names'][ii]
if name in targets:
expr = dependent['exprs'][ii]
idx = model['symbols'][name]['index']
src = src + ' RPAR[' + str(idx) + '] = ' + str_i_expr(expr['i_expr'], model) + ';'
src = src + '\t\t/* ' + name + '=' + expr['expr'] + ' */\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", RPAR[' + str(idx) + ']);\n'
src = src + '\n constrain_params();\n'
src = src + '\n carry_forward();\n'
if not model['diagonal']:
idy = 0
for item in model['diffs']:
for aux in model['auxiliaries'][item]:
auxname = aux[1]
if auxname not in model['diffs']:
logger.error('Error: auxiliary term not in diffs: ' + auxname)
else:
idx = model['diffs'].index(aux[1])
src = src + '\n /* auxiliary diff eqn term: ' + item + "' : "
src = src + str(aux[0]) + " " + auxname + "' */\n"
# idy indexes the equation, idx the crossref
# Fortran uses column-major order for matrices,
# which I *think* makes this the right way to index
src = src + ' mass[VAR_COUNT * ' + str(idx) + ' + ' + str(idy) + '] = ' + str(aux[0]) + ';\n'
idy = idy + 1
src = src + '}\n'
return src
# generate param_update function
def generateParamUpdate(model, config, targets):
src = '''
/* Propagate parameter changes to any dependent parameters */
void param_update()
{
'''
step = model['assignments']['step']
if len(step) > 0:
if config['debug']: src = src + ' fprintf(stderr, "# Updating dependent parameters:\\n");\n\n'
for ii in range(len(step['names'])):
name = step['names'][ii]
if name not in targets: continue
expr = step['exprs'][ii]
idx = model['symbols'][name]['index']
src = src + ' RPAR[' + str(idx) + '] = ' + str_i_expr(expr['i_expr'], model, 'step') + ';'
src = src + '\t\t/* ' + name + '=' + expr['expr'] + ' */\n'
if config['debug']: src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", RPAR[' + str(idx) + ']);\n\n'
else:
src = src + ' /* no parameters to update for this model */\n'
src = src + '}\n'
return src
def generateSaveY(model, config):
src = '''
/* Copy Y values into corresponding spaces in the RPAR array */
void save_y(double* y)
{
'''
if config['debug']: src = src + ' fprintf(stderr, "# Saving Y estimates\\n");\n'
idy = 0
for item in model['diffs'] + model['algs']:
src = src + ' /* ' + item + ' */\n'
src = src + ' RPAR[' + str(model['symbols'][item]['index']) + '] = y[' + str(idy) + '];\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + item + ' = %.17g\\n", y[' + str(idy) + ']);\n'
idy = idy + 1
src = src + '}\n'
return src
def generateSaveIntermediates(model, config):
src = '''
/* Copy intermediate variables into corresponding spaces in the RPAR array */
void save_intermediates()
{
'''
if config['debug']: src = src + ' fprintf(stderr, "# Saving intermediates\\n");\n'
idy = 0
for item in model['intermeds']:
src = src + ' /* ' + item + ' */\n'
src = src + ' RPAR[' + str(model['symbols'][item]['index']) + '] = INTERMEDIATES[' + str(idy) + '];\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + item + ' = %.17g\\n", INTERMEDIATES[' + str(idy) + ']);\n'
idy = idy + 1
src = src + '}\n'
return src
def generateCarryForward(model, config):
src = '''
/* Update Y array with corresponding values from the RPAR array */
void carry_forward()
{
'''
if config['debug']: src = src + ' fprintf(stderr, "# Setting Y | |
"""Constants used throughout the code.
**Links**
* `ADB key event codes <https://developer.android.com/reference/android/view/KeyEvent>`_
* `MediaSession PlaybackState property <https://developer.android.com/reference/android/media/session/PlaybackState.html>`_
"""
import re
# Intents
INTENT_LAUNCH = "android.intent.category.LAUNCHER"
INTENT_HOME = "android.intent.category.HOME"
# echo '1' if the previous shell command was successful
CMD_SUCCESS1 = r" && echo -e '1\c'"
# echo '1' if the previous shell command was successful, echo '0' if it was not
CMD_SUCCESS1_FAILURE0 = r" && echo -e '1\c' || echo -e '0\c'"
#: Get the audio state
CMD_AUDIO_STATE = r"dumpsys audio | grep paused | grep -qv 'Buffer Queue' && echo -e '1\c' || (dumpsys audio | grep started | grep -qv 'Buffer Queue' && echo '2\c' || echo '0\c')"
#: Determine whether the device is awake
CMD_AWAKE = "dumpsys power | grep mWakefulness | grep -q Awake"
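# Illustrative example (not part of this module's API): appending the success
# suffix turns a probe such as CMD_AWAKE into a one-liner that prints "1" only
# when the probe succeeds. Assumes the `adb` binary is on the PATH.
#
#   import subprocess
#   probe = CMD_AWAKE + CMD_SUCCESS1_FAILURE0
#   out = subprocess.check_output(["adb", "-s", serial, "shell", probe])
#   is_awake = out.decode().strip() == "1"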
#: Get the current app
CMD_CURRENT_APP = "CURRENT_APP=$(dumpsys window windows | grep mCurrentFocus) && CURRENT_APP=${CURRENT_APP#*{* * } && CURRENT_APP=${CURRENT_APP%%/*} && echo $CURRENT_APP"
#: Get the current app for a Google TV device
CMD_CURRENT_APP_GOOGLE_TV = "CURRENT_APP=$(dumpsys activity a . | grep -E 'mResumedActivity' | cut -d ' ' -f 8) && CURRENT_APP=${CURRENT_APP%%/*} && echo $CURRENT_APP"
#: Get the HDMI input
CMD_HDMI_INPUT = "dumpsys activity starter | grep -o 'HDMIInputService\\/HW[0-9]' -m 1 | grep -o 'HW[0-9]'"
#: Launch an app if it is not already the current app
CMD_LAUNCH_APP = "CURRENT_APP=$(dumpsys window windows | grep mCurrentFocus) && CURRENT_APP=${{CURRENT_APP#*{{* * }} && CURRENT_APP=${{CURRENT_APP%%/*}} && if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH + " --pct-syskeys 0 1; fi"
#: Launch an app if it is not already the current app (for Google TV devices)
CMD_LAUNCH_APP_GOOGLE_TV = "CURRENT_APP=$(dumpsys activity a . | grep -E 'mResumedActivity' | cut -d ' ' -f 8) && CURRENT_APP=${{CURRENT_APP%%/*}} && if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH + " --pct-syskeys 0 1; fi"
#: Get the state from ``dumpsys media_session``; this assumes that the variable ``CURRENT_APP`` has been defined
CMD_MEDIA_SESSION_STATE = "dumpsys media_session | grep -A 100 'Sessions Stack' | grep -A 100 $CURRENT_APP | grep -m 1 'state=PlaybackState {'"
#: Determine the current app and get the state from ``dumpsys media_session``
CMD_MEDIA_SESSION_STATE_FULL = CMD_CURRENT_APP + " && " + CMD_MEDIA_SESSION_STATE
#: Get the apps for an Android TV device
CMD_RUNNING_APPS_ANDROIDTV = "pm list packages | awk -F : '{print $2}'"
#: Get the apps for a Fire TV device
CMD_RUNNING_APPS_FIRETV = "pm list packages | awk -F : '{print $2}'"
#: Determine if the device is on
CMD_SCREEN_ON = "(dumpsys power | grep 'Display Power' | grep -q 'state=ON' || dumpsys power | grep -q 'mScreenOn=true')"
#: Get the "STREAM_MUSIC" block from ``dumpsys audio``
CMD_STREAM_MUSIC = r"dumpsys audio | grep '\- STREAM_MUSIC:' -A 11"
#: Get the wake lock size
CMD_WAKE_LOCK_SIZE = "dumpsys power | grep Locks | grep 'size='"
#: Get the properties for an Android TV device (``lazy=True, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for an Android TV device (``lazy=True, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for an Android TV device (``lazy=False, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_NOT_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for an Android TV device (``lazy=False, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_NOT_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for a Google TV device (``lazy=True, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for a Google TV device (``lazy=True, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for a Google TV device (``lazy=False, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_NOT_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for a Google TV device (``lazy=False, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_NOT_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for a Fire TV device (``lazy=True, get_apps=True``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_RUNNING_APPS_FIRETV
#: Get the properties for a Fire TV device (``lazy=True, get_apps=False``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo)"
#: Get the properties for a Fire TV device (``lazy=False, get_apps=True``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_NOT_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_RUNNING_APPS_FIRETV
#: Get the properties for a Fire TV device (``lazy=False, get_apps=False``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_NOT_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo)"
# `getprop` commands
CMD_MANUFACTURER = "getprop ro.product.manufacturer"
CMD_MODEL = "getprop ro.product.model"
CMD_SERIALNO = "getprop ro.serialno"
CMD_VERSION = "getprop ro.build.version.release"
# Commands for getting the MAC address
CMD_MAC_WLAN0 = "ip addr show wlan0 | grep -m 1 ether"
CMD_MAC_ETH0 = "ip addr show eth0 | grep -m 1 ether"
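# Illustrative sketch, not part of this module: the two commands above print a line
# such as "    link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff", so a caller could
# pull the address out of the returned output as shown below.  The helper name and the
# use of the standard-library ``re`` module are assumptions made for this example.
import re


def _example_parse_mac(output):
    """Return the first MAC address found in ``ip addr show`` output, or None."""
    match = re.search(r"ether\s+([0-9A-Fa-f:]{17})", output)
    return match.group(1) if match else None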
# ADB key event codes
# https://developer.android.com/reference/android/view/KeyEvent
KEY_BACK = 4
KEY_BLUE = 186
KEY_CENTER = 23
KEY_COMPONENT1 = 249
KEY_COMPONENT2 = 250
KEY_COMPOSITE1 = 247
KEY_COMPOSITE2 = 248
KEY_DOWN = 20
KEY_END = 123
KEY_ENTER = 66
KEY_ESCAPE = 111
KEY_FAST_FORWARD = 90
KEY_GREEN = 184
KEY_HDMI1 = 243
KEY_HDMI2 = 244
"""
The Conductor.
The conductor is responsible for coordinating messages that are received
over the network, communicating with the ledger, passing messages to handlers,
instantiating concrete implementations of required modules and storing data in the
wallet.
"""
import hashlib
import json
import logging
import os
from ..admin.base_server import BaseAdminServer
from ..admin.server import AdminResponder, AdminServer
from ..config.default_context import ContextBuilder
from ..config.injection_context import InjectionContext
from ..config.ledger import get_genesis_transactions, ledger_config
from ..config.logging import LoggingConfigurator
from ..config.wallet import wallet_config
from ..core.profile import Profile
from ..ledger.error import LedgerConfigError, LedgerTransactionError
from ..messaging.responder import BaseResponder
from ..protocols.connections.v1_0.manager import (
ConnectionManager,
ConnectionManagerError,
)
from ..protocols.out_of_band.v1_0.manager import OutOfBandManager
from ..protocols.out_of_band.v1_0.messages.invitation import InvitationMessage
from ..transport.inbound.manager import InboundTransportManager
from ..transport.inbound.message import InboundMessage
from ..transport.outbound.base import OutboundDeliveryError
from ..transport.outbound.manager import OutboundTransportManager, QueuedOutboundMessage
from ..transport.outbound.message import OutboundMessage
from ..transport.wire_format import BaseWireFormat
from ..utils.stats import Collector
from ..multitenant.manager import MultitenantManager
from ..utils.task_queue import CompletedTask, TaskQueue
from ..wallet.base import DIDInfo
from .dispatcher import Dispatcher
LOGGER = logging.getLogger(__name__)
class Conductor:
"""
Conductor class.
Class responsible for initializing concrete implementations
of our required interfaces and routing inbound and outbound message data.
"""
def __init__(self, context_builder: ContextBuilder) -> None:
"""
Initialize an instance of Conductor.
Args:
context_builder: Builder used to construct the injection context for this conductor
"""
self.admin_server = None
self.context_builder = context_builder
self.dispatcher: Dispatcher = None
self.inbound_transport_manager: InboundTransportManager = None
self.outbound_transport_manager: OutboundTransportManager = None
self.root_profile: Profile = None
self.setup_public_did: DIDInfo = None
@property
def context(self) -> InjectionContext:
"""Accessor for the injection context."""
return self.root_profile.context
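# Usage sketch (assumptions: an asyncio event loop and a concrete ContextBuilder are
# available; names other than Conductor's own methods are illustrative):
#
#     conductor = Conductor(context_builder)
#     await conductor.setup()    # build profile, transports, dispatcher, admin API
#     await conductor.start()    # open transports, print banner and any invitations
#     ...                        # agent runs; messages flow through the routers below
#     await conductor.stop()     # drain queues and close profiles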
async def setup(self):
"""Initialize the global request context."""
context = await self.context_builder.build_context()
# Fetch genesis transactions if necessary
await get_genesis_transactions(context.settings)
# Configure the root profile
self.root_profile, self.setup_public_did = await wallet_config(context)
context = self.root_profile.context
# Configure the ledger
if not await ledger_config(
self.root_profile, self.setup_public_did and self.setup_public_did.did
):
LOGGER.warning("No ledger configured")
# Register all inbound transports
self.inbound_transport_manager = InboundTransportManager(
self.root_profile, self.inbound_message_router, self.handle_not_returned
)
await self.inbound_transport_manager.setup()
# Register all outbound transports
self.outbound_transport_manager = OutboundTransportManager(
context, self.handle_not_delivered
)
await self.outbound_transport_manager.setup()
# Initialize dispatcher
self.dispatcher = Dispatcher(self.root_profile)
await self.dispatcher.setup()
wire_format = context.inject(BaseWireFormat, required=False)
if wire_format and hasattr(wire_format, "task_queue"):
wire_format.task_queue = self.dispatcher.task_queue
# Bind manager for multitenancy related tasks
if context.settings.get("multitenant.enabled"):
multitenant_mgr = MultitenantManager(self.root_profile)
context.injector.bind_instance(MultitenantManager, multitenant_mgr)
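# For reference, a settings mapping that exercises the multitenancy and admin branches
# in this method might look roughly like the following; the keys are the ones read
# above and below, while the values are purely illustrative:
#
#     {
#         "multitenant.enabled": True,
#         "admin.enabled": True,
#         "admin.host": "0.0.0.0",
#         "admin.port": "8031",
#         "admin.webhook_urls": ["http://localhost:8022/webhooks"],
#     }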
# Admin API
if context.settings.get("admin.enabled"):
try:
admin_host = context.settings.get("admin.host", "0.0.0.0")
admin_port = context.settings.get("admin.port", "80")
self.admin_server = AdminServer(
admin_host,
admin_port,
context,
self.root_profile,
self.outbound_message_router,
self.webhook_router,
self.stop,
self.dispatcher.task_queue,
self.get_stats,
)
webhook_urls = context.settings.get("admin.webhook_urls")
if webhook_urls:
for url in webhook_urls:
self.admin_server.add_webhook_target(url)
context.injector.bind_instance(BaseAdminServer, self.admin_server)
if "http" not in self.outbound_transport_manager.registered_schemes:
self.outbound_transport_manager.register("http")
except Exception:
LOGGER.exception("Unable to register admin server")
raise
# Fetch stats collector, if any
collector = context.inject(Collector, required=False)
if collector:
# add stats to our own methods
collector.wrap(
self,
(
# "inbound_message_router",
"outbound_message_router",
# "create_inbound_session",
),
)
# wrapping at the class level (!) should not be performed multiple times
collector.wrap(
ConnectionManager,
(
# "get_connection_targets",
"fetch_did_document",
"find_inbound_connection",
),
)
async def start(self) -> None:
"""Start the agent."""
context = self.root_profile.context
# Start up transports
try:
await self.inbound_transport_manager.start()
except Exception:
LOGGER.exception("Unable to start inbound transports")
raise
try:
await self.outbound_transport_manager.start()
except Exception:
LOGGER.exception("Unable to start outbound transports")
raise
# Start up Admin server
if self.admin_server:
try:
await self.admin_server.start()
except Exception:
LOGGER.exception("Unable to start administration API")
# Make admin responder available during message parsing
# This allows webhooks to be called when a connection is marked active,
# for example
responder = AdminResponder(
context,
self.admin_server.outbound_message_router,
self.admin_server.send_webhook,
)
context.injector.bind_instance(BaseResponder, responder)
# Get agent label
default_label = context.settings.get("default_label")
# Show some details about the configuration to the user
LoggingConfigurator.print_banner(
default_label,
self.inbound_transport_manager.registered_transports,
self.outbound_transport_manager.registered_transports,
self.setup_public_did and self.setup_public_did.did,
self.admin_server,
)
# Create a static connection for use by the test-suite
if context.settings.get("debug.test_suite_endpoint"):
async with self.root_profile.session() as session:
mgr = ConnectionManager(session)
their_endpoint = context.settings["debug.test_suite_endpoint"]
test_conn = await mgr.create_static_connection(
my_seed=hashlib.sha256(b"aries-protocol-test-subject").digest(),
their_seed=hashlib.sha256(b"aries-protocol-test-suite").digest(),
their_endpoint=their_endpoint,
alias="test-suite",
)
print("Created static connection for test suite")
print(" - My DID:", test_conn.my_did)
print(" - Their DID:", test_conn.their_did)
print(" - Their endpoint:", their_endpoint)
print()
del mgr
# Print an invitation to the terminal
if context.settings.get("debug.print_invitation"):
try:
async with self.root_profile.session() as session:
mgr = OutOfBandManager(session)
invi_rec = await mgr.create_invitation(
my_label=context.settings.get("debug.invite_label"),
public=context.settings.get("debug.invite_public", False),
multi_use=context.settings.get("debug.invite_multi_use", False),
include_handshake=True,
metadata=json.loads(
context.settings.get("debug.invite_metadata_json", "{}")
),
)
base_url = context.settings.get("invite_base_url")
invite_url = InvitationMessage.deserialize(
invi_rec.invitation
).to_url(base_url)
print("Invitation plaintext:")
print(json.dumps(invi_rec.invitation))
print("Invitation URL:")
print(invite_url, flush=True)
write_invitation_to = \
context.settings.get("write_invitation_to", None)
if write_invitation_to is not None:
expanded_path = os.path.expanduser(write_invitation_to)
with open(expanded_path, "w") as handle:
encoded_invitation = invite_url.split("?oob=")[-1]
handle.write(encoded_invitation)
print(f"Wrote invitation to {expanded_path}")
del mgr
except Exception:
LOGGER.exception("Error creating invitation")
# Print connections protocol invitation to the terminal
if context.settings.get("debug.print_connections_invitation"):
try:
async with self.root_profile.session() as session:
mgr = ConnectionManager(session)
_record, invite = await mgr.create_invitation(
my_label=context.settings.get("debug.invite_label"),
public=context.settings.get("debug.invite_public", False),
multi_use=context.settings.get("debug.invite_multi_use", False),
metadata=json.loads(
context.settings.get("debug.invite_metadata_json", "{}")
),
)
base_url = context.settings.get("invite_base_url")
print("Invitation URL (Connections protocol):")
print(invite.to_url(base_url), flush=True)
del mgr
except Exception:
LOGGER.exception("Error creating invitation")
async def stop(self, timeout=1.0):
"""Stop the agent."""
shutdown = TaskQueue()
if self.dispatcher:
shutdown.run(self.dispatcher.complete())
if self.admin_server:
shutdown.run(self.admin_server.stop())
if self.inbound_transport_manager:
shutdown.run(self.inbound_transport_manager.stop())
if self.outbound_transport_manager:
shutdown.run(self.outbound_transport_manager.stop())
# close multitenant profiles
multitenant_mgr = self.context.inject(MultitenantManager, required=False)
if multitenant_mgr:
for profile in multitenant_mgr._instances.values():
shutdown.run(profile.close())
if self.root_profile:
shutdown.run(self.root_profile.close())
await shutdown.complete(timeout)
def inbound_message_router(
self,
profile: Profile,
message: InboundMessage,
can_respond: bool = False,
):
"""
Route inbound messages.
Args:
profile: The profile associated with the inbound message
message: The inbound message instance
can_respond: If the session supports return routing
"""
if message.receipt.direct_response_requested and not can_respond:
LOGGER.warning(
"Direct response requested, but not supported by transport: %s",
message.transport_type,
)
# Note: at this point we could send the message to a shared queue
# if this pod is too busy to process it
try:
self.dispatcher.queue_message(
profile,
message,
self.outbound_message_router,
self.admin_server and self.admin_server.send_webhook,
lambda completed: self.dispatch_complete(message, completed),
)
except (LedgerConfigError, LedgerTransactionError) as e:
LOGGER.error("Shutdown on ledger error %s", str(e))
if self.admin_server:
self.admin_server.notify_fatal_error()
raise
def dispatch_complete(self, message: InboundMessage, completed: CompletedTask):
"""Handle completion of message dispatch."""
if completed.exc_info:
LOGGER.exception(
"Exception in message handler:", exc_info=completed.exc_info
)
if isinstance(completed.exc_info[1], LedgerConfigError) or isinstance(
completed.exc_info[1], LedgerTransactionError
):
LOGGER.error(
"%shutdown on ledger error %s",
"S" if self.admin_server else "No admin server to s",
str(completed.exc_info[1]),
)
if self.admin_server:
self.admin_server.notify_fatal_error()
else:
LOGGER.error(
"DON'T shutdown on %s %s",
completed.exc_info[0].__name__,
str(completed.exc_info[1]),
)
self.inbound_transport_manager.dispatch_complete(message, completed)
async def get_stats(self) -> dict:
"""Get the current stats tracked by the conductor."""
stats = {
"in_sessions": len(self.inbound_transport_manager.sessions),
"out_encode": 0,
"out_deliver": 0,
"task_active": self.dispatcher.task_queue.current_active,
"task_done": self.dispatcher.task_queue.total_done,
"task_failed": self.dispatcher.task_queue.total_failed,
"task_pending": self.dispatcher.task_queue.current_pending,
}
for m in self.outbound_transport_manager.outbound_buffer:
if m.state == QueuedOutboundMessage.STATE_ENCODE:
stats["out_encode"] += 1
if m.state == QueuedOutboundMessage.STATE_DELIVER:
stats["out_deliver"] += 1
return stats
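# For illustration, a returned stats mapping might look like this (values made up):
#
#     {"in_sessions": 2, "out_encode": 0, "out_deliver": 1,
#      "task_active": 1, "task_done": 42, "task_failed": 0, "task_pending": 3}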
async def outbound_message_router(
self,
profile: Profile,
outbound: OutboundMessage,
inbound: InboundMessage = None,
) -> None:
"""
Route an outbound message.
Args:
profile: The active profile for the request
outbound: An outbound message to be sent
inbound: The inbound message that produced this response, if available
"""
if not outbound.target and outbound.reply_to_verkey:
if not outbound.reply_from_verkey and inbound:
outbound.reply_from_verkey = inbound.receipt.recipient_verkey
# return message to an inbound session
if self.inbound_transport_manager.return_to_session(outbound):
return
if not outbound.to_session_only:
await self.queue_outbound(profile, outbound, inbound)
def handle_not_returned(self, profile: Profile, outbound: OutboundMessage):
"""Handle a message that failed delivery via an inbound session."""
try:
self.dispatcher.run_task(self.queue_outbound(profile, outbound))
except (LedgerConfigError, LedgerTransactionError) as e:
LOGGER.error("Shutdown on ledger error %s", str(e))
if self.admin_server:
self.admin_server.notify_fatal_error()
raise
async def queue_outbound(
self,
profile: Profile,
outbound: OutboundMessage,
inbound: InboundMessage = None,
):
"""
Queue an outbound message.
Args:
profile: The active profile
outbound: An outbound message to be sent
inbound: The inbound message that produced this response, if available
"""
# populate connection target(s)
if not outbound.target and not outbound.target_list and outbound.connection_id:
async with profile.session() as session:
conn_mgr = ConnectionManager(session)
try:
outbound.target_list = await self.dispatcher.run_task(
conn_mgr.get_connection_targets(
connection_id=outbound.connection_id
)
)
except ConnectionManagerError:
LOGGER.exception(
"Error preparing outbound message for transmission"
)
return
except (LedgerConfigError, LedgerTransactionError) as e:
LOGGER.error("Shutdown on ledger error %s", str(e))
if self.admin_server:
self.admin_server.notify_fatal_error()
raise
del conn_mgr
try:
self.outbound_transport_manager.enqueue_message(profile, outbound)
except OutboundDeliveryError:
LOGGER.warning("Cannot queue message for delivery, no supported transport")
self.handle_not_delivered(profile, outbound)
def handle_not_delivered(self, profile: Profile, outbound: OutboundMessage):
"""Handle a message that failed delivery via outbound transports."""
self.inbound_transport_manager.return_undelivered(outbound)
def webhook_router(
self,
topic: str,
payload: dict,
endpoint: str,
max_attempts: int = None,
metadata: dict = None,
):
"""
Route a webhook through the outbound transport manager.
Args:
topic: The webhook topic
payload: The webhook payload
endpoint: The endpoint of the webhook target
max_attempts: The maximum number of attempts
metadata: Additional metadata associated with the payload
"""
try:
self.outbound_transport_manager.enqueue_webhook(
topic, payload, endpoint, max_attempts, metadata
)
# Copyright 2017-2019, <NAME> and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `Tor Ed25519 certificates
<https://gitweb.torproject.org/torspec.git/tree/cert-spec.txt>`_, which are
used for a variety of purposes...
* validating the key used to sign server descriptors
* validating the key used to sign hidden service v3 descriptors
* signing and encrypting hidden service v3 introductory points
.. versionadded:: 1.6.0
**Module Overview:**
::
Ed25519Certificate - Ed25519 signing key certificate
| +- Ed25519CertificateV1 - version 1 Ed25519 certificate
| |- is_expired - checks if certificate is presently expired
| |- signing_key - certificate signing key
| +- validate - validates a descriptor's signature
|
|- from_base64 - decodes a base64 encoded certificate
|- to_base64 - base64 encoding of this certificate
|
|- unpack - decodes a byte encoded certificate
+- pack - byte encoding of this certificate
Ed25519Extension - extension included within an Ed25519Certificate
.. data:: CertType (enum)
Purpose of Ed25519 certificate. For more information see...
* `cert-spec.txt <https://gitweb.torproject.org/torspec.git/tree/cert-spec.txt>`_ section A.1
* `rend-spec-v3.txt <https://gitweb.torproject.org/torspec.git/tree/rend-spec-v3.txt>`_ appendix E
.. deprecated:: 1.8.0
Replaced with :data:`stem.client.datatype.CertType`
======================== ===========
CertType Description
======================== ===========
**SIGNING** signing key with an identity key
**LINK_CERT** TLS link certificate signed with ed25519 signing key
**AUTH** authentication key signed with ed25519 signing key
**HS_V3_DESC_SIGNING** hidden service v3 short-term descriptor signing key
**HS_V3_INTRO_AUTH** hidden service v3 introductory point authentication key
**HS_V3_INTRO_ENCRYPT** hidden service v3 introductory point encryption key
======================== ===========
.. data:: ExtensionType (enum)
Recognized extension types.
==================== ===========
ExtensionType Description
==================== ===========
**HAS_SIGNING_KEY** includes key used to sign the certificate
==================== ===========
.. data:: ExtensionFlag (enum)
Flags that can be assigned to Ed25519 certificate extensions.
====================== ===========
ExtensionFlag Description
====================== ===========
**AFFECTS_VALIDATION** extension affects whether the certificate is valid
**UNKNOWN** extension includes flags not yet recognized by stem
====================== ===========
"""
import base64
import binascii
import datetime
import hashlib
import re
import stem.descriptor.hidden_service
import stem.descriptor.server_descriptor
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.str_tools
from stem.client.datatype import Field, Size, split
# TODO: Importing under an alternate name until we can deprecate our redundant
# CertType enum in Stem 2.x.
from stem.client.datatype import CertType as ClientCertType
ED25519_KEY_LENGTH = 32
ED25519_HEADER_LENGTH = 40
ED25519_SIGNATURE_LENGTH = 64
SIG_PREFIX_SERVER_DESC = b'Tor router descriptor signature v1'
SIG_PREFIX_HS_V3 = b'Tor onion service descriptor sig v3'
DEFAULT_EXPIRATION_HOURS = 54 # HSv3 certificate expiration of tor
CertType = stem.util.enum.UppercaseEnum(
'SIGNING',
'LINK_CERT',
'AUTH',
'HS_V3_DESC_SIGNING',
'HS_V3_INTRO_AUTH',
'HS_V3_INTRO_ENCRYPT',
)
ExtensionType = stem.util.enum.Enum(('HAS_SIGNING_KEY', 4),)
ExtensionFlag = stem.util.enum.UppercaseEnum('AFFECTS_VALIDATION', 'UNKNOWN')
class Ed25519Extension(Field):
"""
Extension within an Ed25519 certificate.
:var stem.descriptor.certificate.ExtensionType type: extension type
:var list flags: extension attribute flags
:var int flag_int: integer encoding of the extension attribute flags
:var bytes data: data the extension concerns
"""
def __init__(self, ext_type, flag_val, data):
self.type = ext_type
self.flags = []
self.flag_int = flag_val if flag_val else 0
self.data = data
if flag_val and flag_val % 2 == 1:
self.flags.append(ExtensionFlag.AFFECTS_VALIDATION)
flag_val -= 1
if flag_val:
self.flags.append(ExtensionFlag.UNKNOWN)
if ext_type == ExtensionType.HAS_SIGNING_KEY and len(data) != 32:
raise ValueError('Ed25519 HAS_SIGNING_KEY extension must be 32 bytes, but was %i.' % len(data))
def pack(self):
encoded = bytearray()
encoded += Size.SHORT.pack(len(self.data))
encoded += Size.CHAR.pack(self.type)
encoded += Size.CHAR.pack(self.flag_int)
encoded += self.data
return bytes(encoded)
@staticmethod
def pop(content):
if len(content) < 4:
raise ValueError('Ed25519 extension is missing header fields')
data_size, content = Size.SHORT.pop(content)
ext_type, content = Size.CHAR.pop(content)
flags, content = Size.CHAR.pop(content)
data, content = split(content, data_size)
if len(data) != data_size:
raise ValueError("Ed25519 extension is truncated. It should have %i bytes of data but there's only %i." % (data_size, len(data)))
return Ed25519Extension(ext_type, flags, data), content
def __hash__(self):
return stem.util._hash_attr(self, 'type', 'flag_int', 'data', cache = True)
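# Round-trip sketch (hedged): packing an extension and popping it back should yield an
# equivalent object; the 32 zero bytes below merely stand in for a real signing key.
#
#     ext = Ed25519Extension(ExtensionType.HAS_SIGNING_KEY, 0, b'\x00' * 32)
#     parsed, remainder = Ed25519Extension.pop(ext.pack())
#     assert parsed.type == ext.type and remainder == b''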
class Ed25519Certificate(object):
"""
Base class for an Ed25519 certificate.
:var int version: certificate format version
:var unicode encoded: base64 encoded ed25519 certificate
"""
def __init__(self, version):
self.version = version
self.encoded = None # TODO: remove in stem 2.x
@staticmethod
def unpack(content):
"""
Parses a byte encoded ED25519 certificate.
:param bytes content: encoded certificate
:returns: :class:`~stem.descriptor.certificate.Ed25519Certificate` subclass
for the given certificate
:raises: **ValueError** if certificate is malformed
"""
version = Size.CHAR.pop(content)[0]
if version == 1:
return Ed25519CertificateV1.unpack(content)
else:
raise ValueError('Ed25519 certificate is version %i. Parser presently only supports version 1.' % version)
@staticmethod
def from_base64(content):
"""
Parses a base64 encoded ED25519 certificate.
:param str content: base64 encoded certificate
:returns: :class:`~stem.descriptor.certificate.Ed25519Certificate` subclass
for the given certificate
:raises: **ValueError** if content is malformed
"""
content = stem.util.str_tools._to_unicode(content)
if content.startswith('-----BEGIN ED25519 CERT-----\n') and content.endswith('\n-----END ED25519 CERT-----'):
content = content[29:-27]
try:
decoded = base64.b64decode(content)
if not decoded:
raise TypeError('empty')
instance = Ed25519Certificate.unpack(decoded)
instance.encoded = content
return instance
except (TypeError, binascii.Error) as exc:
raise ValueError("Ed25519 certificate wasn't propoerly base64 encoded (%s):\n%s" % (exc, content))
def pack(self):
"""
Encoded byte representation of our certificate.
:returns: **bytes** for our encoded certificate representation
"""
raise NotImplementedError('Certificate encoding has not been implemented for %s' % type(self).__name__)
def to_base64(self, pem = False):
"""
Base64 encoded certificate data.
:param bool pem: include `PEM header/footer
<https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail>`_, for more
information see `RFC 7468 <https://tools.ietf.org/html/rfc7468>`_
:returns: **unicode** for our encoded certificate representation
"""
encoded = b'\n'.join(stem.util.str_tools._split_by_length(base64.b64encode(self.pack()), 64))
if pem:
encoded = b'-----BEGIN ED25519 CERT-----\n%s\n-----END ED25519 CERT-----' % encoded
return stem.util.str_tools._to_unicode(encoded)
@staticmethod
def _from_descriptor(keyword, attribute):
def _parse(descriptor, entries):
value, block_type, block_contents = entries[keyword][0]
if not block_contents or block_type != 'ED25519 CERT':
raise ValueError("'%s' should be followed by a ED25519 CERT block, but was a %s" % (keyword, block_type))
setattr(descriptor, attribute, Ed25519Certificate.from_base64(block_contents))
return _parse
def __str__(self):
return self.to_base64(pem = True)
@staticmethod
def parse(content):
return Ed25519Certificate.from_base64(content) # TODO: drop this alias in stem 2.x
class Ed25519CertificateV1(Ed25519Certificate):
"""
Version 1 Ed25519 certificate, which are used for signing tor server
descriptors.
:var stem.client.datatype.CertType type: certificate purpose
:var int type_int: integer value of the certificate purpose
:var datetime expiration: expiration of the certificate
:var int key_type: format of the key
:var bytes key: key content
:var list extensions: :class:`~stem.descriptor.certificate.Ed25519Extension` in this certificate
:var bytes signature: certificate signature
:param bytes signature: pre-calculated certificate signature
:param cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey signing_key: certificate signing key
"""
def __init__(self, cert_type = None, expiration = None, key_type = None, key = None, extensions = None, signature = None, signing_key = None):
super(Ed25519CertificateV1, self).__init__(1)
if cert_type is None:
raise ValueError('Certificate type is required')
elif key is None:
raise ValueError('Certificate key is required')
self.type, self.type_int = ClientCertType.get(cert_type)
self.expiration = expiration if expiration else datetime.datetime.utcnow() + datetime.timedelta(hours = DEFAULT_EXPIRATION_HOURS)
self.key_type = key_type if key_type else 1
self.key = stem.util._pubkey_bytes(key)
self.extensions = extensions if extensions else []
self.signature = signature
if signing_key:
calculated_sig = signing_key.sign(self.pack())
# if caller provides both signing key *and* signature then ensure they match
if self.signature and self.signature != calculated_sig:
raise ValueError("Signature calculated from its key (%s) mismatches '%s'" % (calculated_sig, self.signature))
self.signature = calculated_sig
if self.type in (ClientCertType.LINK, ClientCertType.IDENTITY, ClientCertType.AUTHENTICATE):
raise ValueError('Ed25519 certificate cannot have a type of %i. This is reserved for CERTS cells.' % self.type_int)
elif self.type == ClientCertType.ED25519_IDENTITY:
raise ValueError('Ed25519 certificate cannot have a type of 7. This is reserved for RSA identity cross-certification.')
elif self.type == ClientCertType.UNKNOWN:
raise ValueError('Ed25519 certificate type %i is unrecognized' % self.type_int)
def pack(self):
encoded = bytearray()
encoded += Size.CHAR.pack(self.version)
encoded += Size.CHAR.pack(self.type_int)
encoded += Size.LONG.pack(int(stem.util.datetime_to_unix(self.expiration) / 3600))
encoded += Size.CHAR.pack(self.key_type)
encoded += self.key
encoded += Size.CHAR.pack(len(self.extensions))
for extension in self.extensions:
encoded += extension.pack()
if self.signature:
encoded += self.signature
return bytes(encoded)
@staticmethod
def unpack(content):
if len(content) < ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH:
raise ValueError('Ed25519 certificate was %i bytes, but should be at least %i' % (len(content), ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH))
header, signature = split(content, len(content) - ED25519_SIGNATURE_LENGTH)
version, header = Size.CHAR.pop(header)
cert_type, header = Size.CHAR.pop(header)
expiration_hours, header = Size.LONG.pop(header)
key_type, header = Size.CHAR.pop(header)
key, header = split(header, ED25519_KEY_LENGTH)
extension_count, extension_data = Size.CHAR.pop(header)
if version != 1:
raise ValueError('Ed25519 v1 parser cannot read version %i certificates' % version)
extensions = []
for i in range(extension_count):
extension, extension_data = Ed25519Extension.pop(extension_data)
extensions.append(extension)
if extension_data:
raise ValueError('Ed25519 certificate had %i bytes of unused extension data' % len(extension_data))
return Ed25519CertificateV1(cert_type, datetime.datetime.utcfromtimestamp(expiration_hours * 3600), key_type, key, extensions, signature)
def is_expired(self):
"""
Checks if this certificate is presently expired or not.
:returns: **True** if the certificate has expired, **False** otherwise
"""
return datetime.datetime.now() > self.expiration
def signing_key(self):
"""
Provides this certificate's signing key.
.. versionadded:: 1.8.0
:returns: **bytes** with the first signing key
u0 {1,S}
4 Ct u0 {1,S}
5 H u0 {1,S}
6 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=SC=SH",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 CS u0 {1,S} {7,D}
4 CS u0 {1,S} {8,D}
5 H u0 {1,S}
6 S2d u0 {2,D}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1178,
label = "Cs-C=SCsCsH",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 H u0 {1,S}
6 S2d u0 {2,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.78,6.25,7.44,8.35,9.57,10.31,11.2],'cal/(mol*K)'),
H298 = (-0.78,'kcal/mol'),
S298 = (-11.46,'cal/(mol*K)'),
),
shortDesc = u"""CBS-QB3 GA 1D-HR <NAME> 2010""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-CtCtC=SH",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 Ct u0 {1,S}
4 Ct u0 {1,S}
5 H u0 {1,S}
6 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-CbCbC=SH",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {6,D}
3 Cb u0 {1,S}
4 Cb u0 {1,S}
5 H u0 {1,S}
6 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cd)H",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 H u0 {1,S}
6 C u0 {4,D}
7 S2d u0 {2,D}
8 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cds)H",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 H u0 {1,S}
6 Cd u0 {4,D}
7 S2d u0 {2,D}
8 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cdd)H",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CS u0 {1,S} {7,D}
3 CS u0 {1,S} {8,D}
4 Cd u0 {1,S} {6,D}
5 H u0 {1,S}
6 Cdd u0 {4,D}
7 S2d u0 {2,D}
8 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cdd-S2d)H",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {7,D}
4 CS u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 H u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
9 S2d u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cs-C=SC=S(Cds-Cdd-Cd)H",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {6,S}
2 Cd u0 {1,S} {5,D}
3 CS u0 {1,S} {7,D}
4 CS u0 {1,S} {8,D}
5 Cdd u0 {2,D} {9,D}
6 H u0 {1,S}
7 S2d u0 {3,D}
8 S2d u0 {4,D}
9 C u0 {5,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 519,
label = "Cs-CCCC",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 C u0 {1,S}
3 C u0 {1,S}
4 C u0 {1,S}
5 C u0 {1,S}
""",
thermo = u'Cs-CsCsCsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 520,
label = "Cs-CsCsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cs u0 {1,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.37,6.13,7.36,8.12,8.77,8.76,8.12],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
H298 = (0.5,'kcal/mol','+|-',0.27),
S298 = (-35.1,'cal/(mol*K)','+|-',0.15),
),
shortDesc = u"""Cs-CsCsCsCs BENSON""",
longDesc =
u"""
""",
)
entry(
index = 521,
label = "Cs-CdsCsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 [Cd,CO] u0 {1,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = u'Cs-(Cds-Cds)CsCsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 522,
label = "Cs-(Cds-O2d)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 O2d u0 {2,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([22.68,27.48,30.12,31.51,32.36,32.39,32.42],'J/(mol*K)','+|-',[3.34,3.34,3.34,3.34,3.34,3.34,3.34]),
H298 = (4.6,'kJ/mol','+|-',2.85),
S298 = (-140.94,'J/(mol*K)','+|-',3.9),
),
shortDesc = u"""Derived from CBS-QB3 calculation with 1DHR treatment""",
longDesc =
u"""
Derived using calculations at B3LYP/6-311G(d,p)/CBS-QB3 level of theory. 1DH-rotors
optimized at the B3LYP/6-31G(d).Paraskevas et al, Chem. Eur. J. 2013, 19, 16431-16452,
DOI: 10.1002/chem.201301381
""",
)
entry(
index = 523,
label = "Cs-(Cds-Cd)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 C u0 {2,D}
""",
thermo = u'Cs-(Cds-Cds)CsCsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 524,
label = "Cs-(Cds-Cds)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 Cd u0 {2,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([3.99,6.04,7.43,8.26,8.92,8.96,8.23],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
H298 = (1.68,'kcal/mol','+|-',0.27),
S298 = (-34.72,'cal/(mol*K)','+|-',0.15),
),
shortDesc = u"""Cs-CdCsCsCs BENSON""",
longDesc =
u"""
""",
)
entry(
index = 525,
label = "Cs-(Cds-Cdd)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 Cdd u0 {2,D}
""",
thermo = u'Cs-(Cds-Cdd-Cd)CsCsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 526,
label = "Cs-(Cds-Cdd-O2d)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 Cs u0 {1,S}
7 O2d u0 {3,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([20.63,27.65,31.98,34.41,36.16,36.25,35.2],'J/(mol*K)','+|-',[6.93,6.93,6.93,6.93,6.93,6.93,6.93]),
H298 = (-4.5,'kJ/mol','+|-',5.9),
S298 = (-144.08,'J/(mol*K)','+|-',8.08),
),
shortDesc = u"""Derived from CBS-QB3 calculation with 1DHR treatment""",
longDesc =
u"""
Derived using calculations at B3LYP/6-311G(d,p)/CBS-QB3 level of theory. 1DH-rotors
optimized at the B3LYP/6-31G(d).Paraskevas et al, Chem. Eur. J. 2013, 19, 16431-16452,
DOI: 10.1002/chem.201301381
""",
)
entry(
index = -1,
label = "Cs-(Cds-Cdd-S2d)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 Cs u0 {1,S}
7 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 527,
label = "Cs-(Cds-Cdd-Cd)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {4,S} {5,S} {6,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {7,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 Cs u0 {1,S}
7 C u0 {3,D}
""",
thermo = u'Cs-(Cds-Cds)CsCsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 1866,
label = "Cs-(CdN3d)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cd u0 {1,S} {6,D} {7,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 N3d u0 {2,D}
7 R u0 {2,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([5.3,6.6,7.3,7.5,7.8,7.8,7.7],'cal/(mol*K)','+|-',[1.2,1.2,1.2,1.2,1.2,1.2,1.2]),
H298 = (0.6,'kcal/mol','+|-',1.7),
S298 = (-33.5,'cal/(mol*K)','+|-',1.6),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 528,
label = "Cs-CtCsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.37,6.79,8.09,8.78,9.19,8.96,7.63],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
H298 = (2.81,'kcal/mol','+|-',0.27),
S298 = (-35.18,'cal/(mol*K)','+|-',0.15),
),
shortDesc = u"""Cs-CtCsCsCs BENSON""",
longDesc =
u"""
""",
)
entry(
index = 1834,
label = "Cs-(CtN3t)CsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Ct u0 {1,S} {6,T}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 N3t u0 {2,T}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([11.4,13.4,14.6,15.3,16.3,16.7,17.2],'cal/(mol*K)','+|-',[1,1,1,1,1,1,1]),
H298 = (28.3,'kcal/mol','+|-',1.3),
S298 = (-3,'cal/(mol*K)','+|-',1.2),
),
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 529,
label = "Cs-CbCsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 Cb u0 {1,S}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.37,6.79,8.09,8.78,9.19,8.96,7.63],'cal/(mol*K)','+|-',[0.13,0.13,0.13,0.13,0.13,0.13,0.13]),
H298 = (2.81,'kcal/mol','+|-',0.26),
S298 = (-35.18,'cal/(mol*K)','+|-',0.13),
),
shortDesc = u"""Cs-CbCsCsCs BENSON""",
longDesc =
u"""
""",
)
entry(
index = 530,
label = "Cs-CdsCdsCsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 [Cd,CO] u0 {1,S}
3 [Cd,CO] u0 {1,S}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
""",
thermo = u'Cs-(Cds-Cds)(Cds-Cds)CsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 531,
label = "Cs-(Cds-O2d)(Cds-O2d)CsCs",
group =
"""
1 * Cs u0 {2,S} {3,S} {4,S} {5,S}
2 CO u0 {1,S} {6,D}
3 CO u0 {1,S} {7,D}
4 Cs u0 {1,S}
5 Cs u0 {1,S}
6 O2d u0 {2,D}
7 O2d u0 {3,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([33.76,33.42,32.6,31.91,31.01,30.55,30.35],'J/(mol*K)','+|-',[5.08,5.08,5.08,5.08,5.08,5.08,5.08]),
H298 = (14.9,'kJ/mol','+|-',4.33),
S298 = (-146.69,'J/(mol*K)','+|-',5.92),
| |
!= len([t for t in notebook_block_types if t == 'cell']):
errwarn('*** error: internal error')
errwarn(' each code block should have a code environment')
_abort()
# Go through tex_blocks and wrap math blocks in $$
# (doconce.py runs align2equations so there are no align/align*
# environments in tex blocks)
label2tag = {}
tag_counter = 1
for i in range(len(tex_blocks)):
# Extract labels and add tags
labels = re.findall(r'label\{(.+?)\}', tex_blocks[i])
for label in labels:
label2tag[label] = tag_counter
# Insert tag to get labeled equation
tex_blocks[i] = tex_blocks[i].replace(
'label{%s}' % label, 'label{%s} \\tag{%s}' % (label, tag_counter))
tag_counter += 1
# Remove \[ and \] or \begin/end{equation*} in single equations
tex_blocks[i] = tex_blocks[i].replace(r'\[', '')
tex_blocks[i] = tex_blocks[i].replace(r'\]', '')
tex_blocks[i] = tex_blocks[i].replace(r'\begin{equation*}', '')
tex_blocks[i] = tex_blocks[i].replace(r'\end{equation*}', '')
# Check for illegal environments
m = re.search(r'\\begin\{(.+?)\}', tex_blocks[i])
if m:
envir = m.group(1)
if envir not in ('equation', 'equation*', 'align*', 'align',
'array'):
errwarn(
('\n*** warning: latex envir \\begin{%s} does not work well in Markdown.'
' Stick to \\[ ... \\], equation, equation*, align, or align*'
' environments in math environments.') % envir)
eq_type = 'heading' # or '$$'
eq_type = '$$'
# Markdown: add $$ on each side of the equation
if eq_type == '$$':
# Make sure there are no newline after equation
tex_blocks[i] = '$$\n' + tex_blocks[i].strip() + '\n$$'
# Here: use heading (###) and simple formula (remove newline
# in math expressions to keep everything within a heading) as
# the equation then looks bigger
elif eq_type == 'heading':
tex_blocks[i] = '### $ ' + ' '.join(tex_blocks[i].splitlines()) + ' $'
# Add labels for the eqs above the block (for reference)
if labels:
#label_tp = '<a name="%s"></a>'
label_tp = '<div id="%s"></div>'
tex_blocks[i] = '<!-- Equation labels as ordinary links -->\n' + \
' '.join([label_tp % label
for label in labels]) + '\n\n' + \
tex_blocks[i]
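# Illustrative example of the transformation above: a tex block such as
#     \begin{equation} E = mc^2 label{eq:emc} \end{equation}
# leaves this loop roughly as
#     <!-- Equation labels as ordinary links -->
#     <div id="eq:emc"></div>
#
#     $$
#     \begin{equation} E = mc^2 label{eq:emc} \tag{1} \end{equation}
#     $$
# while label2tag records {'eq:emc': 1} for the reference substitution further down.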
# blocks is now a list of text chunks in markdown and math/code line
# instructions. Insert code and tex blocks
for i in range(len(notebook_blocks)):
if _CODE_BLOCK in notebook_blocks[i] or _MATH_BLOCK in notebook_blocks[i]:
words = notebook_blocks[i].split()
# start of notebook_blocks[i]: number block-indicator code-type
n = int(words[0])
if _CODE_BLOCK in notebook_blocks[i]:
notebook_blocks[i] = code_blocks[n] # can be list!
if _MATH_BLOCK in notebook_blocks[i]:
notebook_blocks[i] = tex_blocks[n]
# Prepend the doconce header and command to the first text cell
intro = _doconce_header + '\n'
intro += _doconce_command % ('ipynb', globals.filename, ' '.join(sys.argv[1:]))
intro = INLINE_TAGS_SUBST[format]['comment'] % intro + '\n\n'
ind = next((i for i, type in enumerate(notebook_block_types) if type == 'text'), -1)
if ind > -1:
notebook_blocks[ind] = intro + notebook_blocks[ind]
global new_code_cell, new_markdown_cell, new_notebook, new_output
cells = [] # ipynb cells
#mdstr = [] # plain md format of the notebook
execution_count = 1
kernel_client = None # Placeholder for a JupyterKernelClient instance
if option("execute"):
kernel_client = JupyterKernelClient(syntax='python')
editable_md = True # Metadata for md text
if option('ipynb_non_editable_text'):
editable_md = False
j = 0
for i, block in enumerate(notebook_blocks):
block_tp = notebook_block_types[i]
if (block_tp == 'text' or block_tp == 'math') and block != '' and block != '<!-- -->':
block = re.sub(r"caption\{(.*)\}", r"*Figure: \1*", block)
cells.append(new_markdown_cell(source=block, metadata=dict(editable=editable_md)))
#mdstr.append(('markdown', block))
elif block_tp == 'cell':
# TODO: now I need to handle -hid also in 'markdown' formats. The logic on postfixes should actually be
# separated from LANG
LANG, codetype, postfix, postfix_err = get_code_block_args('!bc ' + notebook_block_envir[j])
j += 1
execute, show = get_execute_show((LANG, codetype, postfix))
current_code_envir = LANG + codetype + postfix + postfix_err or 'dat'
if show == 'output' and block != '' and not option('ignore_output'):
# Process cells with output block type (`-out` postfix)
block = block.rstrip()
outputs = [
new_output(
output_type="execute_result",
data={
"text/plain": [
block
]
},
execution_count=execution_count-1
)
]
previous_cell = cells[-1]
if previous_cell.cell_type == "code":
previous_cell.outputs = outputs
else:
print("WARNING: DocOnce ipynb got code output,",
"but previous was not code.")
cell = new_code_cell(
source="#",
outputs=outputs,
execution_count=execution_count,
metadata=dict(collapsed=False)
)
cells.append(cell)
#mdstr.append(('codecell', block))
else:
# Process cells with block type cell (py*), (*-e), (*-hid)
cells_output, execution_count, error = execute_code_block(block,
current_code_envir,
kernel_client,
execution_count)
# Warn and abort on code errors
if error != '':
if option('execute=') == 'abort':
errwarn('*** error: Error in code block:')
else:
errwarn('*** Warning: found error in code block:')
errwarn(' %s' % error)
errwarn('***')
if option('execute=') == 'abort' and not postfix_err:
_abort()
# Add the cell, except when the `-e` postfix is used. `-hid` is just a collapsed cell
if postfix != '-e':
#mdstr.extend(md_out)
for cell in cells_output:
if cell:
cells.append(cell)
# Replace the random id with a reproducible hash of the content (issue #213)
# nbformat 5.1.3 creates random cell ID with `uuid.uuid4().hex[:8]`
for i in range(len(cells)):
if 'id' in cell.keys():
cell_content = str(i) + cells[i]['source']
cells[i]['id'] = hashlib.sha224(cell_content.encode()).hexdigest()[:8]
# Create the notebook in string format
nb = new_notebook(cells=cells)
filestr = writes(nb, version=4)
# Check that there are no empty cells:
if '"input": []' in filestr:
errwarn('*** error: empty cells in notebook - report bug in DocOnce')
_abort()
# must do the replacements here at the very end when json is written out
# \eqref and labels will not work, but labels (only in math) do no harm
filestr = re.sub(r'([^\\])label\{', r'\g<1>\\\\label{', filestr,
flags=re.MULTILINE)
# \\eqref{} just gives (???) link at this stage - future versions
# will probably support labels
#filestr = re.sub(r'\(ref\{(.+?)\}\)', r'\\eqref{\g<1>}', filestr)
# Now we use explicit references to tags
def subst(m):
label = m.group(1)
try:
return r'[(%s)](#%s)' % (label2tag[label], label)
except KeyError as e:
errwarn('*** error: label "%s" is not defined' % str(e))
#filestr = re.sub(r'\(ref\{(.+?)\}\)', subst, filestr)
# pandoc_ref_and_label replaces ref{%s} with [%s](#%s), where label is inserted
# we want the link to display the equation number instead of the label
for label, tag in label2tag.items():
filestr = filestr.replace("[%s](#%s)" % (label, label), "[%s](#%s)" % (tag, label))
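# Example (illustrative): with label2tag == {'eq:emc': 1}, a reference that
# pandoc_ref_and_label rendered as "[eq:emc](#eq:emc)" becomes "[1](#eq:emc)", so the
# link text shows the equation number while still pointing at the <div> anchor above.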
"""
# MathJax reference to tag (recall that the equations have both label
# and tag (know that tag only works well in HTML, but this mjx-eqn-no
# label does not work in ipynb)
filestr = re.sub(r'\(ref\{(.+?)\}\)',
lambda m: r'[(%s)](#mjx-eqn-%s)' % (label2tag[m.group(1)], label2tag[m.group(1)]), filestr)
"""
#filestr = re.sub(r'\(ref\{(.+?)\}\)', r'Eq (\g<1>)', filestr)
'''
# Final fixes: replace all text between cells by markdown code cells
# Note: the patterns are overlapping so a plain re.sub will not work,
# here we run through all blocks found and subsitute the first remaining
# one, one by one.
pattern = r' \},\n(.+?)\{\n "cell_type":'
begin_pattern = r'^(.+?)\{\n "cell_type":'
remaining_block_begin = re.findall(begin_pattern, filestr, flags=re.DOTALL)
remaining_blocks = re.findall(pattern, filestr, flags=re.DOTALL)
import string
for block in remaining_block_begin + remaining_blocks:
filestr = string.replace(filestr, block, json_markdown(block) + ' ',
maxreplace=1)
filestr_end = re.sub(r' \{\n "cell_type": .+?\n \},\n', '', filestr,
flags=re.DOTALL)
filestr = filestr.replace(filestr_end, json_markdown(filestr_end))
filestr = ('{'
' "metadata": {'
' "name": "<NAME>"'
' },'
' "nbformat": 3,'
' "nbformat_minor": 0,'
' "worksheets": ['
' {'
' "cells": [') + \
filestr.rstrip() + '\n'+ \
json_pycode('', final_prompt_no+1, 'python').rstrip()[:-1] + \
(''
' ],'
' "metadata": {}'
' }'
' ]'
'}')
'''
if option("execute"):
kernel_client.stop()
return filestr
def execute_code_block(block, current_code_envir, kernel_client, execution_count):
"""Execute a code block and return the ipynb output
Execute a code block in a jupyter kernel. Return the ipynb output for the ipynb format
together with the execution count and any code error
:param str block: code block
:param str current_code_envir: code environment LANG + codetype + postfix + postfix_err
:param JupyterKernelClient kernel_client: instance of JupyterKernelClient
:param int execution_count: execution count in the rendered cell
:return: notebook cells, execution_count, error
:rtype List[NotebookNode], int, str
"""
#md_out = []
cell = None
cells = []
error = ''
editable = True
collapsed = False
if option('ipynb_non_editable_code'):
editable = False
LANG, codetype, postfix, postfix_err = get_code_block_args('!bc ' + current_code_envir)
# The `-hid` postfix collapses the cell
if postfix == '-hid':
collapsed = True
# Start processing the block
if not isinstance(block, list):
block = [block]
for blockline in block:
blockline = blockline.rstrip()
if
36)
# #f0a1a8 合欢红
hex['HEHUANHONG'] = hex['hehuanhong'] = '#f0a1a8'
HEHUANHONG = hehuanhong = (240, 161, 168)
# #f1939c 春梅红
hex['CHUNMEIHONG'] = hex['chunmeihong'] = '#f1939c'
CHUNMEIHONG = chunmeihong = (241, 147, 156)
# #f07c82 香叶红
hex['XIANGYEHONG'] = hex['xiangyehong'] = '#f07c82'
XIANGYEHONG = xiangyehong = (240, 124, 130)
# #f04a3a 珊瑚红
hex['SHANHUHONG'] = hex['shanhuhong'] = '#f04a3a'
SHANHUHONG = shanhuhong = (240, 74, 58)
# #f13c22 萝卜红
hex['LUOBOHONG'] = hex['luobohong'] = '#f13c22'
LUOBOHONG = luobohong = (241, 60, 34)
# #e77c8e 淡茜红
hex['DANQIANHONG'] = hex['danqianhong'] = '#e77c8e'
DANQIANHONG = danqianhong = (231, 124, 142)
# #ed5a65 艳红
hex['YANHONG'] = hex['yanhong'] = '#ed5a65'
YANHONG = yanhong = (237, 90, 101)
# #ed4845 淡菽红
hex['DANSHUHONG'] = hex['danshuhong'] = '#ed4845'
DANSHUHONG = danshuhong = (237, 72, 69)
# #ed3b2f 鱼鳃红
hex['YUSAIHONG'] = hex['yusaihong'] = '#ed3b2f'
YUSAIHONG = yusaihong = (237, 59, 47)
# #ed3321 樱桃红
hex['YINGTAOHONG'] = hex['yingtaohong'] = '#ed3321'
YINGTAOHONG = yingtaohong = (237, 51, 33)
# #ee4866 淡蕊香红
hex['DANRUIXIANGHONG'] = hex['danruixianghong'] = '#ee4866'
DANRUIXIANGHONG = danruixianghong = (238, 72, 102)
# #ee4863 石竹红
hex['SHIZHUHONG'] = hex['shizhuhong'] = '#ee4863'
SHIZHUHONG = shizhuhong = (238, 72, 99)
# #ef475d 草茉莉红
hex['CAOMOLIHONG'] = hex['caomolihong'] = '#ef475d'
CAOMOLIHONG = caomolihong = (239, 71, 93)
# #ee3f4d 茶花红
hex['CHAHUAHONG'] = hex['chahuahong'] = '#ee3f4d'
CHAHUAHONG = chahuahong = (238, 63, 77)
# #ed3333 枸枢红
hex['GOUSHUHONG'] = hex['goushuhong'] = '#ed3333'
GOUSHUHONG = goushuhong = (237, 51, 51)
# #ec2b24 秋海棠红
hex['QIUHAITANGHONG'] = hex['qiuhaitanghong'] = '#ec2b24'
QIUHAITANGHONG = qiuhaitanghong = (236, 43, 36)
# #eb261a 丽春红
hex['LICHUNHONG'] = hex['lichunhong'] = '#eb261a'
LICHUNHONG = lichunhong = (235, 38, 26)
# #de2a18 夕阳红
hex['XIYANGHONG'] = hex['xiyanghong'] = '#de2a18'
XIYANGHONG = xiyanghong = (222, 42, 24)
# #d42517 鹤顶红
hex['HEDINGHONG'] = hex['hedinghong'] = '#d42517'
HEDINGHONG = hedinghong = (212, 37, 23)
# #ab372f 鹅血石红
hex['EXUESHIHONG'] = hex['exueshihong'] = '#ab372f'
EXUESHIHONG = exueshihong = (171, 55, 47)
# #ac1f18 覆盆子红
hex['FUPENZIHONG'] = hex['fupenzihong'] = '#ac1f18'
FUPENZIHONG = fupenzihong = (172, 31, 24)
# #5d3131 貂紫
hex['DIAOZI'] = hex['diaozi'] = '#5d3131'
DIAOZI = diaozi = (93, 49, 49)
# #5c2223 暗玉紫
hex['ANYUZI'] = hex['anyuzi'] = '#5c2223'
ANYUZI = anyuzi = (92, 34, 35)
# #5a191b 栗紫
hex['LIZI'] = hex['lizi'] = '#5a191b'
LIZI = lizi = (90, 25, 27)
# #5a1216 葡萄酱紫
hex['PUTAOJIANGZI'] = hex['putaojiangzi'] = '#5a1216'
PUTAOJIANGZI = putaojiangzi = (90, 18, 22)
# #eea2a4 牡丹粉红
hex['MUDANFENHONG'] = hex['mudanfenhong'] = '#eea2a4'
MUDANFENHONG = mudanfenhong = (238, 162, 164)
# #ed556a 山茶红
hex['SHANCHAHONG'] = hex['shanchahong'] = '#ed556a'
SHANCHAHONG = shanchahong = (237, 85, 106)
# #f03752 海棠红
hex['HAITANGHONG'] = hex['haitanghong'] = '#f03752'
HAITANGHONG = haitanghong = (240, 55, 82)
# #c04851 玉红
hex['YUHONG'] = hex['yuhong'] = '#c04851'
YUHONG = yuhong = (192, 72, 81)
# #c02c38 高粱红
hex['GAOLIANGHONG'] = hex['gaolianghong'] = '#c02c38'
GAOLIANGHONG = gaolianghong = (192, 44, 56)
# #a7535a 满江红
hex['MANJIANGHONG'] = hex['manjianghong'] = '#a7535a'
MANJIANGHONG = manjianghong = (167, 83, 90)
# #7c1823 枣红
hex['ZAOHONG'] = hex['zaohong'] = '#7c1823'
ZAOHONG = zaohong = (124, 24, 35)
# #4c1f24 葡萄紫
hex['PUTAOZI'] = hex['putaozi'] = '#4c1f24'
PUTAOZI = putaozi = (76, 31, 36)
# #4d1018 酱紫
hex['JIANGZI'] = hex['jiangzi'] = '#4d1018'
JIANGZI = jiangzi = (77, 16, 24)
# #ee2746 淡曙红
hex['DANSHUHONG'] = hex['danshuhong'] = '#ee2746'
DANSHUHONG = danshuhong = (238, 39, 70)
# #de1c31 唐菖蒲红
hex['TANGCHANGPUHONG'] = hex['tangchangpuhong'] = '#de1c31'
TANGCHANGPUHONG = tangchangpuhong = (222, 28, 49)
# #d11a2d 鹅冠红
hex['EGUANHONG'] = hex['eguanhong'] = '#d11a2d'
EGUANHONG = eguanhong = (209, 26, 45)
# #c45a65 莓红
hex['MEIHONG'] = hex['meihong'] = '#c45a65'
MEIHONG = meihong = (196, 90, 101)
# #c21f30 枫叶红
hex['FENGYEHONG'] = hex['fengyehong'] = '#c21f30'
FENGYEHONG = fengyehong = (194, 31, 48)
# #a61b29 苋菜红
hex['XIANCAIHONG'] = hex['xiancaihong'] = '#a61b29'
XIANCAIHONG = xiancaihong = (166, 27, 41)
# #894e54 烟红
hex['YANHONG'] = hex['yanhong'] = '#894e54'
YANHONG = yanhong = (137, 78, 84)
# #82202b 暗紫苑红
hex['ANZIYUANHONG'] = hex['anziyuanhong'] = '#82202b'
ANZIYUANHONG = anziyuanhong = (130, 32, 43)
# #82111f 殷红
hex['YANHONG'] = hex['yanhong'] = '#82111f'
YANHONG = yanhong = (130, 17, 31)
# #541e24 猪肝紫
hex['ZHUGANZI'] = hex['zhuganzi'] = '#541e24'
ZHUGANZI = zhuganzi = (84, 30, 36)
# #500a16 金鱼紫
hex['JINYUZI'] = hex['jinyuzi'] = '#500a16'
JINYUZI = jinyuzi = (80, 10, 22)
# #f8ebe6 草珠红
hex['CAOZHUHONG'] = hex['caozhuhong'] = '#f8ebe6'
CAOZHUHONG = caozhuhong = (248, 235, 230)
# #ec7696 淡绛红
hex['DANJIANGHONG'] = hex['danjianghong'] = '#ec7696'
DANJIANGHONG = danjianghong = (236, 118, 150)
# #ef3473 品红
hex['PINHONG'] = hex['pinhong'] = '#ef3473'
PINHONG = pinhong = (239, 52, 115)
# #ea7293 凤仙花红
hex['FENGXIANHUAHONG'] = hex['fengxianhuahong'] = '#ea7293'
FENGXIANHUAHONG = fengxianhuahong = (234, 114, 147)
# #ec9bad 粉团花红
hex['FENTUANHUAHONG'] = hex['fentuanhuahong'] = '#ec9bad'
FENTUANHUAHONG = fentuanhuahong = (236, 155, 173)
# #eb507e 夹竹桃红
hex['JIAZHUTAOHONG'] = hex['jiazhutaohong'] = '#eb507e'
JIAZHUTAOHONG = jiazhutaohong = (235, 80, 126)
# #ed2f6a 榲桲红
hex['WENPOHONG'] = hex['wenpohong'] = '#ed2f6a'
WENPOHONG = wenpohong = (237, 47, 106)
# #eeb8c3 姜红
hex['JIANGHONG'] = hex['jianghong'] = '#eeb8c3'
JIANGHONG = jianghong = (238, 184, 195)
# #ea517f 莲瓣红
hex['LIANBANHONG'] = hex['lianbanhong'] = '#ea517f'
LIANBANHONG = lianbanhong = (234, 81, 127)
# #f1c4cd 水红
hex['SHUIHONG'] = hex['shuihong'] = '#f1c4cd'
SHUIHONG = shuihong = (241, 196, 205)
# #ec8aa4 报春红
hex['BAOCHUNHONG'] = hex['baochunhong'] = '#ec8aa4'
BAOCHUNHONG = baochunhong = (236, 138, 164)
# #ce5777 月季红
hex['YUEJIHONG'] = hex['yuejihong'] = '#ce5777'
YUEJIHONG = yuejihong = (206, 87, 119)
# #ed9db2 豇豆红
hex['JIANGDOUHONG'] = hex['jiangdouhong'] = '#ed9db2'
JIANGDOUHONG = jiangdouhong = (237, 157, 178)
# #ef82a0 霞光红
hex['XIAGUANGHONG'] = hex['xiaguanghong'] = '#ef82a0'
XIAGUANGHONG = xiaguanghong = (239, 130, 160)
# #eb3c70 松叶牡丹红
hex['SONGYEMUDANHONG'] = hex['songyemudanhong'] = '#eb3c70'
SONGYEMUDANHONG = songyemudanhong = (235, 60, 112)
# #ec2c64 喜蛋红
hex['XIDANHONG'] = hex['xidanhong'] = '#ec2c64'
XIDANHONG = xidanhong = (236, 44, 100)
# #e3b4b8 鼠鼻红
hex['SHUBIHONG'] = hex['shubihong'] = '#e3b4b8'
SHUBIHONG = shubihong = (227, 180, 184)
# #cc163a 尖晶玉红
hex['JIANJINGYUHONG'] = hex['jianjingyuhong'] = '#cc163a'
JIANJINGYUHONG = jianjingyuhong = (204, 22, 58)
# #c27c88 山黎豆红
hex['SHANLIDOUHONG'] = hex['shanlidouhong'] = '#c27c88'
SHANLIDOUHONG = shanlidouhong = (194, 124, 136)
# #bf3553 锦葵红
hex['JINKUIHONG'] = hex['jinkuihong'] = '#bf3553'
JINKUIHONG = jinkuihong = (191, 53, 83)
# #73575c 鼠背灰
hex['SHUBEIHUI'] = hex['shubeihui'] = '#73575c'
SHUBEIHUI = shubeihui = (115, 87, 92)
# #621624 甘蔗紫
hex['GANZHEZI'] = hex['ganzhezi'] = '#621624'
GANZHEZI = ganzhezi = (98, 22, 36)
# #63071c 石竹紫
hex['SHIZHUZI'] = hex['shizhuzi'] = '#63071c'
SHIZHUZI = shizhuzi = (99, 7, 28)
# #36282b 苍蝇灰
hex['CANGYINGHUI'] = hex['cangyinghui'] = '#36282b'
CANGYINGHUI = cangyinghui = (54, 40, 43)
# #30161c 卵石紫
hex['LUANSHIZI'] = hex['luanshizi'] = '#30161c'
LUANSHIZI = luanshizi = (48, 22, 28)
# #2b1216 李紫
hex['LIZI'] = hex['lizi'] = '#2b1216'
LIZI = lizi = (43, 18, 22)
# #2d0c13 茄皮紫
hex['QIEPIZI'] = hex['qiepizi'] = '#2d0c13'
QIEPIZI = qiepizi = (45, 12, 19)
# #ce5e8a 吊钟花红
hex['DIAOZHONGHUAHONG'] = hex['diaozhonghuahong'] = '#ce5e8a'
DIAOZHONGHUAHONG = diaozhonghuahong = (206, 94, 138)
# #ec4e8a 兔眼红
hex['TUYANHONG'] = hex['tuyanhong'] = '#ec4e8a'
TUYANHONG = tuyanhong = (236, 78, 138)
# #ee2c79 紫荆红
hex['ZIJINGHONG'] = hex['zijinghong'] = '#ee2c79'
ZIJINGHONG = zijinghong = (238, 44, 121)
# #951c48 菜头紫
hex['CAITOUZI'] = hex['caitouzi'] = '#951c48'
CAITOUZI = caitouzi = (149, 28, 72)
# #621d34 鹞冠紫
hex['YAOGUANZI'] = hex['yaoguanzi'] = '#621d34'
YAOGUANZI = yaoguanzi = (98, 29, 52)
# #62102e 葡萄酒红
hex['PUTAOJIUHONG'] = hex['putaojiuhong'] = '#62102e'
PUTAOJIUHONG = putaojiuhong = (98, 16, 46)
# #382129 磨石紫
hex['MOSHIZI'] = hex['moshizi'] = '#382129'
MOSHIZI = moshizi = (56, 33, 41)
# #381924 檀紫
hex['TANZI'] = hex['tanzi'] = '#381924'
TANZI = tanzi = (56, 25, 36)
# #33141e 火鹅紫
hex['HUOEZI'] = hex['huoezi'] = '#33141e'
HUOEZI = huoezi = (51, 20, 30)
# #310f1b 墨紫
hex['MOZI'] = hex['mozi'] = '#310f1b'
MOZI = mozi = (49, 15, 27)
# #eea6b7 晶红
hex['JINGHONG'] = hex['jinghong'] = '#eea6b7'
JINGHONG = jinghong = (238, 166, 183)
# #ef498b 扁豆花红
hex['BIANDOUHUAHONG'] = hex['biandouhuahong'] = '#ef498b'
BIANDOUHUAHONG = biandouhuahong = (239, 73, 139)
# #de7897 白芨红
hex['BAIJIHONG'] = hex['baijihong'] = '#de7897'
BAIJIHONG = baijihong = (222, 120, 151)
# #de3f7c 嫩菱红
hex['NENLINGHONG'] = hex['nenlinghong'] = '#de3f7c'
NENLINGHONG = nenlinghong = (222, 63, 124)
# #d13c74 菠根红
hex['BOGENHONG'] = hex['bogenhong'] = '#d13c74'
BOGENHONG = bogenhong = (209, 60, 116)
# #c5708b 酢酱草红
hex['CUJIANGCAOHONG'] = hex['cujiangcaohong'] = '#c5708b'
CUJIANGCAOHONG = cujiangcaohong = (197, 112, 139)
# #a8456b 洋葱紫
hex['YANGCONGZI'] = hex['yangcongzi'] = '#a8456b'
YANGCONGZI = yangcongzi = (168, 69, 107)
# #4b1e2f 海象紫
hex['HAIXIANGZI'] = hex['haixiangzi'] = '#4b1e2f'
HAIXIANGZI = haixiangzi = (75, 30, 47)
# #461629 绀紫
hex['GANZI'] = hex['ganzi'] = '#461629'
GANZI = ganzi = (70, 22, 41)
# #440e25 古铜紫
hex['GUTONGZI'] = hex['gutongzi'] = '#440e25'
GUTONGZI = gutongzi = (68, 14, 37)
# #f0c9cf 石蕊红
hex['SHIRUIHONG'] = hex['shiruihong'] = '#f0c9cf'
SHIRUIHONG = shiruihong = (240, 201, 207)
# #eba0b3 芍药耕红
hex['SHAOYAOGENGHONG'] = hex['shaoyaogenghong'] = '#eba0b3'
SHAOYAOGENGHONG = shaoyaogenghong = (235, 160, 179)
# #ec2d7a 藏花红
hex['CANGHUAHONG'] = hex['canghuahong'] = '#ec2d7a'
CANGHUAHONG = canghuahong = (236, 45, 122)
# #e16c96 初荷红
hex['CHUHEHONG'] = hex['chuhehong'] = '#e16c96'
CHUHEHONG = chuhehong = (225, 108, 150)
# #ede3e7 马鞭草紫
hex['MABIANCAOZI'] = hex['mabiancaozi'] = '#ede3e7'
MABIANCAOZI = mabiancaozi = (237, 227, 231)
# #e9d7df 丁香淡紫
hex['DINGXIANGDANZI'] = hex['dingxiangdanzi'] = '#e9d7df'
DINGXIANGDANZI = dingxiangdanzi = (233, 215, 223)
# #d2568c 丹紫红
hex['DANZIHONG'] = hex['danzihong'] = '#d2568c'
DANZIHONG = danzihong = (210, 86, 140)
# #d2357d 玫瑰红
hex['MEIGUIHONG'] = hex['meiguihong'] = '#d2357d'
MEIGUIHONG = meiguihong = (210, 53, 125)
# #d1c2d3 淡牵牛紫
hex['DANQIANNIUZI'] = hex['danqianniuzi'] = '#d1c2d3'
DANQIANNIUZI = danqianniuzi = (209, 194, 211)
# #c8adc4 凤信紫
hex['FENGXINZI'] = hex['fengxinzi'] = '#c8adc4'
FENGXINZI = fengxinzi = (200, 173, 196)
# #c08eaf 萝兰紫
hex['LUOLANZI'] = hex['luolanzi'] = '#c08eaf'
LUOLANZI = luolanzi = (192, 142, 175)
# #ba2f7b 玫瑰紫
hex['MEIGUIZI'] = hex['meiguizi'] = '#ba2f7b'
MEIGUIZI = meiguizi = (186, 47, 123)
# #8076a3 藤萝紫
hex['TENGLUOZI'] = hex['tengluozi'] = '#8076a3'
TENGLUOZI = tengluozi = (128, 118, 163)
# #806d9e 槿紫
hex['JINZI'] = hex['jinzi'] = '#806d9e'
JINZI = jinzi = (128, 109, 158)
# #815c94 蕈紫
hex['XUNZI'] = hex['xunzi'] = '#815c94'
XUNZI = xunzi = (129, 92, 148)
import random
import json
from inspect import isfunction
import asyncio
"""
This file contains all of the built in Kahoot handlers,
as well as methods to handle said handlers.
"""
ANS_TYPE = 0 # Answer type this instance uses
def get_place(place):
# Method for determining place
if place == 1:
return "st"
if place == 2:
return "nd"
if place == 3:
return "rd"
return "th"
class BaseKahootHandler(object):
"""
Base ID handler for handling Kahoot events
"""
def __init__(self, id_num):
self.id = id_num # ID of the event to handle
self.kahoot = None # Instance of the Kahoot object
async def hand(self, data):
# Method called when a request matching the ID needs to be handled,
# And data is the relevant game data
pass
async def start(self):
# Called on modules when they are registered
pass
async def stop(self):
# called on modules when they need to be stopped
pass
def bind(self, inst=None):
# Binds the Kahoot object to the handler
self.kahoot = inst
class NullKahootHandler(BaseKahootHandler):
"""
Kahoot handler that does nothing.
Default handler loaded for all ID's,
To ensure data is always being handled.
Great for handling information that we don't care about.
"""
def __init__(self, id_num):
super().__init__(id_num)
async def hand(self, data):
# Do nothing
pass
class PrintKahootHandler(BaseKahootHandler):
"""
A Handler that prints message content to the terminal.
Great for debugging purposes.
"""
def __init__(self, id_num):
super().__init__(id_num)
async def hand(self, data):
print(data)
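# --- Illustrative example (not part of the original handler set) ---
# A custom handler subclasses BaseKahootHandler, implements hand(), and is
# bound to a Kahoot instance before use. The id_num value and the wiring shown
# in the usage comment are assumptions about how the surrounding client
# registers handlers.
class EchoKahootHandler(BaseKahootHandler):
    """
    Example handler that echoes incoming data for its ID.
    """
    def __init__(self, id_num):
        super().__init__(id_num)
    async def hand(self, data):
        print("Handler {} received: {}".format(self.id, data))
# Typical (assumed) usage:
#   handler = EchoKahootHandler(13)
#   handler.bind(kahoot_instance)
#   await handler.hand({"example": True})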
class DefaultInfoGrabFail(BaseKahootHandler):
"""
Default Handler for handling quiz info grab failure
Also manages the grabbing of Kahoot data
"""
    def __init__(self, id_num=-3):
        super().__init__(id_num)
        self.error_codes = {41: "Quiz UUID is invalid, please re-enter the correct quiz UUID"}
    def bind(self, inst=None):
        # Bind the Kahoot instance, then disable auto-fetching until the user picks a method
        super().bind(inst)
        if self.kahoot is not None:
            self.kahoot._auto_fetch_info = False  # Setting variable for fetching info
async def hand(self, data):
# Handling info grab fail
print("\n+====================================================+")
print("Error Grabbing Quiz Information:")
print("\n--== Error Information: ==--")
print("Error Name: {}".format(data['error']))
print("Error Code: {}".format(data['errorCode']))
print("Error ID: {}".format(data['errorId']))
print("\n--== Argument Errors: ==--")
for num, field in enumerate(data['fields']):
# Iterating over arguments:
print("Argument {}: {}".format(num, field))
print("\n--== Assessment: ==--\n")
print(self.error_codes[data['errorCode']])
async def start(self):
# Function for prompting for a UUID
while True:
print("\n+====================================================+")
print("This bot requires extra quiz information to function.")
print("This allows the bot to prompt/automatically answer the question correctly.")
print("We have a few methods of acquiring this information:")
print("\n[1]: Manually enter quiz UUID")
print(" Will fetch quiz information using UUID given by user.")
print(" Fast and guaranteed to be accurate,")
print(" Given that the UUID entered is correct.")
print("[2]: Automatically fetch information ")
print(" Will use game data given to automatically search for a match.")
print(" Information may not be accurate, and it may take longer to locate compared to manual search.")
print("\n(You will be alerted if the given quiz information is incorrect)")
inp = int(input("\nEnter the number of your option:"))
if inp == 1:
# User wants to manually enter quiz UUID
print("\n+====================================================+")
print("Please enter the quiz UUID below:")
print("\nThe quiz UUID can be found by looking at the URL of the Kahoot game")
print("It should look something like this:")
print("\nhttps://play.kahoot.it/#/lobby?quizId=[QUIZ ID HERE]")
print("\nIt is important to be exact, and the UUID is case-sensitive.")
print("You may return to the previous menu by entering 'return' or 'r'.")
uuid = str(input("Enter UUID(or 'return'):"))
if uuid.lower() == 'return':
# User wants to return to the previous menu
continue
# Searching for UUID:
val = await self.kahoot.info.get_info_by_uuid(uuid)
                # We don't care about the return value here; valid and invalid results are handled elsewhere
return
if inp == 2:
# User wants to automatically fetch information
print("\n+====================================================+")
print("When we have the necessary game data, we will automatically fetch the quiz information.")
print("However, please be aware this this method of fetching information may not be accurate.")
print("Some search parameters can be configured, such as depth, and relevant search topics.")
print("These values are set at the Kahoot default, "
"but they can be changed to increase the chances of finding the Kahoot.")
print("(It is recommended for most users to keep them at their default values)")
print("\nAre you sure you want to automatically search for quiz information?")
print("(You may enter 'no' if you wish to return to the previous screen.")
inp = str(input("\n(Y/N")).lower()
if inp not in ['yes', 'y', 'ye']:
# User does not want to continue
continue
# TODO: Find a better way to configure search parameters
# I think some more work needs to go into the SearchParameter object, found in quiz.py
                # We should add features that allow for the listing and iteration of these objects.
'''
print("\nWould you like configure these search parameters?")
print("(Yes to configure, No to keep defaults)")
inp = str(input("\n(Y/N):")).lower()
if inp in ['yes', 'y', 'ye']:
# User wants to configure search parameters
val = await self._configure_search_params()
if not val:
# User wants to return
continue
'''
print("\nConfiguration complete.")
print("Automatically fetching quiz information when game data is available.\n")
self.kahoot._auto_fetch_info = True
return
# TODO: Fix this function!
# This function is terribly designed and is poorly optimised.
    # There should be a better way of doing this
async def _configure_search_params(self):
# Method for prompting the user to configure search parameters
order = self.kahoot.selected_order
topics = self.kahoot.selected_topics
grade = self.kahoot.selected_grades
usage = self.kahoot.selected_creators
depth = self.kahoot.deep
while True:
print("\n+====================================================+")
print(" --== Search Parameter Configuration: ==--")
print("Please answer the following:")
print("('None' simply means that the default value is nothing, "
"this is the default value for most parameters)")
while True:
# Configuring search type:
print("\n+====================================================+")
print(" --== Search Sorting Options: ==--\n")
for num, sort in enumerate(self.kahoot.orders):
print("[{}]: Sort by {}".format(num, sort))
print("\nDefault Value: {}".format(list(self.kahoot.orders.keys())[
list(self.kahoot.orders.values()).index(
self.kahoot.default_order)]))
print("Value Selected: {}".format("Default Value(Most Relevant)" if order == '' else order))
print("\n+====================================================+")
print("\nDefinition: How to sort/search for Kahoots.")
print("\nPlease enter the number of the search sorting method you want:")
print("(You can leave the prompt blank to accept the currently selected value)")
print("(You can also enter 'd' to accept the default value)")
print("You can also enter 'none' or 'n' to set the value to blank)")
orde = input("Enter number of your choice:")
                # Making sure there are no errors while recovering the value
try:
if orde == '':
# User wants currently selected value
break
if str(orde).lower() == "d":
# User wants default value, continuing
order = self.kahoot.default_order
break
if str(orde).lower() in ['none', 'n']:
# User wants value to be blank
order = ''
break
                    # Checking value (the menu above is numbered from 0):
                    orde = int(orde)
                    if orde < 0 or orde >= len(self.kahoot.orders):
                        # Invalid value
                        print("\nError: Invalid entry, please try again.\n")
                        continue
                    order = list(self.kahoot.orders.values())[orde]
                    break
except Exception:
# Invalid entry
print("\nInvalid entry, please try again.\n")
continue
while True:
# Configuring topic type:
print("\n+====================================================+")
print(" --== Search Topic Options: ==--\n")
total = 0
for cat in self.kahoot.topics:
# Iterating over categories
print("\nCategory: {}".format(cat))
for num, val in enumerate(self.kahoot.topics[cat]):
print(" [{}]: {}".format(num + total + 1, val))
                    total += len(self.kahoot.topics[cat])
print("\nDefault Value(s): {}".format("None" if self.kahoot.default_topics == [] else self.kahoot.default_topics))
print("Value(s) Selected: {}".format("None" if self.kahoot.selected_topics == [] else self.kahoot.selected_topics))
print("\n+====================================================+")
print("\nDefinition: Topics relevant to Kahoot to search for.")
print("\nPlease enter the number(s) of your option(s).")
print("Your options may be a single value(1),")
print("Or they can multiple values separated by spaces(1 2 3 4).")
print("(You may leave the prompt blank to accept the currently selected value(s))")
print("(You may enter 'd' to accept the default value(s))")
print("(You may also enter 'none' or 'n' to set the value to blank)")
top = input("Enter the number(s) of your options(s):")
try:
# Catch any weird exceptions that may arise
if top == '':
# User wants currently selected values:
break
if top.lower() == 'd':
# User wants default grade
topics = self.kahoot.default_topics
break
if top.lower() in ['n', 'none']:
# User wants blank value
                        topics = []
break
                    selections = top.split()
                    # Checking if selections are valid
                    valid = True
                    for sel in selections:
                        if int(sel) < 1 or int(sel) > total:
                            # Invalid value entered
                            print("\nError: Invalid entry, please try again.\n")
                            valid = False
                            break
                    if not valid:
                        # Re-prompt for the topic selection
                        continue
                    topics = selections
                    break
except Exception:
# Weird exception occurred, handling and retrying
print("\nError: Invalid entry, please try | |
# hyperparameter_search_dd2.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import os
import numpy as np
import scipy
import scipy.misc
from skopt import gp_minimize
from skopt.space import Categorical
# Expansion codes indexed by expansion number (used in output file names)
EXPANSION_STRINGS = ("u08", "qt", "ct",
                     "ch3s10qt", "ch3s15qt", "ch3s20qt", "ch3s25qt",
                     "ch2s10qt", "ch2s15qt", "ch2s20qt", "ch2s25qt", "ch2s30qt",
                     "ch2s35qt", "ch2s40qt", "ch2s45qt", "ch2s50qt", "ch2s55qt",
                     "ch2s60qt", "ch2s65qt", "ch2s70qt", "ch2s75qt", "ch2s80qt")
def expansion_number_to_string(expansion):
    if 0 <= expansion < len(EXPANSION_STRINGS):
        return EXPANSION_STRINGS[expansion]
    ex = "invalid expansion number: " + str(expansion)
    raise Exception(ex)
def string_to_expansion_number(string):
    # File names carry the expansion code with an "Exp" suffix (e.g. "u08Exp")
    if string.endswith("Exp") and string[:-3] in EXPANSION_STRINGS:
        return EXPANSION_STRINGS.index(string[:-3])
    ex = "invalid expansion string: " + string
    raise Exception(ex)
def cuicuilco_f_CE_Gauss(arguments):
return 1.0 - cuicuilco_evaluation(arguments, measure="CR_Gauss")
def cuicuilco_f_CE_Gauss_soft(arguments):
return 1.0 - cuicuilco_evaluation(arguments, measure="CR_Gauss_soft")
def cuicuilco_f_CE_Gauss_mix(arguments):
return 1.0 - cuicuilco_evaluation(arguments, measure="CR_Gauss_mix")
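# Illustrative sketch (assumed usage, not code from the original script): the
# three wrappers above are objective functions for skopt's gp_minimize, which
# then minimizes 1 - accuracy over the 24 architecture arguments. A
# `search_space` list with one skopt dimension per argument is needed; only its
# shape is hinted at here.
#   search_space = [Categorical([13, 16, 20]),   # L0_pca_out_dim (hypothetical values)
#                   ...]                         # one dimension per remaining argument
#   res = gp_minimize(cuicuilco_f_CE_Gauss, search_space, n_calls=50, random_state=1234)
#   print("best CR_Gauss:", 1.0 - res.fun, "arguments:", res.x)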
def cuicuilco_evaluation(arguments, measure="CR_Gauss", verbose=False):
(L0_pca_out_dim, L0_sfa_out_dim, L1H_sfa_out_dim, L1V_sfa_out_dim, L2H_sfa_out_dim, L2V_sfa_out_dim,
L3H_sfa_out_dim, L3V_sfa_out_dim, L0_delta_threshold, L1H_delta_threshold, L1V_delta_threshold,
L2H_delta_threshold, L2V_delta_threshold, L3H_delta_threshold, L3V_delta_threshold, L0_expansion,
L1H_expansion, L1V_expansion, L2H_expansion, L2V_expansion, L3H_expansion, L3V_expansion,
L4_degree_QT, L4_degree_CT) = arguments
print("invoking cuicuilco_evaluation with arguments:", arguments)
# Testing whether arguments are compatible
incompatible = 0
if L0_pca_out_dim + L0_delta_threshold < L0_sfa_out_dim:
L0_delta_threshold = L0_sfa_out_dim - L0_pca_out_dim
print("Attempting to solve incompatibility case 1", L0_pca_out_dim, L0_delta_threshold, L0_sfa_out_dim)
if L0_delta_threshold < 1 or L0_delta_threshold > 20:
incompatible = 21
    if 2 * L2H_sfa_out_dim + L2V_delta_threshold < L2V_sfa_out_dim:
        L2V_delta_threshold = L2V_sfa_out_dim - 2 * L2H_sfa_out_dim
        print("Attempting to solve incompatibility case 2", L2H_sfa_out_dim, L2V_delta_threshold, L2V_sfa_out_dim)
if L2V_delta_threshold < 1 or L2V_delta_threshold > 20:
incompatible = 22
if L0_pca_out_dim + L0_delta_threshold < L0_sfa_out_dim:
incompatible = 1
elif 2 * L0_sfa_out_dim + L1H_delta_threshold < L1H_sfa_out_dim: # This factor is 2 and not 3 due to overlap
incompatible = 2
elif 2 * L1H_sfa_out_dim + L1V_delta_threshold < L1V_sfa_out_dim: # This factor is 2 and not 3 due to overlap
incompatible = 3
elif 2 * L1V_sfa_out_dim + L2H_delta_threshold < L2H_sfa_out_dim:
incompatible = 4
elif 2 * L2H_sfa_out_dim + L2V_delta_threshold < L2V_sfa_out_dim:
incompatible = 5
elif 2 * L2V_sfa_out_dim + L3H_delta_threshold < L3H_sfa_out_dim:
incompatible = 6
elif 2 * L3H_sfa_out_dim + L3V_delta_threshold < L3V_sfa_out_dim:
incompatible = 7
if L1H_delta_threshold > (2 + 3) * L0_sfa_out_dim:
incompatible = 8
elif L1V_delta_threshold > (2 + 3) * L1H_sfa_out_dim:
incompatible = 9
elif L2H_delta_threshold > 2 * L1V_sfa_out_dim: # the factor here should be actually 4, right?
incompatible = 10
elif L2V_delta_threshold > 2 * L2H_sfa_out_dim:
incompatible = 11
elif L3H_delta_threshold > 2 * L2V_sfa_out_dim:
incompatible = 12
elif L3V_delta_threshold > 2 * L3H_sfa_out_dim:
incompatible = 13
if L0_delta_threshold > L0_sfa_out_dim:
incompatible = 14
elif L1H_delta_threshold > L1H_sfa_out_dim:
incompatible = 15
elif L1V_delta_threshold > L1V_sfa_out_dim:
incompatible = 16
elif L2H_delta_threshold > L2H_sfa_out_dim:
incompatible = 17
elif L2V_delta_threshold > L2V_sfa_out_dim:
incompatible = 18
elif L3H_delta_threshold > L3H_sfa_out_dim:
incompatible = 19
elif L3V_delta_threshold > L3V_sfa_out_dim:
incompatible = 20
if incompatible:
print("Configuration (before fixes):", arguments, " is incompatible (%d) and was skipped" % incompatible)
return 0.0
# Update arguments variable
arguments = (L0_pca_out_dim, L0_sfa_out_dim, L1H_sfa_out_dim, L1V_sfa_out_dim, L2H_sfa_out_dim, L2V_sfa_out_dim,
L3H_sfa_out_dim, L3V_sfa_out_dim, L0_delta_threshold, L1H_delta_threshold, L1V_delta_threshold,
L2H_delta_threshold, L2V_delta_threshold, L3H_delta_threshold, L3V_delta_threshold, L0_expansion,
L1H_expansion, L1V_expansion, L2H_expansion, L2V_expansion, L3H_expansion, L3V_expansion,
L4_degree_QT, L4_degree_CT)
print("Creating configuration file ")
fd = open("MNISTNetwork_24x24_7L_Overlap_config.txt", "w")
txt = ""
for entry in arguments:
txt += str(entry)+ " "
fd.write(txt)
fd.close()
print("created configuration file with contents:", txt)
cuicuilco_experiment_seeds = [112210, 112220, 112230] #, 112240] #[112244, 112255, 112266, 112277] # , 112277]
metrics = []
for cuicuilco_experiment_seed in cuicuilco_experiment_seeds: #112233 #np.random.randint(2**25) # np.random.randn()
os.putenv("CUICUILCO_EXPERIMENT_SEED", str(cuicuilco_experiment_seed))
print("Setting CUICUILCO_EXPERIMENT_SEED: ", str(cuicuilco_experiment_seed))
output_filename = "hyper_t/MNIST_24x24_7L_L0cloneL_%dPC_%dSF_%sExp_%dF_" + \
"L1cloneL_%dSF_%sExp_%dF_L2clone_%dSF_%sExp_%dF_L3cloneL_%dSF_%sExp_%dF_" + \
"L4cloneL_%dSF_%sExp_%dF_L5_%dSF_%sExp_%dF_L6_%dSF_%sExp_%dF_NoHead_QT%dAP_CT%dAP_seed%d.txt"
output_filename = output_filename % (L0_pca_out_dim, L0_delta_threshold, expansion_number_to_string(L0_expansion), L0_sfa_out_dim,
L1H_delta_threshold, expansion_number_to_string(L1H_expansion), L1H_sfa_out_dim,
L1V_delta_threshold, expansion_number_to_string(L1V_expansion), L1V_sfa_out_dim,
L2H_delta_threshold, expansion_number_to_string(L2H_expansion), L2H_sfa_out_dim,
L2V_delta_threshold, expansion_number_to_string(L2V_expansion), L2V_sfa_out_dim,
L3H_delta_threshold, expansion_number_to_string(L3H_expansion), L3H_sfa_out_dim,
L3V_delta_threshold, expansion_number_to_string(L3V_expansion), L3V_sfa_out_dim,
L4_degree_QT, L4_degree_CT, cuicuilco_experiment_seed)
if os.path.isfile(output_filename):
print("file %s already exists, skipping its computation" % output_filename)
else:
command = "time nice -n 19 python -u -m cuicuilco.cuicuilco_run --EnableDisplay=0 --CacheAvailable=0 " + \
"--NetworkCacheReadDir=/local/tmp/escalafl/Alberto/SavedNetworks " + \
"--NetworkCacheWriteDir=/local/tmp/escalafl/Alberto/SavedNetworks " + \
"--NodeCacheReadDir=/local/tmp/escalafl/Alberto/SavedNodes " + \
"--NodeCacheWriteDir=/local/tmp/escalafl/Alberto/SavedNodes " + \
"--ClassifierCacheWriteDir=/local/tmp/escalafl/Alberto/SavedClassifiers " + \
"--SaveSubimagesTraining=0 --SaveAverageSubimageTraining=0 --NumFeaturesSup=9 " + \
"--SaveSorted_AE_GaussNewid=0 --SaveSortedIncorrectClassGaussNewid=0 " + \
"--ComputeSlowFeaturesNewidAcrossNet=0 --UseFilter=0 --EnableGC=1 --SFAGCReducedDim=0 --EnableKNN=0 " + \
"--kNN_k=3 --EnableNCC=0 --EnableSVM=0 --SVM_C=0.125 --SVM_gamma=1.0 --EnableLR=0 " + \
"--AskNetworkLoading=0 --LoadNetworkNumber=-1 --NParallel=2 --EnableScheduler=0 " + \
"--EstimateExplainedVarWithInverse=0 --EstimateExplainedVarWithKNN_k=0 " + \
"--EstimateExplainedVarWithKNNLinApp=0 --EstimateExplainedVarLinGlobal_N=0 --AddNormalizationNode=0 " + \
"--MakeLastPCANodeWhithening=0 --FeatureCutOffLevel=-1.0 --ExportDataToLibsvm=0 " + \
"--IntegerLabelEstimation=0 --MapDaysToYears=0 --CumulativeScores=0 --DatasetForDisplayNewid=0 " + \
"--GraphExactLabelLearning=0 --OutputInsteadOfSVM2=0 --NumberTargetLabels=0 --EnableSVR=0 " + \
"--SVR_gamma=0.85 --SVR_C=48.0 --SVR_epsilon=0.075 --SVRInsteadOfSVM2=1 --ObjectiveLabel=0 " + \
"--ExperimentalDataset=ParamsMNISTFunc --HierarchicalNetwork=MNISTNetwork_24x24_7L_Overlap_dd2_config " + \
"--SleepM=0 2>&1 > " + output_filename
print("excecuting command: ", command)
os.system(command)
if verbose:
print("extracting performance metric from resulting file")
metric = extract_performance_metric_from_file(output_filename, measure=measure)
metrics.append(metric)
    return np.array(metrics).mean()
def extract_performance_metric_from_file(output_filename, measure = "CR_Gauss", verbose=False):
command_extract = "cat %s | grep New | grep CR_G > del_tmp.txt" % output_filename
os.system(command_extract)
fd = open("del_tmp.txt", "r")
metrics = fd.readline().split(" ")
fd.close()
if verbose:
print("metrics: ", metrics)
if len(metrics) > 10 and metrics[6] == "CR_Gauss":
metric_CR_Gauss = float(metrics[7].strip(","))
metric_CR_Gauss_soft = float(metrics[9].strip(","))
if np.isnan(metric_CR_Gauss_soft):
print("warning, nan metric was found and fixed as metric_CR_Gauss - 0.0001")
metric_CR_Gauss_soft = metric_CR_Gauss - 0.0001
else:
print("unable to find metrics in file (defaulting to 0.95)")
metric_CR_Gauss = 0.95
metric_CR_Gauss_soft = 0.95
if measure == "CR_Gauss":
metric = metric_CR_Gauss
elif measure == "CR_Gauss_soft":
metric = metric_CR_Gauss_soft
elif measure == "CR_Gauss_mix":
metric = 0.5 * (metric_CR_Gauss + metric_CR_Gauss_soft)
else:
er = "invalid measure: " + str(measure)
raise Exception(er)
# print("metric_CR_Gauss: ", metric_CR_Gauss, " metric_CR_Gauss_soft:", metric_CR_Gauss_soft)
return metric
def load_saved_executions(measure="CR_Gauss", dimensions=None, verbose=False):
path = "hyper_t"
only_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
only_files = [f for f in only_files if f.startswith("MNIST_24x24_7L")]
arguments_list = []
results_list = []
for f in only_files:
# print("filename %s was found" % f)
# MNIST_24x24_7L_L0cloneL_16PC_1SF_qtExp_25F_L1cloneL_1SF_u08Exp_20F_L2clone_30SF_u08Exp_80F_L3cloneL_1SF_u08Exp_100F_L4cloneL_20F_u08Exp_120F_L5_20F_u08Exp_90SF_L6_20F_u08Exp_250SF_NoHead_QT90AP_CT25AP_seed13153651.txt
vals = f.split("_")
vals = [val.strip("PCFSseedQTA.txt") for val in vals]
if verbose:
print("vals=", vals)
# quit()
if len(vals) >= 36:
L0_pca_out_dim = int(vals[4])
L0_sfa_out_dim = int(vals[7])
L1H_sfa_out_dim = int(vals[11])
L1V_sfa_out_dim = int(vals[15])
L2H_sfa_out_dim = int(vals[19])
L2V_sfa_out_dim = int(vals[23])
L3H_sfa_out_dim = int(vals[27])
L3V_sfa_out_dim = int(vals[31])
L0_delta_threshold = int(vals[5])
L1H_delta_threshold = int(vals[9])
L1V_delta_threshold = int(vals[13])
L2H_delta_threshold = int(vals[17])
L2V_delta_threshold = int(vals[21])
L3H_delta_threshold = int(vals[25])
L3V_delta_threshold = int(vals[29])
L0_expansion = string_to_expansion_number(vals[6])
L1H_expansion = string_to_expansion_number(vals[10])
L1V_expansion = string_to_expansion_number(vals[14])
L2H_expansion = string_to_expansion_number(vals[18])
L2V_expansion = string_to_expansion_number(vals[22])
L3H_expansion = string_to_expansion_number(vals[26])
L3V_expansion = string_to_expansion_number(vals[30])
L4_degree_QT = int(vals[33])
L4_degree_CT = int(vals[34])
seed = int(vals[35])
arguments = [L0_pca_out_dim, L0_sfa_out_dim, L1H_sfa_out_dim, L1V_sfa_out_dim, L2H_sfa_out_dim, L2V_sfa_out_dim, L3H_sfa_out_dim, L3V_sfa_out_dim, L0_delta_threshold, L1H_delta_threshold, L1V_delta_threshold, L2H_delta_threshold, L2V_delta_threshold, L3H_delta_threshold, L3V_delta_threshold, L0_expansion, L1H_expansion, L1V_expansion, L2H_expansion, L2V_expansion, L3H_expansion, L3V_expansion, L4_degree_QT, L4_degree_CT]
if verbose:
print("parsed arguments:", arguments)
metric = extract_performance_metric_from_file(os.path.join(path, f), measure)
arguments_list.append(arguments)
results_list.append(metric)
else:
print("Error parging values", vals)
if len(arguments_list) > 0:
results_list = np.array(results_list)
# arguments_list = np.array(arguments_list, dtype=int)
ordering = np.argsort(results_list)[::-1]
results_list = results_list[ordering]
sorted_arguments_list = []
for i in range(len(ordering)):
sorted_arguments_list.append(arguments_list[ordering[i]])
        arguments_list = sorted_arguments_list
def lisp_udp_checksum(source, dest, data):
    # Build the IPv6 pseudo-header: source address, destination address,
    # upper-layer packet length, and next-header (UDP) value
    source_addr = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    dest_addr = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udp_length = socket.htonl(len(data))
    protocol = socket.htonl(LISP_UDP_PROTOCOL)
    pheader = source_addr.pack_address()
    pheader += dest_addr.pack_address()
    pheader += struct.pack("II", udp_length, protocol)
    # Convert the pseudo-header plus UDP payload to a hex string and pad it to a
    # multiple of 4 hex digits (16-bit words)
    checksum_data = binascii.hexlify(pheader + data)
    padding = len(checksum_data) % 4
    for i in range(0, padding): checksum_data += "0"
    # Sum all 16-bit words
    checksum = 0
    for i in range(0, len(checksum_data), 4):
        checksum += int(checksum_data[i:i + 4], 16)
    # Fold the carries back in and take the one's complement
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)
    # Insert the checksum into the UDP header (bytes 6 and 7)
    checksum = struct.pack("H", checksum)
    checksum_data = data[0:6] + checksum + data[8::]
    return (checksum_data)
def lisp_igmp_checksum(igmp):
    # Convert the IGMP header to a hex string and sum its first six 16-bit words
    igmp_data = binascii.hexlify(igmp)
    checksum = 0
    for i in range(0, 24, 4):
        checksum += int(igmp_data[i:i + 4], 16)
    # Fold the carries back in and take the one's complement
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)
    # Insert the checksum into the IGMP header (bytes 2 and 3)
    checksum = struct.pack("H", checksum)
    igmp = igmp[0:2] + checksum + igmp[4::]
    return (igmp)
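# Illustrative usage sketch (assumed, not part of the original file): build a
# 12-byte IGMPv3 membership query with a zeroed checksum field and let
# lisp_igmp_checksum() fill bytes 2-3 in. The field values are arbitrary examples.
#   igmp_query = struct.pack("!BBH", 0x11, 100, 0) + socket.inet_aton("232.1.1.1") + \
#       struct.pack("!BBH", 0, 125, 0)
#   igmp_query = lisp_igmp_checksum(igmp_query)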
def lisp_get_interface_address(device):
    # Return None if the supplied device name does not exist on this system
    if (device not in netifaces.interfaces()): return (None)
    # Look up the address information for the device
IIiiI = netifaces . | |
import os
from pathlib import Path
from melloddy_tuner import utils
from melloddy_tuner.utils import helper, version
from melloddy_tuner.utils.config import ConfigDict
from melloddy_tuner.utils.standardizer import Standardizer
import time
from argparse import ArgumentParser
import numpy as np
import pandas as pd
import multiprocessing
import melloddy_tuner
from melloddy_tuner.scripts import (
activity_data_formatting,
aggregate_values,
calculate_descriptors,
calculate_lsh_folds,
calculate_scaffold_folds,
filter_classification,
filter_regression,
standardize_smiles,
apply_thresholding,
)
from melloddy_tuner.scripts import csv_2_mtx
from melloddy_tuner.utils import chem_utils, hash_reference_set
from melloddy_tuner.utils.helper import (
load_config,
load_key,
make_dir,
map_2_cont_id,
read_csv,
sanity_check_assay_sizes,
sanity_check_assay_type,
sanity_check_compound_sizes,
sanity_check_uniqueness,
save_df_as_csv,
read_input_file,
save_mtx_as_npy,
)
from sys import platform
if platform == "darwin":
multiprocessing.set_start_method("fork", force=True)
#######################################
"""
Commandline Interface to run MELLODDY-TUNER:
The tool can execute the following commands:
(1) "standard_smiles" : Standardization of given input filer (T2).
(2) "assign_fold": Assign folds by scaffold network.
(3) "calc_desc": Calcuelate molecular descriptors.
(4) "agg_activity_data": Aggregate input activity data (T0, T1).
(5) "apply_thresholding": Apply thresholding to classification data.
(6) "filter_classification_data": Filter classification data.
(7) "filter_regression_data": Filter regression data.
(8) "make_matrices": Create sparse matrices from processed dataframes, ready for SparseChem.
(9) "prepare_4_training": Entire pipeline for training processing including function (1) - (8).
(10) "prepare_4_prediction": Entire pipeline for prediction including function 1,3 and 8.
"""
parser = ArgumentParser(
description=f"MELLODDY-TUNER: Standardardization Tool for IMI MELLODDY (Version: {version.__version__})"
)
subparsers = parser.add_subparsers(
title="subcommands",
help="Use 'tunercli <subcommand> --help' for details about the given subcommand",
)
#######################################
"""
Standardize SMILES Subparser
"""
standardize = subparsers.add_parser(
"standardize_smiles", description="Standardize SMILES structures"
)
standardize.add_argument(
"-s",
"--structure_file",
type=str,
help="path of the structure input file",
required=True,
)
standardize.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
standardize.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
standardize.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
standardize.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
standardize.add_argument(
"-n",
"--number_cpu",
type=int,
help="number of CPUs for calculation (default: 1)",
default=1,
)
standardize.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
standardize.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_standardize_smiles(args):
"""Standardize smiles
Args:
args (Namespace): subparser arguments
"""
# hash_reference_set.main
standardize_smiles.main(vars(args))
standardize.set_defaults(func=do_standardize_smiles)
#######################################
assign_fold = subparsers.add_parser("assign_fold", description="fold assignment")
assign_fold.add_argument(
"-s",
"--structure_file",
type=str,
help="path of the standardized structure input file",
required=True,
)
assign_fold.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
assign_fold.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
assign_fold.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
assign_fold.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
assign_fold.add_argument(
"-n",
"--number_cpu",
type=int,
help="number of CPUs for calculation (default: 1 CPUs)",
default=1,
)
assign_fold.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
assign_fold.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_fold_assignment(args):
"""Standardize smiles
Args:
args (Namespace): subparser arguments
"""
# hash_reference_set.main
calculate_scaffold_folds.main(vars(args))
assign_fold.set_defaults(func=do_fold_assignment)
#######################################
"""
Calculate Descriptor Subparser
"""
calc_desc = subparsers.add_parser(
"calculate_descriptors", description="Calculate descriptors"
)
calc_desc.add_argument(
"-s",
"--structure_file",
type=str,
help="path of the structure input file containing standardized smiles and optional fold ID",
required=True,
)
calc_desc.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
calc_desc.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
calc_desc.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
calc_desc.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
calc_desc.add_argument(
"-n",
"--number_cpu",
type=int,
help="number of CPUs for calculation (default: 1)",
default=1,
)
calc_desc.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
calc_desc.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_calculate_desc(args):
"""Calculate descriptors and assign folds.
Args:
args (Namespace): subparser arguments
"""
calculate_descriptors.main(vars(args))
calc_desc.set_defaults(func=do_calculate_desc)
#######################################
# Descriptor calculation and Locality sensitive hashing based fold assignment
desc_lsh = subparsers.add_parser(
"assign_lsh_fold", description="Run descriptor calculation and LSH based folding."
)
desc_lsh.add_argument(
"-s",
"--structure_file",
type=str,
help="path of the structure input file",
required=True,
)
desc_lsh.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
desc_lsh.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
desc_lsh.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
desc_lsh.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
desc_lsh.add_argument(
"-n",
"--number_cpu",
type=int,
help="number of CPUs for calculation (default: 2 CPUs)",
default=2,
)
desc_lsh.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
desc_lsh.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_calculate_desc_lsh(args):
"""Calculate descriptors and assign folds based on locality sensitive hashing.
Args:
args (Namespace): subparser arguments
"""
calculate_lsh_folds.main(vars(args))
desc_lsh.set_defaults(func=do_calculate_desc_lsh)
#######################################
"""
Aggregate activty data
"""
agg_act_data = subparsers.add_parser(
"agg_activity_data", description="Aggregation of activity data"
)
agg_act_data.add_argument(
"-assay",
"--assay_file",
type=str,
help="path of the assay metadata file T0",
required=True,
)
agg_act_data.add_argument(
"-a",
"--activity_file",
type=str,
help="path of the activity data file T1",
required=True,
)
agg_act_data.add_argument(
"-mt",
"--mapping_table",
type=str,
help="path of the mapping table T5",
required=True,
)
agg_act_data.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
agg_act_data.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
agg_act_data.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
agg_act_data.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
agg_act_data.add_argument(
"-n",
"--number_cpu",
type=int,
help="number of CPUs for calculation (default: 1)",
default=1,
)
agg_act_data.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
agg_act_data.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_agg_activity_data(args):
"""Aggregate activity data
Args:
args (Namespace): subparser arguments
"""
aggregate_values.main(vars(args))
agg_act_data.set_defaults(func=do_agg_activity_data)
#######################################
"""
Apply Thresholding
"""
apply_threshold = subparsers.add_parser(
"apply_thresholding", description="Thresholding of activity data"
)
apply_threshold.add_argument(
"-assay",
"--assay_file",
type=str,
help="path of the assay metadata file T0",
required=True,
)
apply_threshold.add_argument(
"-a",
"--activity_file",
type=str,
help="path of the activity data file T4r",
required=True,
)
apply_threshold.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
apply_threshold.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
apply_threshold.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
apply_threshold.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
apply_threshold.add_argument(
"-n",
"--number_cpu",
type=int,
help="number of CPUs for calculation (default: 1)",
default=1,
)
apply_threshold.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
apply_threshold.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_thresholding(args):
"""Apply thresholding
Args:
args (Namespace): subparser arguments
"""
apply_thresholding.main(vars(args))
apply_threshold.set_defaults(func=do_thresholding)
#######################################
"""
Filter classification data
"""
filter_clf = subparsers.add_parser(
"filter_classification_data", description="filter classification activity data"
)
filter_clf.add_argument(
"-ca",
"--classification_activity_file",
type=str,
help="path of the classification task data T4c",
required=True,
)
filter_clf.add_argument(
"-cw",
"--classification_weight_table",
type=str,
help="path of the classification task definition and metadata T3c",
required=True,
)
filter_clf.add_argument(
"-mt",
"--mapping_table_T5",
type=str,
help="path to mapping table T5",
required=False,
)
filter_clf.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
filter_clf.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
filter_clf.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
filter_clf.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
filter_clf.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
filter_clf.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_filtering_clf(args):
"""filter classification data
Args:
args (Namespace): subparser arguments
"""
filter_classification.main(vars(args))
filter_clf.set_defaults(func=do_filtering_clf)
#######################################
"""
Filter regression data
"""
filter_reg = subparsers.add_parser(
"filter_regression_data", description="filter regression activity data"
)
filter_reg.add_argument(
"-ra",
"--regression_activity_file",
type=str,
help="path of the (censored) regression task data T4r",
required=True,
)
filter_reg.add_argument(
"-rw",
"--regression_weight_table",
type=str,
help="path of the (censored) regression task definition and metadata T3r",
required=True,
)
filter_reg.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
filter_reg.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
filter_reg.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
filter_reg.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
filter_reg.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
filter_reg.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_filtering_reg(args):
"""filter regression data
Args:
args (Namespace): subparser arguments
"""
filter_regression.main(vars(args))
filter_reg.set_defaults(func=do_filtering_reg)
#######################################
"""
Create Sparse Matrices for SparseChem Subparser
"""
sparse_matrices = subparsers.add_parser(
"make_matrices", description="Formatting of activity data"
)
sparse_matrices.add_argument(
"-s",
"--structure_file",
type=str,
help="path of the processed structure input file T6",
required=True,
)
sparse_matrices.add_argument(
"-ac",
"--activity_file_clf",
type=str,
help="path of the processed classification activity file T10c",
)
sparse_matrices.add_argument(
"-wc",
"--weight_table_clf",
type=str,
help="path of the processed classification weight table file T8c",
)
sparse_matrices.add_argument(
"-ar",
"--activity_file_reg",
type=str,
help="path of the processed regression activity file T10r",
)
sparse_matrices.add_argument(
"-wr",
"--weight_table_reg",
type=str,
help="path of the processed regression weight table file T8r",
)
sparse_matrices.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
sparse_matrices.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
sparse_matrices.add_argument(
"-o", "--output_dir", type=str, help="path to output directory", required=True
)
sparse_matrices.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
sparse_matrices.add_argument(
"-t",
"--tag",
type=str,
help="tag to identify classifcation with or without auxiliary data",
required=True,
)
sparse_matrices.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
sparse_matrices.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
def do_make_sparse_matrices(args):
"""Create matrices form dataframes, ready for SparseChem.
Args:
args (Namespace): subparser arguments
"""
csv_2_mtx.main(vars(args))
sparse_matrices.set_defaults(func=do_make_sparse_matrices)
#######################################
"""
Prepare_4_training Pipeline Subparser
"""
prepare = subparsers.add_parser(
"prepare_4_training",
description="Standardize structures, calculate descriptors and folds, format activity data, and generate matrices",
)
prepare.add_argument(
"-s",
"--structure_file",
type=str,
help="path of the structure input file",
required=True,
)
prepare.add_argument(
"-a", "--activity_file", type=str, help="path of the activity input file"
)
prepare.add_argument(
"-w", "--weight_table", type=str, | |
University"),
("Florida Atlantic University","Florida Atlantic University"),
("Florida Barber Academy","Florida Barber Academy"),
("Florida Career College-Miami","Florida Career College-Miami"),
("Florida Coastal School of Law","Florida Coastal School of Law"),
("Florida College of Integrative Medicine","Florida College of Integrative Medicine"),
("Florida College of Natural Health-Bradenton","Florida College of Natural Health-Bradenton"),
("Florida College of Natural Health-Maitland","Florida College of Natural Health-Maitland"),
("Florida College of Natural Health-Miami","Florida College of Natural Health-Miami"),
("Florida College of Natural Health-Pompano Beach","Florida College of Natural Health-Pompano Beach"),
("Florida College","Florida College"),
("Florida Education Institute","Florida Education Institute"),
("Florida Gateway College","Florida Gateway College"),
("Florida Gulf Coast University","Florida Gulf Coast University"),
("Florida Institute of Recording Sound and Technology","Florida Institute of Recording Sound and Technology"),
("Florida Institute of Technology","Florida Institute of Technology"),
("Florida Institute of Technology-Online","Florida Institute of Technology-Online"),
("Florida Institute of Ultrasound Inc","Florida Institute of Ultrasound Inc"),
("Florida International University","Florida International University"),
("Florida Keys Community College","Florida Keys Community College"),
("Florida Memorial University","Florida Memorial University"),
("Florida National University-Main Campus","Florida National University-Main Campus"),
("Florida Polytechnic University","Florida Polytechnic University"),
("Florida School of Massage","Florida School of Massage"),
("Florida School of Traditional Midwifery","Florida School of Traditional Midwifery"),
("Florida Southern College","Florida Southern College"),
("Florida State College at Jacksonville","Florida State College at Jacksonville"),
("Florida State University","Florida State University"),
("Florida Technical College","Florida Technical College"),
("Florida Vocational Institute","Florida Vocational Institute"),
("Focus-Hope Information Technologies Center","Focus-Hope Information Technologies Center"),
("Folsom Lake College","Folsom Lake College"),
("Fond du Lac Tribal and Community College","Fond du Lac Tribal and Community College"),
("Fontbonne University","Fontbonne University"),
("Foothill College","Foothill College"),
("Foothill-De Anza Community College District","Foothill-De Anza Community College District"),
("Forbes Road Career and Technology Center","Forbes Road Career and Technology Center"),
("Fordham University","Fordham University"),
("Forest Institute of Professional Psychology","Forest Institute of Professional Psychology"),
("Forrest College","Forrest College"),
("Forsyth Technical Community College","Forsyth Technical Community College"),
("Fort Berthold Community College","Fort Berthold Community College"),
("Fort Hays State University","Fort Hays State University"),
("Fort Lewis College","Fort Lewis College"),
("Fort Myers Institute of Technology","Fort Myers Institute of Technology"),
("Fort Peck Community College","Fort Peck Community College"),
("Fort Pierce Beauty Academy","Fort Pierce Beauty Academy"),
("Fort Scott Community College","Fort Scott Community College"),
("Fort Valley State University","Fort Valley State University"),
("Fort Worth Beauty School","Fort Worth Beauty School"),
("Fortis College School of Cosmetology","Fortis College School of Cosmetology"),
("Fortis College-Akron","Fortis College-Akron"),
("Fortis College-Baton Rouge","Fortis College-Baton Rouge"),
("Fortis College-Centerville","Fortis College-Centerville"),
("Fortis College-Cincinnati","Fortis College-Cincinnati"),
("Fortis College-Columbia","Fortis College-Columbia"),
("Fortis College-Columbus","Fortis College-Columbus"),
("Fortis College-Cutler Bay","Fortis College-Cutler Bay"),
("Fortis College-Cuyahoga Falls","Fortis College-Cuyahoga Falls"),
("Fortis College-Dothan","Fortis College-Dothan"),
("Fortis College-Foley","Fortis College-Foley"),
("Fortis College-Houston","Fortis College-Houston"),
("Fortis College-Indianapolis","Fortis College-Indianapolis"),
("Fortis College-Landover","Fortis College-Landover"),
("Fortis College-Largo","Fortis College-Largo"),
("Fortis College-Miami","Fortis College-Miami"),
("Fortis College-Mobile","Fortis College-Mobile"),
("Fortis College-Montgomery","Fortis College-Montgomery"),
("Fortis College-Montgomery","Fortis College-Montgomery"),
("Fortis College-Norfolk","Fortis College-Norfolk"),
("Fortis College-Orange Park","Fortis College-Orange Park"),
("Fortis College-Phoenix","Fortis College-Phoenix"),
("Fortis College-Ravenna","Fortis College-Ravenna"),
("Fortis College-Richmond","Fortis College-Richmond"),
("Fortis College-Salt Lake City","Fortis College-Salt Lake City"),
("Fortis College-Smyrna","Fortis College-Smyrna"),
("Fortis College-Tampa","Fortis College-Tampa"),
("Fortis College-Winter Park","Fortis College-Winter Park"),
("Fortis Institute-Baltimore","Fortis Institute-Baltimore"),
("Fortis Institute-Birmingham","Fortis Institute-Birmingham"),
("Fortis Institute-Cookeville","Fortis Institute-Cookeville"),
("Fortis Institute-Erie","Fortis Institute-Erie"),
("Fortis Institute-Fort Lauderdale","Fortis Institute-Fort Lauderdale"),
("Fortis Institute-Forty Fort","Fortis Institute-Forty Fort"),
("Fortis Institute-Grand Prairie","Fortis Institute-Grand Prairie"),
("Fortis Institute-Houston","Fortis Institute-Houston"),
("Fortis Institute-Jacksonville","Fortis Institute-Jacksonville"),
("Fortis Institute-Lawrenceville","Fortis Institute-Lawrenceville"),
("Fortis Institute-Miami","Fortis Institute-Miami"),
("Fortis Institute-Mulberry","Fortis Institute-Mulberry"),
("Fortis Institute-Nashville","Fortis Institute-Nashville"),
("Fortis Institute-Pensacola","Fortis Institute-Pensacola"),
("Fortis Institute-Port Saint Lucie","Fortis Institute-Port Saint Lucie"),
("Fortis Institute-Scranton","Fortis Institute-Scranton"),
("Fortis Institute-Towson","Fortis Institute-Towson"),
("Fortis Institute-Wayne","Fortis Institute-Wayne"),
("Fosters Cosmetology College","Fosters Cosmetology College"),
("Fountainhead College of Technology","Fountainhead College of Technology"),
("Four County Career Center","Four County Career Center"),
("Four Rivers Career Center","Four Rivers Career Center"),
("Four-D College","Four-D College"),
("Fox College","Fox College"),
("Fox Institute of Business-Clifton","Fox Institute of Business-Clifton"),
("Fox Institute of Business-West Hartford","Fox Institute of Business-West Hartford"),
("Fox Valley Technical College","Fox Valley Technical College"),
("Framingham State University","Framingham State University"),
("Francis Marion University","Francis Marion University"),
("Francis Tuttle Technology Center","Francis Tuttle Technology Center"),
("Franciscan School of Theology","Franciscan School of Theology"),
("Franciscan University of Steubenville","Franciscan University of Steubenville"),
("Francois D College of Hair Skin and Nails","Francois D College of Hair Skin and Nails"),
("Frank Lloyd Wright School of Architecture","Frank Lloyd Wright School of Architecture"),
("Frank Phillips College","Frank Phillips College"),
("Franklin Academy","Franklin Academy"),
("Franklin Beauty School","Franklin Beauty School"),
("Franklin Career College","Franklin Career College"),
("Franklin Career Institute","Franklin Career Institute"),
("Franklin College","Franklin College"),
("Franklin County Career and Technology Center","Franklin County Career and Technology Center"),
("Franklin Pierce University","Franklin Pierce University"),
("Franklin Technology-MSSU","Franklin Technology-MSSU"),
("Franklin University","Franklin University"),
("Franklin W Olin College of Engineering","Franklin W Olin College of Engineering"),
("Franklin and Marshall College","Franklin and Marshall College"),
("Fred W Eberle Technical Center","Fred W Eberle Technical Center"),
("Frederick Community College","Frederick Community College"),
("Frederick School of Cosmetology","Frederick School of Cosmetology"),
("Fredrick and Charles Beauty College","Fredrick and Charles Beauty College"),
("Freed-Hardeman University","Freed-Hardeman University"),
("Fremont College","Fremont College"),
("French Academy of Cosmetology","French Academy of Cosmetology"),
("Fresno City College","Fresno City College"),
("Fresno Pacific University","Fresno Pacific University"),
("Friends University","Friends University"),
("Front Range Community College","Front Range Community College"),
("Frontier Community College","Frontier Community College"),
("Frontier Nursing University","Frontier Nursing University"),
("Frostburg State University","Frostburg State University"),
("Full Sail University","Full Sail University"),
("Fullen School of Hair Design","Fullen School of Hair Design"),
("Fuller Theological Seminary in California","Fuller Theological Seminary in California"),
("Fullerton College","Fullerton College"),
("Fulton-Montgomery Community College","Fulton-Montgomery Community College"),
("Furman University","Furman University"),
("Futura Career Institute","Futura Career Institute"),
("Future Generations Graduate School","Future Generations Graduate School"),
("Future-Tech Institute","Future-Tech Institute"),
("G Skin & Beauty Institute","G Skin & Beauty Institute"),
("GP Institute of Cosmetology","GP Institute of Cosmetology"),
("GUTI The Premier Beauty & Wellness Academy","GUTI The Premier Beauty & Wellness Academy"),
("Gadsden State Community College","Gadsden State Community College"),
("Gainesville State College","Gainesville State College"),
("Galaxy Medical College","Galaxy Medical College"),
("Galen College of Medical and Dental Assistants","Galen College of Medical and Dental Assistants"),
("Galen College of Nursing-Cincinnati","Galen College of Nursing-Cincinnati"),
("Galen College of Nursing-Louisville","Galen College of Nursing-Louisville"),
("Galen College of Nursing-San Antonio","Galen College of Nursing-San Antonio"),
("Galen College of Nursing-Tampa Bay","Galen College of Nursing-Tampa Bay"),
("Gallaudet University","Gallaudet University"),
("Gallery College of Beauty","Gallery College of Beauty"),
("Gallipolis Career College","Gallipolis Career College"),
("Galveston College","Galveston College"),
("Gannon University","Gannon University"),
("Garden City Community College","Garden City Community College"),
("Gardner-Webb University","Gardner-Webb University"),
("Garnet Career Center","Garnet Career Center"),
("Garrett College","Garrett College"),
("Garrett-Evangelical Theological Seminary","Garrett-Evangelical Theological Seminary"),
("<NAME> Institute","<NAME>"),
("Gaston College","Gaston College"),
("GateWay Community College","GateWay Community College"),
("Gateway Community College","Gateway Community College"),
("Gateway Community and Technical College","Gateway Community and Technical College"),
("Gateway Technical College","Gateway Technical College"),
("Gavilan College","Gavilan College"),
("Geisinger-Lewistown Hospital School of Nursing","Geisinger-Lewistown Hospital School of Nursing"),
("Gem City College","Gem City College"),
("Gemini School of Visual Arts & Communication","Gemini School of Visual Arts & Communication"),
("Gemological Institute of America-Carlsbad","Gemological Institute of America-Carlsbad"),
("Gemological Institute of America-New York","Gemological Institute of America-New York"),
("Gene Juarez Academy of Beauty-Federal Way","Gene Juarez Academy of Beauty-Federal Way"),
("Gene Juarez Academy of Beauty-Mountlake Terrace","Gene Juarez Academy of Beauty-Mountlake Terrace"),
("Genesee Community College","Genesee Community College"),
("Genesee Valley BOCES-Practical Nursing Program","Genesee Valley BOCES-Practical Nursing Program"),
("Genesis Career College-Cookeville","Genesis Career College-Cookeville"),
("Genesis Career College-Lebanon","Genesis Career College-Lebanon"),
("Geneva College","Geneva College"),
("Gentle Healing School of Massage","Gentle Healing School of Massage"),
("George C Wallace State Community College-Dothan","George C Wallace State Community College-Dothan"),
("George C Wallace State Community College-Hanceville","George C Wallace State Community College-Hanceville"),
("George C Wallace State Community College-Selma","George C Wallace State Community College-Selma"),
("George Fox University","George Fox University"),
("George Mason University","George Mason University"),
("George Stone Technical Center","George Stone Technical Center"),
("George T Baker Aviation School","George T Baker Aviation School"),
("George Washington University","George Washington University"),
("Georgetown College","Georgetown College"),
("Georgetown University","Georgetown University"),
("Georgia Beauty Academy","Georgia Beauty Academy"),
("Georgia Career Institute","Georgia Career Institute"),
("Georgia Christian University","Georgia Christian University"),
("Georgia College and State University","Georgia College and State University"),
("Georgia Gwinnett College","Georgia Gwinnett College"),
("Georgia Health Sciences University","Georgia Health Sciences University"),
("Georgia Highlands College","Georgia Highlands College"),
("Georgia Institute of Cosmetology","Georgia Institute of Cosmetology"),
("Georgia Institute of Technology-Main Campus","Georgia Institute of Technology-Main Campus"),
("Georgia Military College","Georgia Military College"),
("Georgia Military College-Distance Learning Campuses","Georgia Military College-Distance Learning Campuses"),
("Georgia Northwestern Technical College","Georgia Northwestern Technical College"),
("Georgia Perimeter College","Georgia Perimeter College"),
("Georgia Piedmont Technical College","Georgia Piedmont Technical College"),
("Georgia Regents University","Georgia Regents University"),
("Georgia Southern University","Georgia Southern University"),
("Georgia Southwestern State University","Georgia Southwestern State University"),
("Georgia State University","Georgia State University"),
("Georgian Court University","Georgian Court University"),
("Gerbers Akron Beauty School","Gerbers Akron Beauty School"),
("Germanna Community College","Germanna Community College"),
("Gerstner Sloan-Kettering Graduate School of Biomedical Sciences","Gerstner Sloan-Kettering Graduate School of Biomedical Sciences"),
("Gettysburg College","Gettysburg College"),
("Gill-Tech Academy of Hair Design","Gill-Tech Academy of Hair Design"),
("Glen Dow Academy of Hair Design","Glen Dow Academy of Hair Design"),
("Glen Oaks Community College","Glen Oaks Community College"),
("Glendale Career College","Glendale Career College"),
("Glendale Community College","Glendale Community College"),
("Glendale Community College","Glendale Community College"),
("Glenville State College","Glenville State College"),
("Glenwood Beauty Academy","Glenwood Beauty Academy"),
("Global Business Institute","Global Business Institute"),
("Global Business Institute","Global Business Institute"),
("Global Health College","Global Health College"),
("Global Institute","Global Institute"),
("Globe Institute of Technology","Globe Institute of Technology"),
("Globe University-Appleton","Globe University-Appleton"),
("Globe University-Eau Claire","Globe University-Eau Claire"),
("Globe University-La Crosse","Globe University-La Crosse"),
("Globe University-Madison East","Globe University-Madison East"),
("Globe University-Minneapolis","Globe University-Minneapolis"),
("Globe University-Sioux Falls","Globe University-Sioux Falls"),
("Globe University-Woodbury","Globe University-Woodbury"),
("Globe University�Green Bay","Globe University�Green Bay"),
("Globe University�Madison West","Globe University�Madison West"),
("Globe University�Wausau","Globe University�Wausau"),
("Globelle Technical Institute","Globelle Technical Institute"),
("Gloucester County College","Gloucester County College"),
("Gnomon School of Visual Effects","Gnomon School of Visual Effects"),
("Goddard College","Goddard College"),
("Gods Bible School and College","Gods Bible School and College"),
("Gogebic Community College","Gogebic Community College"),
("Golden Gate University-San Francisco","Golden Gate University-San Francisco"),
("Golden State College of Court Reporting","Golden State College of Court Reporting"),
("Golden West College","Golden West College"),
("Goldey-Beacom College","Goldey-Beacom College"),
("Golf Academy of America-Altamonte Springs","Golf Academy of America-Altamonte Springs"),
("Golf Academy of America-Carlsbad","Golf Academy of America-Carlsbad"),
("Golf Academy of America-Farmers Branch","Golf Academy of America-Farmers Branch"),
("Golf Academy of America-Myrtle Beach","Golf Academy of America-Myrtle Beach"),
("Golf Academy of America-Phoenix","Golf Academy of America-Phoenix"),
("Gonzaga University","Gonzaga University"),
("Good Samaritan College of Nursing and Health Science","Good Samaritan College of Nursing and Health Science"),
("Good Samaritan Hospital School of Radiologic Technology","Good | |
from dai_imports import *
from utils import *
from obj_utils import *
from model import *
from fc import *
from darknet import *
import resnet_unet
import resnet_unet_2
import unet_model
import time
class CNNetwork(Network):
def __init__(self,
model = None,
model_name = 'custom_CNN',
model_type='regressor',
lr=0.02,
one_cycle_factor = 0.5,
criterion = nn.NLLLoss(),
optimizer_name = 'sgd',
dropout_p=0.45,
device=None,
best_accuracy=0.,
best_validation_loss=None,
best_model_file ='best_cnn_model.pth',
chkpoint_file ='chkpoint_file.pth',
class_names = [],
num_classes = None,
):
super().__init__(device=device)
self.set_model_params(criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
one_cycle_factor = one_cycle_factor,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
chkpoint_file = chkpoint_file,
class_names = class_names,
num_classes = num_classes
)
self.model = model.to(device)
def set_model_params(self,criterion,
optimizer_name,
lr,
one_cycle_factor,
dropout_p,
model_name,
model_type,
best_accuracy,
best_validation_loss,
best_model_file,
chkpoint_file,
class_names,
num_classes):
super(CNNetwork, self).set_model_params(
criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
one_cycle_factor = one_cycle_factor,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
chkpoint_file = chkpoint_file,
class_names = class_names,
num_classes = num_classes
)
def forward(self,x):
return self.model(x)
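# Hypothetical usage sketch (not part of the original module; the backbone and
# class names below are made-up placeholders): CNNetwork wraps an existing
# torch model and reuses the training utilities inherited from Network.
#
#   backbone = nn.Sequential(nn.Conv2d(3, 16, 3, 1, 1), nn.ReLU(True),
#                            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
#                            nn.Linear(16, 2), nn.LogSoftmax(dim=1))
#   net = CNNetwork(model=backbone, model_type='classifier',
#                   criterion=nn.NLLLoss(),
#                   device='cuda' if torch.cuda.is_available() else 'cpu',
#                   class_names=['class_0', 'class_1'], num_classes=2)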
class TransferNetworkImg(Network):
def __init__(self,
model_name='DenseNet',
model_type='cv_transfer',
lr=0.02,
one_cycle_factor = 0.5,
criterion = nn.CrossEntropyLoss(),
optimizer_name = 'Adam',
dropout_p=0.45,
pretrained=True,
device=None,
best_accuracy=0.,
best_validation_loss=None,
best_model_file ='best_model.pth',
chkpoint_file ='chkpoint_file.pth',
head = {'num_outputs':10,
'layers':[],
'model_type':'classifier'
},
pre_trained_back = None,
class_names = [],
num_classes = None,
add_extra = True,
set_params = True,
set_head = True
):
super().__init__(device=device)
self.set_transfer_model(model_name,pretrained=pretrained,add_extra=add_extra,drop_out=dropout_p)
if set_head:
self.set_model_head(model_name = model_name,
head = head,
pre_trained_back = pre_trained_back,
dropout_p = dropout_p,
criterion = criterion,
device = device
)
if set_params:
self.set_model_params(criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
one_cycle_factor = one_cycle_factor,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
chkpoint_file = chkpoint_file,
head = head,
class_names = class_names,
num_classes = num_classes
)
self.model = self.model.to(device)
def set_model_params(self,criterion,
optimizer_name,
lr,
one_cycle_factor,
dropout_p,
model_name,
model_type,
best_accuracy,
best_validation_loss,
best_model_file,
chkpoint_file,
head,
class_names,
num_classes):
print('Transfer Learning: current best accuracy = {:.3f}'.format(best_accuracy))
super(TransferNetworkImg, self).set_model_params(
criterion = criterion,
optimizer_name = optimizer_name,
lr = lr,
one_cycle_factor = one_cycle_factor,
dropout_p = dropout_p,
model_name = model_name,
model_type = model_type,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
chkpoint_file = chkpoint_file,
class_names = class_names,
num_classes = num_classes
)
# self.head = head
if len(class_names) == 0:
self.class_names = {k:str(v) for k,v in enumerate(list(range(head['num_outputs'])))}
# if 'class_names' in head.keys():
# if head['class_names'] is not None:
# if len(head['class_names']) > 0:
# self.class_names = head['class_names']
# self.to(self.device)
def forward(self,x):
return self.model(x)
def get_model_params(self):
params = super(TransferNetworkImg, self).get_model_params()
params['head'] = self.head
params['device'] = self.device
return params
def freeze(self,train_classifier=True):
super(TransferNetworkImg, self).freeze()
if train_classifier:
for param in self.model.fc.parameters():
param.requires_grad = True
# if self.model_name.lower() == 'densenet':
# for param in self.model.classifier.parameters():
# param.requires_grad = True
# elif self.model_name.lower() == 'resnet34':
# for param in self.model.fc.parameters():
# param.requires_grad = True
def set_transfer_model(self,mname,pretrained=True,add_extra=True,drop_out = 0.45):
self.model = None
models_dict = {
'densenet': {'model':models.densenet121(pretrained=pretrained),'conv_channels':1024,'grid_ceil':False},
'resnet34': {'model':models.resnet34(pretrained=pretrained),'conv_channels':512,'grid_ceil':True},
'resnet50': {'model':models.resnet50(pretrained=pretrained),'conv_channels':2048,'grid_ceil':True}
}
meta = models_dict[mname.lower()]
self.grid_ceil = meta['grid_ceil']
try:
# self.model = nn.Sequential(*list(models_dict[mname.lower()].modules()))
model = meta['model']
for param in model.parameters():
param.requires_grad = False
self.model = model
print('Setting transfer learning model: self.model set to {}'.format(mname))
except:
print('Setting transfer learning model: model name {} not supported'.format(mname))
# creating and adding extra layers to the model
dream_model = None
if add_extra:
channels = meta['conv_channels']
dream_model = nn.Sequential(
nn.Conv2d(channels,channels,3,1,1),
# Printer(),
nn.BatchNorm2d(channels),
nn.ReLU(True),
nn.Dropout2d(drop_out),
nn.Conv2d(channels,channels,3,1,1),
nn.BatchNorm2d(channels),
nn.ReLU(True),
nn.Dropout2d(drop_out),
nn.Conv2d(channels,channels,3,1,1),
nn.BatchNorm2d(channels),
nn.ReLU(True),
nn.Dropout2d(drop_out)
)
self.dream_model = dream_model
def set_model_head(self,
model_name = 'DenseNet',
head = {'num_outputs':10,
'layers':[],
'class_names': None,
'model_type':'classifier'
},
pre_trained_back = None,
criterion = nn.NLLLoss(),
adaptive = True,
dropout_p = 0.45,
device = None):
# models_meta = {
# 'resnet': {'head_id': -2, 'adaptive_head': [DAI_AvgPool,Flatten()],'normal_head': [nn.AvgPool2d(7,1),Flatten()]},
# 'densenet': {'head_id': -1,'adaptive_head': [nn.ReLU(inplace=True),DAI_AvgPool,Flatten()]
# ,'normal_head': [nn.ReLU(inplace=True),nn.AvgPool2d(7,1),Flatten()]}
# }
models_meta = {
'resnet34': {'conv_channels':512,'head_id': -2, 'adaptive_head': [DAI_AvgPool],'normal_head': [nn.AvgPool2d(7,1)]},
'resnet50': {'conv_channels':2048,'head_id': -2, 'adaptive_head': [DAI_AvgPool],'normal_head': [nn.AvgPool2d(7,1)]},
'densenet': {'conv_channels':1024,'head_id': -1,'adaptive_head': [nn.ReLU(inplace=True),DAI_AvgPool]
,'normal_head': [nn.ReLU(inplace=True),nn.AvgPool2d(7,1)]}
}
# name = ''.join([x for x in model_name.lower() if x.isalpha()])
name = model_name.lower()
meta = models_meta[name]
if pre_trained_back:
self.model = pre_trained_back
self.dream_model = None
modules = list(self.model.children())
l = modules[:meta['head_id']]
if self.dream_model:
l+=self.dream_model
if type(head).__name__ != 'dict':
model = nn.Sequential(*l)
for layer in head.children():
if type(layer).__name__ == 'StdConv':
conv_module = layer
break
# temp_conv = head.sconv0.conv
conv_layer = conv_module.conv
temp_args = [conv_layer.out_channels,conv_layer.kernel_size,conv_layer.stride,conv_layer.padding]
temp_args.insert(0,meta['conv_channels'])
conv_layer = nn.Conv2d(*temp_args)
conv_module.conv = conv_layer
# print(head)
# model.add_module('adaptive_avg_pool',DAI_AvgPool)
model.add_module('custom_head',head)
else:
head['criterion'] = criterion
if head['model_type'].lower() == 'classifier':
head['output_non_linearity'] = None
self.num_outputs = head['num_outputs']
fc = modules[-1]
try:
in_features = fc.in_features
except:
in_features = fc.model.out.in_features
fc = FC(
num_inputs = in_features,
num_outputs = head['num_outputs'],
layers = head['layers'],
model_type = head['model_type'],
output_non_linearity = head['output_non_linearity'],
dropout_p = dropout_p,
criterion = head['criterion'],
optimizer_name = None,
device = device
)
if adaptive:
l += meta['adaptive_head']
else:
l += meta['normal_head']
model = nn.Sequential(*l)
model.add_module('fc',fc)
self.model = model
self.head = head
if type(head).__name__ == 'dict':
print('Model: {}, Setting head: inputs: {} hidden:{} outputs: {}'.format(model_name,
in_features,
head['layers'],
head['num_outputs']))
else:
print('Model: {}, Setting head: {}'.format(model_name,type(head).__name__))
def _get_dropout(self):
# if self.model_name.lower() == 'densenet':
# return self.model.classifier._get_dropout()
# elif self.model_name.lower() == 'resnet50' or self.model_name.lower() == 'resnet34':
return self.model.fc._get_dropout()
def _set_dropout(self,p=0.45):
if self.model.fc is not None:
print('{}: setting head (FC) dropout prob to {:.3f}'.format(self.model_name,p))
self.model.fc._set_dropout(p=p)
# if self.model_name.lower() == 'densenet':
# if self.model.classifier is not None:
# print('DenseNet: setting head (FC) dropout prob to {:.3f}'.format(p))
# self.model.classifier._set_dropout(p=p)
# elif self.model_name.lower() == 'resnet50' or self.model_name.lower() == 'resnet34':
# if self.model.fc is not None:
# print('ResNet: setting head (FC) dropout prob to {:.3f}'.format(p))
# self.model.fc._set_dropout(p=p)
class FacialRec(TransferNetworkImg):
def __init__(self,
model_name='DenseNet',
model_type='cv_transfer',
lr=0.02,
one_cycle_factor = 0.5,
criterion= nn.NLLLoss(),
optimizer_name = 'Adam',
dropout_p=0.45,
pretrained=True,
device=None,
best_accuracy=0.,
best_validation_loss=None,
best_model_file ='best_model.pth',
chkpoint_file ='chkpoint_file.pth',
head = {'num_outputs':10,
'layers':[],
'model_type':'classifier'
},
add_extra = True):
super().__init__(model_name = model_name,
model_type = model_type,
lr = lr,
one_cycle_factor = one_cycle_factor,
criterion = criterion,
optimizer_name = optimizer_name,
dropout_p = dropout_p,
pretrained = pretrained,
device = device,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
chkpoint_file = chkpoint_file,
head = head,
add_extra=add_extra)
def forward_once(self, x):
return self.model(x)
def forward(self, x):
# s = x.size()
# x1 = torch.ones((s[0],s[2],s[3],s[4]))
# x2 = torch.ones((s[0],s[2],s[3],s[4]))
# for i,a in enumerate(x):
# x1[i] = a[0]
# x2[i] = a[1]
input1,input2 = x[:,0,:,:],x[:,1,:,:]
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
# return (output1, output2)
dist = F.pairwise_distance(output1,output2)
return torch.sigmoid(dist)
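# Hypothetical usage sketch (assumption, not from the original code): FacialRec
# treats each batch element as a stacked image pair, i.e. an input of shape
# (N, 2, C, H, W); the two slices are embedded separately and the returned
# score is sigmoid of the pairwise L2 distance between the embeddings.
#
#   pairs = torch.randn(8, 2, 3, 224, 224)   # 8 made-up image pairs
#   scores = facial_rec_net(pairs)           # hypothetical instance; shape (8,)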
class FacialRecCenterLoss(TransferNetworkImg):
def __init__(self,
model_name='DenseNet',
model_type='cv_transfer',
lr=0.02,
one_cycle_factor = 0.5,
criterion= nn.NLLLoss(),
optimizer_name = 'AdaDelta',
dropout_p=0.45,
pretrained=True,
device=None,
best_accuracy=0.,
best_validation_loss=None,
best_model_file ='best_center_loss_model.pth',
chkpoint_file ='center_loss_chkpoint_file.pth',
head = {'num_outputs':10,
'layers':[],
'model_type':'classifier'
},
add_extra = True,
center_features_dim = 512,
lamda = 0.03,
alpha = 0.5
):
super().__init__(model_name = model_name,
model_type = model_type,
lr = lr,
one_cycle_factor = one_cycle_factor,
criterion = criterion,
optimizer_name = optimizer_name,
dropout_p = dropout_p,
pretrained = pretrained,
device = device,
best_accuracy = best_accuracy,
best_validation_loss = best_validation_loss,
best_model_file = best_model_file,
chkpoint_file = chkpoint_file,
head = head,
add_extra = add_extra,
set_params = False)
self.lamda = lamda
self.alpha = alpha
self.center_features_dim = center_features_dim
self.centers = ((torch.rand(head['num_outputs'], center_features_dim).to(device) - 0.5) * 2)
self.add_feature_extractor()
super(FacialRecCenterLoss, self).set_model_params(
criterion,
optimizer_name,
lr,
one_cycle_factor,
dropout_p,
model_name,
model_type,
best_accuracy,
best_validation_loss,
best_model_file,
chkpoint_file,
head
)
self.model = self.model.to(device)
def add_feature_extractor(self):
modules = list(self.model.children())
l = modules[:-1]
head = modules[-1]
in_features = list(head.model.children())[0].in_features
extractor = FC(
num_inputs = in_features,
num_outputs = self.center_features_dim,
model_type = 'extractor',
criterion = None,
optimizer_name = None,
device = self.device
)
model = nn.Sequential(*l)
model.add_module('extractor',extractor)
model.add_module('fc',head)
self.model = model
def forward(self, x):
l = list(self.model.children())
for m in l[:-2]:
x = m(x)
feature = l[-2](x)
feature_normed = feature.div(torch.norm(feature, p=2, dim=1, keepdim=True).expand_as(feature))
logits = l[-1](x)
return (logits,feature_normed)
def compute_loss(self,criterion,outputs,labels):
centers = self.centers
out,features = outputs
classifier_loss = criterion(out, labels)
center_loss = compute_center_loss(features,centers,labels)
loss = self.lamda * center_loss + classifier_loss
return (loss, classifier_loss, center_loss)
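# Reference sketch (assumption): compute_center_loss is imported from utils and
# is not defined in this file. A standard center-loss term (Wen et al. 2016)
# penalizes the squared distance between each feature and its class center,
# roughly:
#
#   def compute_center_loss_sketch(features, centers, labels):
#       target_centers = centers[labels]            # (batch, feat_dim)
#       return ((features - target_centers) ** 2).sum() / (2 * features.size(0))
#
# The real utils implementation may normalize differently; this only illustrates
# the term that compute_loss scales by self.lamda above.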
def train_(self,e,trainloader,criterion,optimizer,print_every):
epoch,epochs = e
self.train()
t0 = time.time()
t1 = time.time()
batches = 0
running_loss = 0.
running_classifier_loss = 0.
running_center_loss = 0.
for data_batch in trainloader:
inputs, labels = data_batch[0],data_batch[1]
batches += 1
inputs, labels = inputs.to(self.device), labels.to(self.device)
possible list
of minimal periods of rational periodic points. Take each point modulo `p` associated to each
of these possible periods and try to lift it to a rational point with a combination of
`p`-adic approximation and the LLL basis reduction algorithm.
See [Hutz]_.
INPUT:
kwds:
- ``prime_bound`` - a pair (list or tuple) of positive integers that represent the
limits of primes to use in the reduction step. Or an integer that represents the upper bound. (optional)
default: [1,20]
- ``lifting_prime`` - a prime integer. (optional) argument that specifies modulo which prime to try and perform the
lifting. default: 23
- ``periods`` - a list of positive integers which is the list of possible periods. (optional)
- ``bad_primes`` - a list or tuple of integer primes, the primes of bad reduction. (optional)
- ``ncpus`` - number of cpus to use in parallel. (optional)
default: all available cpus.
OUTPUT:
- a list of rational points in projective space.
EXAMPLES::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([x^2-3/4*y^2, y^2])
sage: sorted(f.rational_periodic_points(prime_bound=20, lifting_prime=7)) # long time
[(-1/2 : 1), (1 : 0), (3/2 : 1)]
::
sage: P.<x,y,z> = ProjectiveSpace(QQ,2)
sage: H = End(P)
sage: f = H([2*x^3 - 50*x*z^2 + 24*z^3, 5*y^3 - 53*y*z^2 + 24*z^3, 24*z^3])
sage: sorted(f.rational_periodic_points(prime_bound=[1,20])) # long time
[(-3 : -1 : 1), (-3 : 0 : 1), (-3 : 1 : 1), (-3 : 3 : 1), (-1 : -1 : 1),
(-1 : 0 : 1), (-1 : 1 : 1), (-1 : 3 : 1), (0 : 1 : 0), (1 : -1 : 1), (1
: 0 : 0), (1 : 0 : 1), (1 : 1 : 1), (1 : 3 : 1), (3 : -1 : 1), (3 : 0 :
1), (3 : 1 : 1), (3 : 3 : 1), (5 : -1 : 1), (5 : 0 : 1), (5 : 1 : 1), (5
: 3 : 1)]
::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([-5*x^2 + 4*y^2, 4*x*y])
sage: sorted(f.rational_periodic_points()) # long time
[(-2 : 1), (-2/3 : 1), (2/3 : 1), (1 : 0), (2 : 1)]
::
sage: R.<x> = QQ[]
sage: K.<w> = NumberField(x^2-x+1)
sage: P.<u,v> = ProjectiveSpace(K,1)
sage: H = End(P)
sage: f = H([u^2 + v^2,v^2])
sage: f.rational_periodic_points()
[(w : 1), (-w + 1 : 1), (1 : 0)]
::
sage: R.<x> = QQ[]
sage: K.<w> = NumberField(x^2-x+1)
sage: P.<u,v> = ProjectiveSpace(K,1)
sage: H = End(P)
sage: f = H([u^2+v^2,u*v])
sage: f.rational_periodic_points()
Traceback (most recent call last):
...
NotImplementedError: rational periodic points for number fields only implemented for polynomials
"""
if not self.is_endomorphism():
raise NotImplementedError("must be an endomorphism of projective space")
PS = self.domain()
K = PS.base_ring()
if K in _NumberFields:
if not K.is_absolute():
raise TypeError("base field must be an absolute field")
d = K.absolute_degree()
#check that we are not over QQ
if d > 1:
if PS.dimension_relative() != 1:
raise NotImplementedError("rational periodic points for number fields only implemented in dimension 1")
w = K.absolute_generator()
#we need to dehomogenize for the Weil restriction and will check that point at infty
#separately. We also check here that we are working with a polynomial. If the map
#is not a polynomial, the Weil restriction will not be a morphism and we cannot
#apply this algorithm.
g = self.dehomogenize(1)
inf = PS([1,0])
k = 1
if isinstance(g[0], FractionFieldElement):
g = self.dehomogenize(0)
inf = PS([0,1])
k = 0
if isinstance(g[0], FractionFieldElement):
raise NotImplementedError("rational periodic points for number fields only implemented for polynomials")
#determine rational periodic points
#infinity is a totally ramified fixed point for a polynomial
periodic_points = set([inf])
#compute the Weil restriction
G = g.weil_restriction()
F = G.homogenize(d)
#find the QQ rational periodic points for the weil restriction
Fper = F.rational_periodic_points(**kwds)
for P in Fper:
#take the 'good' points in the weil restriction and find the
#associated number field points.
if P[d] == 1:
pt = [sum([P[i]*w**i for i in range(d)])]
pt.insert(k,1)
Q = PS(pt)
#for each periodic point get the entire cycle
if Q not in periodic_points:
#check periodic not preperiodic and add all points in cycle
orb = set([Q])
Q2 = self(Q)
while Q2 not in orb:
orb.add(Q2)
Q2 = self(Q2)
if Q2 == Q:
periodic_points = periodic_points.union(orb)
return list(periodic_points)
else:
primebound = kwds.pop("prime_bound", [1, 20])
p = kwds.pop("lifting_prime", 23)
periods = kwds.pop("periods", None)
badprimes = kwds.pop("bad_primes", None)
num_cpus = kwds.pop("ncpus", ncpus())
if not isinstance(primebound, (list, tuple)):
try:
primebound = [1, ZZ(primebound)]
except TypeError:
raise TypeError("bound on primes must be an integer")
else:
try:
primebound[0] = ZZ(primebound[0])
primebound[1] = ZZ(primebound[1])
except TypeError:
raise TypeError("prime bounds must be integers")
if badprimes is None:
badprimes = self.primes_of_bad_reduction()
if periods is None:
periods = self.possible_periods(prime_bound=primebound, bad_primes=badprimes, ncpus=num_cpus)
PS = self.domain()
R = PS.base_ring()
periodic = set()
while p in badprimes:
p = next_prime(p + 1)
B = e ** self.height_difference_bound()
f = self.change_ring(GF(p))
all_points = f.possible_periods(True) #return the list of points and their periods.
pos_points = []
for i in range(len(all_points)):
if all_points[i][1] in periods and all_points[i] not in pos_points: #check period, remove duplicates
pos_points.append(all_points[i])
periodic_points = self.lift_to_rational_periodic(pos_points,B)
for p,n in periodic_points:
for k in range(n):
p.normalize_coordinates()
periodic.add(p)
p = self(p)
return list(periodic)
else:
raise TypeError("base field must be an absolute number field")
def rational_preimages(self, Q):
r"""
Determine all of the rational first preimages of ``Q`` by this map.
Given a rational point `Q` in the domain of this map, return all the rational points `P`
in the domain with `self(P)==Q`. In other words, the set of first preimages of `Q`.
The map must be defined over number fields and be an endomorphism.
If ``Q`` is a subscheme, then return the subscheme that maps to ``Q`` by this map.
In particular, `f^{-1}(V(h_1,\ldots,h_t)) = V(h_1 \circ f, \ldots, h_t \circ f)`.
ALGORITHM:
points: Use elimination via groebner bases to find the rational pre-images
INPUT:
- ``Q`` - a rational point or subscheme in the domain of this map.
OUTPUT:
- a list of rational points or a subscheme in the domain of this map.
EXAMPLES::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([16*x^2 - 29*y^2, 16*y^2])
sage: f.rational_preimages(P(-1,4))
[(5/4 : 1), (-5/4 : 1)]
::
sage: P.<x,y,z> = ProjectiveSpace(QQ,2)
sage: H = End(P)
sage: f = H([76*x^2 - 180*x*y + 45*y^2 + 14*x*z + 45*y*z - 90*z^2, 67*x^2 - 180*x*y - 157*x*z + 90*y*z, -90*z^2])
sage: f.rational_preimages(P(-9,-4,1))
[(0 : 4 : 1)]
A non-periodic example ::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([x^2 + y^2, 2*x*y])
sage: f.rational_preimages(P(17,15))
[(5/3 : 1), (3/5 : 1)]
::
sage: P.<x,y,z,w> = ProjectiveSpace(QQ,3)
sage: H = End(P)
sage: f = H([x^2 - 2*y*w - 3*w^2, -2*x^2 + y^2 - 2*x*z + 4*y*w + 3*w^2, x^2 - y^2 + 2*x*z + z^2 - 2*y*w - w^2, w^2])
sage: f.rational_preimages(P(0,-1,0,1))
[]
::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([x^2 + y^2, 2*x*y])
sage: f.rational_preimages([CC.0,1])
Traceback (most recent call last):
...
TypeError: point must be in codomain of self
A number field example ::
sage: z = QQ['z'].0
sage: K.<a> = NumberField(z^2 - 2);
sage: P.<x,y> = ProjectiveSpace(K,1)
sage: H = End(P)
sage: f = H([x^2 + y^2, y^2])
sage: f.rational_preimages(P(3,1))
[(a : 1), (-a : 1)]
::
sage: z = QQ['z'].0
sage: K.<a> = NumberField(z^2 - 2);
sage: P.<x,y,z> = ProjectiveSpace(K,2)
sage: X = P.subscheme([x^2 - z^2])
sage: H = Hom(X,X)
sage: f= H([x^2 - z^2, a*y^2, z^2 - x^2])
sage: f.rational_preimages(X([1,2,-1]))
[]
::
sage: P.<x,y,z> = ProjectiveSpace(QQ,2)
sage: X = P.subscheme([x^2 - z^2])
sage: H = Hom(X,X)
sage: f= H([x^2-z^2, y^2, z^2-x^2])
sage: f.rational_preimages(X([0,1,0]))
Traceback (most recent call last):
...
NotImplementedError: subschemes as preimages not implemented
::
sage: P.<x, y> = ProjectiveSpace(QQ, 1)
sage: H = | |
(relevant only to EMM custom region cost_convert).
Returns:
A dict with the same form as base_dict and add_dict, with the
values for the particular census division specified in 'cd'
converted to the custom region 'cz'.
"""
# Extract lists of strings corresponding to the residential and
# commercial building types used to process these inputs
res_bldg_types = list(mseg.bldgtypedict.keys())
com_bldg_types = list(cm.CommercialTranslationDicts().bldgtypedict.keys())
# Extract lists of strings corresponding to the residential and
# commercial fuel types used to process these inputs
res_fuel_types = list(mseg.fueldict.keys())
com_fuel_types = list(cm.CommercialTranslationDicts().fueldict.keys())
for (k, i), (k2, i2) in zip(sorted(base_dict.items()),
sorted(add_dict.items())):
# Compare the top level/parent keys of the section of the dict
# currently being parsed to ensure that both the base_dict
# (census division basis) and add_dict (custom region basis)
# are proceeding with the same structure
if k == k2:
# Identify appropriate census division to custom region
# conversion weighting factor array as a function of building
# type; k and k2 correspond to the current top level/parent key,
# thus k and k2 are equal to a building type immediately
# prior to traversing the entire child tree for that
# building type, for which the conversion number array
# cd_to_cz_factor will be the same. Ensure that the walk is
# currently at the building type level by checking keys from the
# next level down (the fuel type level) against expected fuel types
if ((k in res_bldg_types and
any([x in res_fuel_types for x in base_dict[k].keys()])) or
(k in com_bldg_types and
any([x in com_fuel_types for x in base_dict[k].keys()]))):
if k in res_bldg_types:
cd_to_cz_factor = res_convert_array
elif k in com_bldg_types:
cd_to_cz_factor = com_convert_array
# Flag the current fuel type being updated, which is relevant
# to ultimate selection of conversion factor from the conversion
# array when translating to EMM region, in which case conversion
# factors are different for electric and non-electric fuels. Use
# the expectation that conversion arrays will be in dict format
# in the EMM region case (with keys for electric and non-electric
# conversion factors) to trigger the fuel flag update
elif (k in res_fuel_types or k in com_fuel_types) and \
type(res_convert_array) is dict:
fuel_flag = k
# Recursively loop through both dicts
if isinstance(i, dict):
merge_sum(i, i2, cd, cz, cd_dict, cd_list, res_convert_array,
com_convert_array, cd_to_cz_factor, fuel_flag)
elif type(base_dict[k]) is not str:
# Check whether the conversion array needs to be further keyed
# by fuel type, as is the case when converting to EMM region;
# in such cases, the fuel flag indicates the key value to use
if fuel_flag is not None:
# Find conversion factor for the electric fuel and given
# combination of census division and EMM region
if fuel_flag == "electricity":
convert_fact = cd_to_cz_factor["electric"][cd][cz]
# Find conversion data for the non-electric fuels and given
# combination of census division and EMM region
else:
convert_fact = cd_to_cz_factor["non-electric"][cd][cz]
else:
# Find the conversion factor for the given combination of
# census division and AIA climate zone
convert_fact = cd_to_cz_factor[cd][cz]
# Special handling of first dict (no addition of the
# second dict, only conversion of the first dict with
# the appropriate factor)
if (cd == (cd_dict[cd_list[0]] - 1)):
# In the special case of consumer choice/time
# preference premium data, the data are reported
# as a list and must be reprocessed using a list
# comprehension (or comparable looping approach)
if isinstance(base_dict[k], list):
base_dict[k] = [z * convert_fact for z
in base_dict[k]]
else:
base_dict[k] = base_dict[k] * convert_fact
else:
if isinstance(base_dict[k], list):
base_dict[k] = [sum(y) for y
in zip(base_dict[k],
[z * convert_fact for z
in add_dict[k2]])]
else:
base_dict[k] = (base_dict[k] +
add_dict[k2] * convert_fact)
else:
raise(KeyError('Merge keys do not match!'))
# Return a single dict representing sum of values of original two dicts
return base_dict
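# Illustrative sketch with made-up numbers (not from the input data): for a
# single leaf value, merge_sum accumulates
#     value_cz = sum over cd of value_cd * convert_factor[cd][cz]
# where the first census division only rescales base_dict in place and each
# later division is added via add_dict, e.g. with factors 0.3 and 0.7:
#     base = 100 * 0.3            # first call (cd equals the first division)
#     base = base + 200 * 0.7     # later call adds the next division's share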
def clim_converter(input_dict, res_convert_array, com_convert_array):
"""Convert input data dict from a census division to a custom region basis.
This function principally serves to prepare the inputs for, and
then call, a function that performs the calculations to convert
data in the input_dict database specified for each microsegment
from a census division to a custom region basis.
Args:
input_dict (dict): Data from JSON database, as imported,
on a census division basis.
res_convert_array (numpy.ndarray): An array of census
division to custom region conversion factors for
residential building types.
com_convert_array (numpy.ndarray): Array of census
division to custom region conversion factors for
commercial building types.
Returns:
A complete dict with the same structure as input_dict,
except at the top level, where census division keys
have been replaced by custom region keys, and the data
have been updated to correspond to those custom regions.
"""
# Create an instance of the CommercialTranslationDicts object from
# com_mseg, which contains a dict that translates census division
# strings into the corresponding integer codes
cd = cm.CommercialTranslationDicts()
# Obtain list of all custom region names as strings
try:
cz_list = res_convert_array.dtype.names[1:]
# Handle conversion to EMM regions, in which custom region names will
# be one level-deep in a dict that breaks out conversion factors by the
# electric and non-electric fuels
except AttributeError:
cz_list = res_convert_array["electric"].dtype.names[1:]
# Obtain list of all census divisions in the input data
cd_list = list(input_dict.keys())
# Set up empty dict to be updated with the results for each custom
# region as the data are converted
converted_dict = {}
# Add the values from each custom region to the converted_dict
for cz_number, cz_name in enumerate(cz_list):
# Create a copy of the input dict at the level below a census
# division or custom region (the structure below that level
# should be identical); uses the first census division in
# cd_list each time
base_dict = copy.deepcopy(input_dict[cd_list[0]])
# Loop through all census divisions to add their contributions
# to each custom region
for cd_name in cd_list:
# Proceed only if the census division name is found in
# the dict specified in this function, otherwise raise
# a KeyError
if cd_name in cd.cdivdict.keys():
# Obtain the census division number from the dict
# and subtract 1 to make the number usable as a list
# index (1st list element is indexed by 0 in Python)
cd_number = cd.cdivdict[cd_name] - 1
# Make a copy of the portion of the input dict
# corresponding to the current census division
add_dict = copy.deepcopy(input_dict[cd_name])
# Call the merge_sum function to replace base_dict with
# updated contents; add 1 to the custom region number
# because it will be used as a column index for an
# array where the first column of data are in the
# second column (indexed as 1 in Python); this approach
# overwrites base_dict, which is intentional because it
# is the master dict that stores the data on a custom
# region basis as the contribution from each census
# division is added to the custom region by merge_sum
base_dict = merge_sum(base_dict, add_dict, cd_number,
(cz_number + 1), cd.cdivdict, cd_list,
res_convert_array, com_convert_array)
else:
raise(KeyError("Census division name not found in dict keys!"))
# Once fully updated with the data from all census divisions,
# write the resulting data to a new variable and update the
# master dict with the data using the appropriate census
# division string name as the key
newadd = base_dict
converted_dict.update({cz_name: newadd})
return converted_dict
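# Hypothetical usage sketch (file names and delimiter are assumptions): the
# conversion factors are typically loaded as structured numpy arrays whose
# first column names the census division and whose remaining columns are the
# custom regions, then passed to clim_converter with the census-division data.
#
#   res_arr = numpy.genfromtxt('res_cdiv_to_czone_factors.txt', names=True,
#                              delimiter='\t', dtype=None)
#   com_arr = numpy.genfromtxt('com_cdiv_to_czone_factors.txt', names=True,
#                              delimiter='\t', dtype=None)
#   msegs_by_region = clim_converter(msegs_by_cdiv, res_arr, com_arr)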
def env_cpl_data_handler(
cpl_data, cost_convert, perf_convert, years, key_list):
"""Restructure envelope component cost, performance, and lifetime data.
This function extracts the cost, performance, and lifetime data for
the envelope components of residential and commercial buildings
from the original data and restructures it into a form that is
generally consistent with similar data originally obtained from
the Annual Energy Outlook (AEO). These data are added to the input
microsegments database after it is initially converted to a custom region
basis, and these data are reported by AIA | |
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.urls import reverse
import mock
from openstack_dashboard import api
from troveclient import common
from trove_dashboard import api as trove_api
from trove_dashboard.content.database_clusters \
import cluster_manager
from trove_dashboard.content.database_clusters import tables
from trove_dashboard.test import helpers as test
from trove_dashboard.utils import common as common_utils
INDEX_URL = reverse('horizon:project:database_clusters:index')
LAUNCH_URL = reverse('horizon:project:database_clusters:launch')
DETAILS_URL = reverse('horizon:project:database_clusters:detail', args=['id'])
RESET_PASSWORD_VIEWNAME = 'horizon:project:database_clusters:reset_password'
class ClustersTests(test.TestCase):
@test.create_mocks({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index(self):
clusters = common.Paginated(self.trove_clusters.list())
self.mock_cluster_list.return_value = clusters
self.mock_flavor_list.return_value = self.flavors.list()
res = self.client.get(INDEX_URL)
self.mock_cluster_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
@test.create_mocks({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_flavor_exception(self):
clusters = common.Paginated(self.trove_clusters.list())
self.mock_cluster_list.return_value = clusters
self.mock_flavor_list.side_effect = self.exceptions.trove
res = self.client.get(INDEX_URL)
self.mock_cluster_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_mocks({trove_api.trove: ('cluster_list',)})
def test_index_list_exception(self):
self.mock_cluster_list.side_effect = self.exceptions.trove
res = self.client.get(INDEX_URL)
self.mock_cluster_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertMessageCount(res, error=1)
@test.create_mocks({trove_api.trove: ('cluster_list',
'flavor_list')})
def test_index_pagination(self):
clusters = self.trove_clusters.list()
last_record = clusters[1]
clusters = common.Paginated(clusters, next_marker="foo")
self.mock_cluster_list.return_value = clusters
self.mock_flavor_list.return_value = self.flavors.list()
res = self.client.get(INDEX_URL)
self.mock_cluster_list.assert_called_once_with(
test.IsHttpRequest(), marker=None)
self.mock_flavor_list.assert_called_once_with(test.IsHttpRequest())
self.assertTemplateUsed(res, 'project/database_clusters/index.html')
self.assertContains(
res, 'marker=' + last_record.id)
@test.create_mocks({trove_api.trove: ('datastore_flavors',
'datastore_list',
'datastore_version_list'),
api.base: ['is_service_enabled']})
def test_launch_cluster(self):
self.mock_is_service_enabled.return_value = False
self.mock_datastore_flavors.return_value = self.flavors.list()
filtered_datastores = self._get_filtered_datastores('mongodb')
self.mock_datastore_list.return_value = filtered_datastores
self.mock_datastore_version_list.return_value = (
self._get_filtered_datastore_versions(filtered_datastores))
res = self.client.get(LAUNCH_URL)
self.mock_is_service_enabled.assert_called_once_with(
test.IsHttpRequest(), 'network')
self.mock_datastore_flavors.assert_called_once_with(
test.IsHttpRequest(), 'mongodb', '2.6')
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_version_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.assertTemplateUsed(res, 'project/database_clusters/launch.html')
def test_launch_cluster_mongo_fields(self):
datastore = 'mongodb'
datastore_version = '2.6'
fields = self.launch_cluster_fields_setup(datastore,
datastore_version)
field_name = self._build_flavor_widget_name(datastore,
datastore_version)
self.assertTrue(self._contains_datastore_in_attribute(
fields[field_name], field_name))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_instances'], field_name))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_shards'], field_name))
self.assertFalse(self._contains_datastore_in_attribute(
fields['root_password'], field_name))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_instances_vertica'], field_name))
def test_launch_cluster_redis_fields(self):
datastore = 'redis'
datastore_version = '3.0'
fields = self.launch_cluster_fields_setup(datastore,
datastore_version)
field_name = self._build_flavor_widget_name(datastore,
datastore_version)
self.assertTrue(self._contains_datastore_in_attribute(
fields[field_name], field_name))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_instances'], field_name))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_shards'], field_name))
self.assertFalse(self._contains_datastore_in_attribute(
fields['root_password'], field_name))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_instances_vertica'], field_name))
def test_launch_cluster_vertica_fields(self):
datastore = 'vertica'
datastore_version = '7.1'
fields = self.launch_cluster_fields_setup(datastore,
datastore_version)
field_name = self._build_flavor_widget_name(datastore,
datastore_version)
self.assertTrue(self._contains_datastore_in_attribute(
fields[field_name], field_name))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_instances'], field_name))
self.assertFalse(self._contains_datastore_in_attribute(
fields['num_shards'], field_name))
self.assertTrue(self._contains_datastore_in_attribute(
fields['root_password'], field_name))
self.assertTrue(self._contains_datastore_in_attribute(
fields['num_instances_vertica'], field_name))
@test.create_mocks({trove_api.trove: ('datastore_flavors',
'datastore_list',
'datastore_version_list'),
api.base: ['is_service_enabled']})
def launch_cluster_fields_setup(self, datastore, datastore_version):
self.mock_is_service_enabled.return_value = False
self.mock_datastore_flavors.return_value = self.flavors.list()
filtered_datastores = self._get_filtered_datastores(datastore)
self.mock_datastore_list.return_value = filtered_datastores
self.mock_datastore_version_list.return_value = (
self._get_filtered_datastore_versions(filtered_datastores))
res = self.client.get(LAUNCH_URL)
self.mock_is_service_enabled.assert_called_once_with(
test.IsHttpRequest(), 'network')
self.mock_datastore_flavors.assert_called_once_with(
test.IsHttpRequest(), datastore, datastore_version)
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_version_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
return res.context_data['form'].fields
@test.create_mocks({trove_api.trove: ['datastore_flavors',
'cluster_create',
'datastore_list',
'datastore_version_list'],
api.base: ['is_service_enabled']})
def test_create_simple_cluster(self):
self.mock_is_service_enabled.return_value = False
self.mock_datastore_flavors.return_value = self.flavors.list()
filtered_datastores = self._get_filtered_datastores('mongodb')
self.mock_datastore_list.return_value = filtered_datastores
self.mock_datastore_version_list.return_value = (
self._get_filtered_datastore_versions(filtered_datastores))
self.mock_cluster_create.return_value = self.trove_clusters.first()
cluster_name = u'MyCluster'
cluster_volume = 1
cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
cluster_instances = 3
cluster_datastore = u'mongodb'
cluster_datastore_version = u'2.6'
cluster_network = u''
field_name = self._build_flavor_widget_name(cluster_datastore,
cluster_datastore_version)
post = {
'name': cluster_name,
'volume': cluster_volume,
'num_instances': cluster_instances,
'num_shards': 1,
'datastore': field_name,
field_name: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
}
res = self.client.post(LAUNCH_URL, post)
self.mock_is_service_enabled.assert_called_once_with(
test.IsHttpRequest(), 'network')
self.mock_datastore_flavors.assert_called_once_with(
test.IsHttpRequest(), 'mongodb', '2.6')
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_version_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_cluster_create.assert_called_once_with(
test.IsHttpRequest(),
cluster_name,
cluster_volume,
cluster_flavor,
cluster_instances,
datastore=cluster_datastore,
datastore_version=cluster_datastore_version,
nics=cluster_network,
root_password=None,
locality=None)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_mocks({trove_api.trove: ['datastore_flavors',
'cluster_create',
'datastore_list',
'datastore_version_list'],
api.neutron: ['network_list_for_tenant'],
api.base: ['is_service_enabled']})
def test_create_simple_cluster_neutron(self):
self.mock_is_service_enabled.return_value = True
self.mock_network_list_for_tenant.return_value = self.networks.list()
self.mock_datastore_flavors.return_value = self.flavors.list()
filtered_datastores = self._get_filtered_datastores('mongodb')
self.mock_datastore_list.return_value = filtered_datastores
self.mock_datastore_version_list.return_value = (
self._get_filtered_datastore_versions(filtered_datastores))
self.mock_cluster_create.return_value = self.trove_clusters.first()
cluster_name = u'MyCluster'
cluster_volume = 1
cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
cluster_instances = 3
cluster_datastore = u'mongodb'
cluster_datastore_version = u'2.6'
cluster_network = u'82288d84-e0a5-42ac-95be-e6af08727e42'
field_name = self._build_flavor_widget_name(cluster_datastore,
cluster_datastore_version)
post = {
'name': cluster_name,
'volume': cluster_volume,
'num_instances': cluster_instances,
'num_shards': 1,
'datastore': field_name,
field_name: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': cluster_network,
}
res = self.client.post(LAUNCH_URL, post)
self.mock_is_service_enabled.assert_called_once_with(
test.IsHttpRequest(), 'network')
self.mock_network_list_for_tenant.assert_called_once_with(
test.IsHttpRequest(), '1')
self.mock_datastore_flavors.assert_called_once_with(
test.IsHttpRequest(), 'mongodb', '2.6')
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_version_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_cluster_create.assert_called_once_with(
test.IsHttpRequest(),
cluster_name,
cluster_volume,
cluster_flavor,
cluster_instances,
datastore=cluster_datastore,
datastore_version=cluster_datastore_version,
nics=cluster_network,
root_password=None,
locality=None)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_mocks({trove_api.trove: ['datastore_flavors',
'cluster_create',
'datastore_list',
'datastore_version_list'],
api.neutron: ['network_list_for_tenant']})
def test_create_simple_cluster_exception(self):
self.mock_network_list_for_tenant.return_value = self.networks.list()
self.mock_datastore_flavors.return_value = self.flavors.list()
filtered_datastores = self._get_filtered_datastores('mongodb')
self.mock_datastore_list.return_value = filtered_datastores
self.mock_datastore_version_list.return_value = (
self._get_filtered_datastore_versions(filtered_datastores))
self.mock_cluster_create.side_effect = self.exceptions.trove
cluster_name = u'MyCluster'
cluster_volume = 1
cluster_flavor = u'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
cluster_instances = 3
cluster_datastore = u'mongodb'
cluster_datastore_version = u'2.6'
cluster_network = u''
field_name = self._build_flavor_widget_name(cluster_datastore,
cluster_datastore_version)
post = {
'name': cluster_name,
'volume': cluster_volume,
'num_instances': cluster_instances,
'num_shards': 1,
'datastore': field_name,
field_name: 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
}
res = self.client.post(LAUNCH_URL, post)
self.mock_network_list_for_tenant.assert_called_once_with(
test.IsHttpRequest(), '1')
self.mock_datastore_flavors.assert_called_once_with(
test.IsHttpRequest(), 'mongodb', '2.6')
self.mock_datastore_list.assert_called_once_with(test.IsHttpRequest())
self.mock_datastore_version_list.assert_called_once_with(
test.IsHttpRequest(), test.IsA(str))
self.mock_cluster_create.assert_called_once_with(
test.IsHttpRequest(),
cluster_name,
cluster_volume,
cluster_flavor,
cluster_instances,
datastore=cluster_datastore,
datastore_version=cluster_datastore_version,
nics=cluster_network,
root_password=None,
locality=None)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(error=1)
@test.create_mocks({trove_api.trove: ('cluster_get',
'instance_get',
'flavor_get',)})
def test_details(self):
cluster = self.trove_clusters.first()
self.mock_cluster_get.return_value = cluster
self.mock_instance_get.return_value = self.databases.first()
self.mock_flavor_get.return_value = self.flavors.first()
details_url = reverse('horizon:project:database_clusters:detail',
args=[cluster.id])
res = self.client.get(details_url)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_cluster_get, 2,
mock.call(test.IsHttpRequest(), cluster.id))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_instance_get, 3,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_flavor_get, 4,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertContains(res, cluster.ip[0])
@test.create_mocks({trove_api.trove: ('cluster_get',
'instance_get',
'flavor_get',)})
def test_details_without_locality(self):
cluster = self.trove_clusters.list()[1]
self.mock_cluster_get.return_value = cluster
self.mock_instance_get.return_value = self.databases.first()
self.mock_flavor_get.return_value = self.flavors.first()
details_url = reverse('horizon:project:database_clusters:detail',
args=[cluster.id])
res = self.client.get(details_url)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_cluster_get, 2,
mock.call(test.IsHttpRequest(), cluster.id))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_instance_get, 3,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_flavor_get, 4,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertNotContains(res, "Locality")
@test.create_mocks({trove_api.trove: ('cluster_get',
'instance_get',
'flavor_get',)})
def test_details_with_locality(self):
cluster = self.trove_clusters.first()
self.mock_cluster_get.return_value = cluster
self.mock_instance_get.return_value = self.databases.first()
self.mock_flavor_get.return_value = self.flavors.first()
details_url = reverse('horizon:project:database_clusters:detail',
args=[cluster.id])
res = self.client.get(details_url)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_cluster_get, 2,
mock.call(test.IsHttpRequest(), cluster.id))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_instance_get, 3,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_flavor_get, 4,
mock.call(test.IsHttpRequest(), test.IsA(str)))
self.assertTemplateUsed(res, 'project/database_clusters/'
'_detail_overview.html')
self.assertContains(res, "Locality")
@test.create_mocks(
{trove_api.trove: ('cluster_get',
'cluster_grow'),
cluster_manager: ('get',)})
def test_grow_cluster(self):
cluster = self.trove_clusters.first()
self.mock_cluster_get.return_value = cluster
cluster_volume = 1
flavor = self.flavors.first()
cluster_flavor = flavor.id
cluster_flavor_name = flavor.name
instances = [
cluster_manager.ClusterInstance("id1", "name1", cluster_flavor,
cluster_flavor_name,
cluster_volume, "master", None,
None),
cluster_manager.ClusterInstance("id2", "name2", cluster_flavor,
cluster_flavor_name,
cluster_volume, "slave",
"master", None),
cluster_manager.ClusterInstance("id3", None, cluster_flavor,
cluster_flavor_name,
cluster_volume, None, None, None),
]
manager = cluster_manager.ClusterInstanceManager(cluster.id)
manager.instances = instances
self.mock_get.return_value = manager
url = reverse('horizon:project:database_clusters:cluster_grow_details',
args=[cluster.id])
res = self.client.get(url)
self.assertTemplateUsed(
res, 'project/database_clusters/cluster_grow_details.html')
table = res.context_data[
"".join([tables.ClusterGrowInstancesTable.Meta.name, '_table'])]
self.assertEqual(len(cluster.instances), len(table.data))
action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
tables.ClusterGrowRemoveInstance.name, '__',
'id1'])
self.client.post(url, {'action': action})
self.assertEqual(len(cluster.instances) - 1, len(table.data))
action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
tables.ClusterGrowAction.name, '__',
cluster.id])
res = self.client.post(url, {'action': action})
self.mock_cluster_get.assert_called_once_with(
test.IsHttpRequest(), cluster.id)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get, 5, mock.call(cluster.id))
self.mock_cluster_grow.assert_called_once_with(
test.IsHttpRequest(), cluster.id, instances)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({trove_api.trove: ('cluster_get',)})
def test_grow_cluster_no_instances(self):
cluster = self.trove_clusters.first()
self.mock_cluster_get.return_value = cluster
url = reverse('horizon:project:database_clusters:cluster_grow_details',
args=[cluster.id])
res = self.client.get(url)
self.assertTemplateUsed(
res, 'project/database_clusters/cluster_grow_details.html')
action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
tables.ClusterGrowAction.name, '__',
cluster.id])
self.client.post(url, {'action': action})
self.mock_cluster_get.assert_called_once_with(
test.IsHttpRequest(), cluster.id)
self.assertMessageCount(info=1)
@test.create_mocks(
{trove_api.trove: ('cluster_get',
'cluster_grow',),
cluster_manager: ('get',)})
def test_grow_cluster_exception(self):
cluster = self.trove_clusters.first()
self.mock_cluster_get.return_value = cluster
cluster_volume = 1
flavor = self.flavors.first()
cluster_flavor = flavor.id
cluster_flavor_name = flavor.name
instances = [
cluster_manager.ClusterInstance("id1", "name1", cluster_flavor,
cluster_flavor_name,
cluster_volume, "master", None,
None),
cluster_manager.ClusterInstance("id2", "name2", cluster_flavor,
cluster_flavor_name,
cluster_volume, "slave",
"master", None),
cluster_manager.ClusterInstance("id3", None, cluster_flavor,
cluster_flavor_name,
cluster_volume, None, None, None),
]
manager = cluster_manager.ClusterInstanceManager(cluster.id)
manager.instances = instances
self.mock_get.return_value = manager
self.mock_cluster_grow.side_effect = self.exceptions.trove
url = reverse('horizon:project:database_clusters:cluster_grow_details',
args=[cluster.id])
res = self.client.get(url)
self.assertTemplateUsed(
res, 'project/database_clusters/cluster_grow_details.html')
toSuppress = ["trove_dashboard.content.database_clusters.tables"]
# Suppress expected log messages in the test output
loggers = []
for cls in toSuppress:
logger = logging.getLogger(cls)
loggers.append((logger, logger.getEffectiveLevel()))
logger.setLevel(logging.CRITICAL)
try:
action = "".join([tables.ClusterGrowInstancesTable.Meta.name, '__',
tables.ClusterGrowAction.name, '__',
cluster.id])
res = self.client.post(url, {'action': action})
self.mock_cluster_get.assert_called_once_with(
test.IsHttpRequest(), cluster.id)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get, 3, mock.call(cluster.id))
self.mock_cluster_grow.assert_called_once_with(
test.IsHttpRequest(), cluster.id, instances)
self.assertMessageCount(error=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
finally:
# Restore the previous log levels
for (log, level) in loggers:
log.setLevel(level)
@test.create_mocks({trove_api.trove: ('cluster_get',
'cluster_shrink')})
def test_shrink_cluster(self):
cluster = self.trove_clusters.first()
self.mock_cluster_get.return_value = cluster
instance_id = cluster.instances[0]['id']
cluster_instances = [{'id': instance_id}]
url = reverse(
'horizon:project:database_clusters:cluster_shrink_details',
args=[cluster.id])
res = self.client.get(url)
self.assertTemplateUsed(
res, 'project/database_clusters/cluster_shrink_details.html')
table = res.context_data[
"".join([tables.ClusterShrinkInstancesTable.Meta.name, '_table'])]
self.assertEqual(len(cluster.instances), len(table.data))
action = "".join([tables.ClusterShrinkInstancesTable.Meta.name, '__',
tables.ClusterShrinkAction.name, '__',
instance_id])
res = self.client.post(url, {'action': action})
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_cluster_get, 2,
mock.call(test.IsHttpRequest(), cluster.id))
self.mock_cluster_shrink.assert_called_once_with(
test.IsHttpRequest(), cluster.id, cluster_instances)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_mocks({trove_api.trove: ('cluster_get',
'cluster_shrink')})
def test_shrink_cluster_exception(self):
cluster = self.trove_clusters.first()
self.mock_cluster_get.return_value = cluster
instance_id = cluster.instances[0]['id']
cluster_instances = [{'id': instance_id}]
self.mock_cluster_shrink.side_effect = self.exceptions.trove
url = reverse(
'horizon:project:database_clusters:cluster_shrink_details',
args=[cluster.id])
action = "".join([tables.ClusterShrinkInstancesTable.Meta.name, '__',
tables.ClusterShrinkAction.name, '__',
instance_id])
toSuppress = ["trove_dashboard.content.database_clusters.tables"]
# Suppress expected log messages in the test output
loggers = []
for cls in toSuppress:
logger = logging.getLogger(cls)
loggers.append((logger, logger.getEffectiveLevel()))
logger.setLevel(logging.CRITICAL)
try:
res = self.client.post(url, {'action': action})
self.mock_cluster_get.assert_called_once_with(
test.IsHttpRequest(), cluster.id)
self.mock_cluster_shrink.assert_called_once_with(
test.IsHttpRequest(), cluster.id, cluster_instances)
self.assertMessageCount(error=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
finally:
# Restore the previous log levels
for (log, level) in loggers:
log.setLevel(level)
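# --- Illustrative sketch only (not part of the original test module) ---
# The exception tests above temporarily raise selected loggers to CRITICAL and
# restore the previous levels in try/finally blocks. The same pattern could be
# factored into a small context manager; the helper name below is hypothetical.
import contextlib
import logging


@contextlib.contextmanager
def suppressed_loggers(*names, level=logging.CRITICAL):
    """Temporarily raise the level of the named loggers, restoring them afterwards."""
    saved = []
    for name in names:
        logger = logging.getLogger(name)
        saved.append((logger, logger.getEffectiveLevel()))
        logger.setLevel(level)
    try:
        yield
    finally:
        for logger, old_level in saved:
            logger.setLevel(old_level)

# Possible use inside a test:
#     with suppressed_loggers("trove_dashboard.content.database_clusters.tables"):
#         res = self.client.post(url, {'action': action})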
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOperatorDeclaration" ):
listener.enterOperatorDeclaration(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOperatorDeclaration" ):
listener.exitOperatorDeclaration(self)
def operatorDeclaration(self):
localctx = EvansParser.OperatorDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 42, self.RULE_operatorDeclaration)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 262
self.match(EvansParser.ID)
self.state = 263
self.match(EvansParser.T__3)
self.state = 265
self._errHandler.sync(self)
_la = self._input.LA(1)
if ((((_la - 49)) & ~0x3f) == 0 and ((1 << (_la - 49)) & ((1 << (EvansParser.LIST - 49)) | (1 << (EvansParser.BOOL - 49)) | (1 << (EvansParser.STR - 49)) | (1 << (EvansParser.FLOAT - 49)) | (1 << (EvansParser.INT - 49)) | (1 << (EvansParser.NUM - 49)) | (1 << (EvansParser.VAR - 49)) | (1 << (EvansParser.ID - 49)))) != 0):
self.state = 264
self.genParameters()
self.state = 267
self.match(EvansParser.T__4)
self.state = 268
self.match(EvansParser.T__0)
self.state = 269
self.operatorBody()
self.state = 270
self.match(EvansParser.T__1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OperatorBodyContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def EFF(self):
return self.getToken(EvansParser.EFF, 0)
def operatorCodeBlock(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(EvansParser.OperatorCodeBlockContext)
else:
return self.getTypedRuleContext(EvansParser.OperatorCodeBlockContext,i)
def WHEN(self):
return self.getToken(EvansParser.WHEN, 0)
def genExpression(self):
return self.getTypedRuleContext(EvansParser.GenExpressionContext,0)
def EXEC(self):
return self.getToken(EvansParser.EXEC, 0)
def getRuleIndex(self):
return EvansParser.RULE_operatorBody
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOperatorBody" ):
listener.enterOperatorBody(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOperatorBody" ):
listener.exitOperatorBody(self)
def operatorBody(self):
localctx = EvansParser.OperatorBodyContext(self, self._ctx, self.state)
self.enterRule(localctx, 44, self.RULE_operatorBody)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 275
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==EvansParser.WHEN:
self.state = 272
self.match(EvansParser.WHEN)
self.state = 273
self.match(EvansParser.T__2)
self.state = 274
self.genExpression(0)
self.state = 277
self.match(EvansParser.EFF)
self.state = 278
self.match(EvansParser.T__2)
self.state = 279
self.operatorCodeBlock()
self.state = 283
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==EvansParser.EXEC:
self.state = 280
self.match(EvansParser.EXEC)
self.state = 281
self.match(EvansParser.T__2)
self.state = 282
self.operatorCodeBlock()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OperatorCodeBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def blockStatement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(EvansParser.BlockStatementContext)
else:
return self.getTypedRuleContext(EvansParser.BlockStatementContext,i)
def getRuleIndex(self):
return EvansParser.RULE_operatorCodeBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOperatorCodeBlock" ):
listener.enterOperatorCodeBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOperatorCodeBlock" ):
listener.exitOperatorCodeBlock(self)
def operatorCodeBlock(self):
localctx = EvansParser.OperatorCodeBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 46, self.RULE_operatorCodeBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 286
self._errHandler.sync(self)
_la = self._input.LA(1)
while True:
self.state = 285
self.blockStatement()
self.state = 288
self._errHandler.sync(self)
_la = self._input.LA(1)
if not (((((_la - 4)) & ~0x3f) == 0 and ((1 << (_la - 4)) & ((1 << (EvansParser.T__3 - 4)) | (1 << (EvansParser.T__16 - 4)) | (1 << (EvansParser.STRING_LITERAL - 4)) | (1 << (EvansParser.DECIMAL_LITERAL - 4)) | (1 << (EvansParser.FLOAT_LITERAL - 4)) | (1 << (EvansParser.BOOL_LITERAL - 4)) | (1 << (EvansParser.IF - 4)) | (1 << (EvansParser.FOR - 4)) | (1 << (EvansParser.WHILE - 4)) | (1 << (EvansParser.RET - 4)) | (1 << (EvansParser.BREAK - 4)) | (1 << (EvansParser.CONT - 4)) | (1 << (EvansParser.LIST - 4)) | (1 << (EvansParser.BOOL - 4)) | (1 << (EvansParser.STR - 4)) | (1 << (EvansParser.FLOAT - 4)) | (1 << (EvansParser.INT - 4)) | (1 << (EvansParser.NUM - 4)) | (1 << (EvansParser.VAR - 4)) | (1 << (EvansParser.ADD - 4)) | (1 << (EvansParser.SUB - 4)) | (1 << (EvansParser.ID - 4)))) != 0)):
break
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GenCodeBlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def blockStatement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(EvansParser.BlockStatementContext)
else:
return self.getTypedRuleContext(EvansParser.BlockStatementContext,i)
def getRuleIndex(self):
return EvansParser.RULE_genCodeBlock
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterGenCodeBlock" ):
listener.enterGenCodeBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitGenCodeBlock" ):
listener.exitGenCodeBlock(self)
def genCodeBlock(self):
localctx = EvansParser.GenCodeBlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 48, self.RULE_genCodeBlock)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 290
self.match(EvansParser.T__0)
self.state = 294
self._errHandler.sync(self)
_la = self._input.LA(1)
while ((((_la - 4)) & ~0x3f) == 0 and ((1 << (_la - 4)) & ((1 << (EvansParser.T__3 - 4)) | (1 << (EvansParser.T__16 - 4)) | (1 << (EvansParser.STRING_LITERAL - 4)) | (1 << (EvansParser.DECIMAL_LITERAL - 4)) | (1 << (EvansParser.FLOAT_LITERAL - 4)) | (1 << (EvansParser.BOOL_LITERAL - 4)) | (1 << (EvansParser.IF - 4)) | (1 << (EvansParser.FOR - 4)) | (1 << (EvansParser.WHILE - 4)) | (1 << (EvansParser.RET - 4)) | (1 << (EvansParser.BREAK - 4)) | (1 << (EvansParser.CONT - 4)) | (1 << (EvansParser.LIST - 4)) | (1 << (EvansParser.BOOL - 4)) | (1 << (EvansParser.STR - 4)) | (1 << (EvansParser.FLOAT - 4)) | (1 << (EvansParser.INT - 4)) | (1 << (EvansParser.NUM - 4)) | (1 << (EvansParser.VAR - 4)) | (1 << (EvansParser.ADD - 4)) | (1 << (EvansParser.SUB - 4)) | (1 << (EvansParser.ID - 4)))) != 0):
self.state = 291
self.blockStatement()
self.state = 296
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 297
self.match(EvansParser.T__1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def varDeclarationStatement(self):
return self.getTypedRuleContext(EvansParser.VarDeclarationStatementContext,0)
def genStatement(self):
return self.getTypedRuleContext(EvansParser.GenStatementContext,0)
def assignmentStatement(self):
return self.getTypedRuleContext(EvansParser.AssignmentStatementContext,0)
def getRuleIndex(self):
return EvansParser.RULE_blockStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockStatement" ):
listener.enterBlockStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockStatement" ):
listener.exitBlockStatement(self)
def blockStatement(self):
localctx = EvansParser.BlockStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 50, self.RULE_blockStatement)
try:
self.state = 302
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,34,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 299
self.varDeclarationStatement()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 300
self.genStatement()
pass
elif la_ == 3:
self.enterOuterAlt(localctx, 3)
self.state = 301
self.assignmentStatement()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VarDeclarationStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def genVarDeclaration(self):
return self.getTypedRuleContext(EvansParser.GenVarDeclarationContext,0)
def variableInitializer(self):
return self.getTypedRuleContext(EvansParser.VariableInitializerContext,0)
def getRuleIndex(self):
return EvansParser.RULE_varDeclarationStatement
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterVarDeclarationStatement" ):
listener.enterVarDeclarationStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitVarDeclarationStatement" ):
listener.exitVarDeclarationStatement(self)
def varDeclarationStatement(self):
localctx = EvansParser.VarDeclarationStatementContext(self, self._ctx, self.state)
self.enterRule(localctx, 52, self.RULE_varDeclarationStatement)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 304
self.genVarDeclaration()
self.state = 307
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==EvansParser.T__8:
self.state = 305
self.match(EvansParser.T__8)
self.state = 306
self.variableInitializer()
self.state = 309
self.match(EvansParser.T__9)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class GenStatementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return EvansParser.RULE_genStatement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class IfStatementContext(GenStatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenStatementContext
super().__init__(parser)
self.copyFrom(ctx)
def IF(self):
return self.getToken(EvansParser.IF, 0)
def genExpression(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(EvansParser.GenExpressionContext)
else:
return self.getTypedRuleContext(EvansParser.GenExpressionContext,i)
def genCodeBlock(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(EvansParser.GenCodeBlockContext)
else:
return self.getTypedRuleContext(EvansParser.GenCodeBlockContext,i)
def ELIF(self, i:int=None):
if i is None:
return self.getTokens(EvansParser.ELIF)
else:
return self.getToken(EvansParser.ELIF, i)
def ELSE(self):
return self.getToken(EvansParser.ELSE, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIfStatement" ):
listener.enterIfStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIfStatement" ):
listener.exitIfStatement(self)
class BreakContStatementContext(GenStatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenStatementContext
super().__init__(parser)
self.copyFrom(ctx)
def BREAK(self):
return self.getToken(EvansParser.BREAK, 0)
def CONT(self):
return self.getToken(EvansParser.CONT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBreakContStatement" ):
listener.enterBreakContStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBreakContStatement" ):
listener.exitBreakContStatement(self)
class ExpressionStatementContext(GenStatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenStatementContext
super().__init__(parser)
self.copyFrom(ctx)
def genExpression(self):
return self.getTypedRuleContext(EvansParser.GenExpressionContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterExpressionStatement" ):
listener.enterExpressionStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitExpressionStatement" ):
listener.exitExpressionStatement(self)
class WhileStatementContext(GenStatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenStatementContext
super().__init__(parser)
self.copyFrom(ctx)
def WHILE(self):
return self.getToken(EvansParser.WHILE, 0)
def genExpression(self):
return self.getTypedRuleContext(EvansParser.GenExpressionContext,0)
def genCodeBlock(self):
return self.getTypedRuleContext(EvansParser.GenCodeBlockContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWhileStatement" ):
listener.enterWhileStatement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWhileStatement" ):
listener.exitWhileStatement(self)
class ForStatementContext(GenStatementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenStatementContext
super().__init__(parser)
self.copyFrom(ctx)
def FOR(self):
return self.getToken(EvansParser.FOR, 0)
def nameList(self):
return self.getTypedRuleContext(EvansParser.NameListContext,0)
def IN(self):
return self.getToken(EvansParser.IN, 0)
def genExpression(self):
return self.getTypedRuleContext(EvansParser.GenExpressionContext,0)
def genCodeBlock(self):
return self.getTypedRuleContext(EvansParser.GenCodeBlockContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterForStatement" ):
listener.enterForStatement(self)
# astropy/wcs/tests/test_wcs.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from ...extern import six
import os
import sys
import warnings
import numpy as np
from numpy.testing import (
assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp)
from ...tests.helper import raises, catch_warnings, pytest
from ... import wcs
from ...utils.data import (
get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename)
from ...tests.helper import pytest
from ...utils.misc import NumpyRNGContext
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
# test_maps() is a generator
def test_maps():
# test_map() is the function that is called to perform the generated test
def test_map(filename):
# the test parameter is the base name of the file to use; find
# the file in the installed wcs test directory
header = get_pkg_data_contents(
os.path.join("maps", filename), encoding='binary')
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
# get the list of the hdr files that we want to test
hdr_file_list = list(get_pkg_data_filenames("maps", "*.hdr"))
# actually perform a test for each one
for filename in hdr_file_list:
# use the base name of the file, because everything we yield
# will show up in the test name in the pandokia report
filename = os.path.basename(filename)
# yield a function name and parameters to make a generated test
yield test_map, filename
# AFTER we tested with every file that we found, check to see that we
# actually have the list we expect. If N=0, we will not have performed
# any tests at all. If N < n_data_files, we are missing some files,
# so we will have skipped some tests. Without this check, both cases
# happen silently!
# how many do we expect to see?
n_data_files = 28
if len(hdr_file_list) != n_data_files:
assert False, (
"test_maps has wrong number data files: found %d, expected "
" %d" % (len(hdr_file_list), n_data_files))
# b.t.w. If this assert happens, py.test reports one more test
# than it would have otherwise.
# test_spectra() is a generator
def test_spectra():
# test_spectrum() is the function that is called to perform the
# generated test
def test_spectrum(filename):
# the test parameter is the base name of the file to use; find
# the file in the installed wcs test directory
header = get_pkg_data_contents(
os.path.join("spectra", filename), encoding='binary')
wcsobj = wcs.WCS(header)
all = wcs.find_all_wcs(header)
assert len(all) == 9
# get the list of the hdr files that we want to test
hdr_file_list = list(get_pkg_data_filenames("spectra", "*.hdr"))
# actually perform a test for each one
for filename in hdr_file_list:
# use the base name of the file, because everything we yield
# will show up in the test name in the pandokia report
filename = os.path.basename(filename)
# yield a function name and parameters to make a generated test
yield test_spectrum, filename
# AFTER we tested with every file that we found, check to see that we
# actually have the list we expect. If N=0, we will not have performed
# any tests at all. If N < n_data_files, we are missing some files,
# so we will have skipped some tests. Without this check, both cases
# happen silently!
# how many do we expect to see?
n_data_files = 6
if len(hdr_file_list) != n_data_files:
assert False, (
"test_spectra has wrong number data files: found %d, expected "
" %d" % (len(hdr_file_list), n_data_files))
# b.t.w. If this assert happens, py.test reports one more test
# than it would have otherwise.
def test_units():
u = wcs.UnitConverter("log(MHz)", "ln(Hz)")
print(u.convert([1, 2, 3, 4]))
basic_units = "m s g rad sr K A mol cd".split()
derived_units = "Hz J W V N Pa C Ohm ohm S F Wb T H lm lx".split()
add_all_units = "eV Jy R G barn".split()
add_sup_units = "a yr pc bit byte Byte".split()
add_sub_units = "mag".split()
general_units = (
"deg arcmin arcsec mas d h min erg Ry u D DEGREE DEGREES".split())
astro_units = "Angstrom angstrom AU lyr beam solRad solMass solLum Sun".split()
device_units = "adu bin chan count ct photon ph pixel pix voxel".split()
sub_prefixes = "y z a f p n u m c d".split()
sup_prefixes = "da h k M G T P E Z Y".split()
def test_all_units():
def test_self(x):
# x appears in the test name. If the test name would otherwise have
# been ambiguous, a -xxx suffix was added to the unit name. Remove it
# if necessary.
if '-' in x:
x = x.split('-')[0]
# here is the test:
try:
u = wcs.UnitConverter(x, x)
except ValueError:
e = sys.exc_info()[1]
if str(e).startswith("ERROR 12 in wcsutrne") and \
x in ("S", "H", "D"):
return
else:
raise
assert u.scale == 1.0
assert u.offset == 0.0
assert u.power == 1.0
# list of all the units to test
all = sorted(basic_units + derived_units + add_all_units + add_sup_units
+ add_sub_units + general_units + astro_units + device_units)
# Pandokia has case-insensitive test names; since the unit name is
# showing up in the test name, we want to disambiguate any name collisions.
# Here is a list of all the lower-cased unit names.
all_lower = [x.lower() for x in all]
# here are serial numbers to use to disambiguate
unique_tags = {}
for unit in all:
# disambiguate the test name, if necessary
l_unit = unit.lower()
if unit != l_unit and l_unit in all_lower:
n = unique_tags.get(l_unit, 1)
unique_tags[l_unit] = n + 1
# the test will tear off the part after the '-'
unit = '%s-%d' % (unit, n)
# perform the test
yield test_self, unit
def test_unit_prefixes():
def test_self(x, p):
unit = p + x
try:
u = wcs.UnitConverter(unit, unit)
except ValueError:
e = sys.exc_info()[1]
if str(e) == "Potentially unsafe translation" and \
x in ("S", "H", "D"):
return
else:
raise
assert u.scale == 1.0
assert u.offset == 0.0
assert u.power == 1.0
for unit in (basic_units + derived_units + add_all_units):
for prefix in (sub_prefixes + sup_prefixes):
yield test_self, unit, prefix
for unit in add_sup_units:
for prefix in sup_prefixes:
yield test_self, unit, prefix
for unit in add_sub_units:
for prefix in sub_prefixes:
yield test_self, unit, prefix
def test_fixes():
"""
From github issue #36
"""
def run():
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
w = wcs.WCS(header)
with catch_warnings(wcs.FITSFixedWarning) as w:
run()
assert len(w) == 2
for item in w:
if 'unitfix' in str(item.message):
assert 'Hz' in str(item.message)
assert 'M/S' in str(item.message)
assert 'm/s' in str(item.message)
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents(
'data/outside_sky.hdr', encoding='binary')
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
# TODO: write this to test the expected output behavior of pix2world,
# currently this just makes sure it doesn't error out in unexpected ways
filename = get_pkg_data_filename('data/sip2.fits')
with catch_warnings(wcs.wcs.FITSFixedWarning) as caught_warnings:
# this raises a warning that is unimportant for this test of pix2world:
# FITSFixedWarning(u'The WCS transformation has more axes (2) than the
# image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
assert len(caught_warnings) == 1
n = 3
pixels = (np.arange(n)*np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
close_enough = 1e-8
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018],
[0.00023043, -0.00024997]])
assert np.all(np.abs(ww.wcs.pc-answer) < close_enough)
answer = np.array([[ 202.39265216, 47.17756518],
[ 202.39335826, 47.17754619],
[ 202.39406436, 47.1775272 ]])
assert np.all(np.abs(result-answer) < close_enough)
def test_load_fits_path():
fits = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits)
def test_backward_compatible():
fits = get_pkg_data_filename('data/sip.fits')
w = wcs.WCS(fits)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
assert np.all(w.wcs_pix2world(data, 0) == w.wcs_pix2sky(data, 0))
assert np.all(w.wcs_world2pix(data, 0) == w.wcs_sky2pix(data, 0))
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41., 2., 1)
assert_array_almost_equal_nulp(xp, 41., 10)
assert_array_almost_equal_nulp(yp, 2., 10)
# Valid WCS
w = wcs.WCS({'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
'CRPIX1': 1,
'CRPIX2': 1,
'CRVAL1': 40.,
'CRVAL2': 0.,
'CDELT1': -0.1,
'CDELT2': 0.1})
xp, yp = w.wcs_world2pix(41., 2., 1)
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Configuration-specific schema fields
"""
import os
import sys
import warnings
from zope.interface import implementer
from zope.schema import Bool as schema_Bool
from zope.schema import DottedName
from zope.schema import Field
from zope.schema import InterfaceField
from zope.schema import List
from zope.schema import PythonIdentifier as schema_PythonIdentifier
from zope.schema import Text
from zope.schema import ValidationError
from zope.schema.interfaces import IFromUnicode
from zope.schema.interfaces import InvalidValue
from zope.configuration.exceptions import ConfigurationError
from zope.configuration.interfaces import InvalidToken
from zope.configuration._compat import implementer_if_needed
__all__ = [
'Bool',
'GlobalObject',
'GlobalInterface',
'MessageID',
'Path',
'PythonIdentifier',
'Tokens',
]
class PythonIdentifier(schema_PythonIdentifier):
r"""
This class is like `zope.schema.PythonIdentifier`.
Let's look at an example:
>>> from zope.configuration.fields import PythonIdentifier
>>> class FauxContext(object):
... pass
>>> context = FauxContext()
>>> field = PythonIdentifier().bind(context)
Let's test the fromUnicode method:
>>> field.fromUnicode(u'foo')
'foo'
>>> field.fromUnicode(u'foo3')
'foo3'
>>> field.fromUnicode(u'_foo3')
'_foo3'
Now let's see whether validation works alright
>>> for value in (u'foo', u'foo3', u'foo_', u'_foo3', u'foo_3', u'foo3_'):
... _ = field.fromUnicode(value)
>>> from zope.schema import ValidationError
>>> for value in (u'3foo', u'foo:', u'\\', u''):
... try:
... field.fromUnicode(value)
... except ValidationError:
... print('Validation Error ' + repr(value))
Validation Error '3foo'
Validation Error 'foo:'
Validation Error '\\'
Validation Error ''
.. versionchanged:: 4.2.0
Extend `zope.schema.PythonIdentifier`, which implies that `fromUnicode`
validates the strings.
"""
def _validate(self, value):
super(PythonIdentifier, self)._validate(value)
if not value:
raise ValidationError(value).with_field_and_value(self, value)
@implementer_if_needed(IFromUnicode)
class GlobalObject(Field):
"""
An object that can be accessed as a module global.
The special value ``*`` indicates a value of `None`; this is
not validated against the *value_type*.
"""
_DOT_VALIDATOR = DottedName()
def __init__(self, value_type=None, **kw):
self.value_type = value_type
super(GlobalObject, self).__init__(**kw)
def _validate(self, value):
super(GlobalObject, self)._validate(value)
if self.value_type is not None:
self.value_type.validate(value)
def fromUnicode(self, value):
r"""
Find and return the module global at the path *value*.
>>> d = {'x': 1, 'y': 42, 'z': 'zope'}
>>> class fakeresolver(dict):
... def resolve(self, n):
... return self[n]
>>> fake = fakeresolver(d)
>>> from zope.schema import Int
>>> from zope.configuration.fields import GlobalObject
>>> g = GlobalObject(value_type=Int())
>>> gg = g.bind(fake)
>>> gg.fromUnicode("x")
1
>>> gg.fromUnicode(" x \n ")
1
>>> gg.fromUnicode("y")
42
>>> gg.fromUnicode("z")
Traceback (most recent call last):
...
WrongType: ('zope', (<type 'int'>, <type 'long'>), '')
>>> g = GlobalObject(constraint=lambda x: x%2 == 0)
>>> gg = g.bind(fake)
>>> gg.fromUnicode("x")
Traceback (most recent call last):
...
ConstraintNotSatisfied: 1
>>> gg.fromUnicode("y")
42
>>> g = GlobalObject()
>>> gg = g.bind(fake)
>>> print(gg.fromUnicode('*'))
None
"""
name = str(value.strip())
# special case, mostly for interfaces
if name == '*':
return None
try:
# Leading dots are allowed here to indicate current
# package, but not accepted by DottedName. Take care,
# though, because a single dot is valid to resolve, but
# not valid to pass to DottedName (as an empty string)
to_validate = name.lstrip('.')
if to_validate:
self._DOT_VALIDATOR.validate(to_validate)
except ValidationError as v:
v.with_field_and_value(self, name)
raise
try:
value = self.context.resolve(name)
except ConfigurationError as v:
raise ValidationError(v).with_field_and_value(self, name)
self.validate(value)
return value
@implementer_if_needed(IFromUnicode)
class GlobalInterface(GlobalObject):
"""
An interface that can be accessed from a module.
Example:
First, we need to set up a stub name resolver:
>>> from zope.interface import Interface
>>> class IFoo(Interface):
... pass
>>> class Foo(object):
... pass
>>> d = {'Foo': Foo, 'IFoo': IFoo}
>>> class fakeresolver(dict):
... def resolve(self, n):
... return self[n]
>>> fake = fakeresolver(d)
Now verify constraints are checked correctly:
>>> from zope.configuration.fields import GlobalInterface
>>> g = GlobalInterface()
>>> gg = g.bind(fake)
>>> gg.fromUnicode('IFoo') is IFoo
True
>>> gg.fromUnicode(' IFoo ') is IFoo
True
>>> gg.fromUnicode('Foo')
Traceback (most recent call last):
...
NotAnInterface: (<class 'Foo'>, ...
"""
def __init__(self, **kw):
super(GlobalInterface, self).__init__(InterfaceField(), **kw)
@implementer(IFromUnicode)
class Tokens(List):
"""
A list that can be read from a space-separated string.
"""
def fromUnicode(self, value):
r"""
Split the input string and convert it to *value_type*.
Consider GlobalObject tokens:
First, we need to set up a stub name resolver:
>>> d = {'x': 1, 'y': 42, 'z': 'zope', 'x.y.x': 'foo'}
>>> class fakeresolver(dict):
... def resolve(self, n):
... return self[n]
>>> fake = fakeresolver(d)
>>> from zope.configuration.fields import Tokens
>>> from zope.configuration.fields import GlobalObject
>>> g = Tokens(value_type=GlobalObject())
>>> gg = g.bind(fake)
>>> gg.fromUnicode(" \n x y z \n")
[1, 42, 'zope']
>>> from zope.schema import Int
>>> g = Tokens(value_type=
... GlobalObject(value_type=
... Int(constraint=lambda x: x%2 == 0)))
>>> gg = g.bind(fake)
>>> gg.fromUnicode("x y")
Traceback (most recent call last):
...
InvalidToken: 1 in x y
>>> gg.fromUnicode("z y")
Traceback (most recent call last):
...
InvalidToken: ('zope', (<type 'int'>, <type 'long'>), '') in z y
>>> gg.fromUnicode("y y")
[42, 42]
"""
value = value.strip()
if value:
vt = self.value_type.bind(self.context)
values = []
for s in value.split():
try:
v = vt.fromUnicode(s)
except ValidationError as ex:
raise InvalidToken("%s in %r" % (ex, value)).with_field_and_value(self, s)
else:
values.append(v)
else:
values = []
self.validate(values)
return values
class PathProcessor(object):
# Internal helper for manipulations on paths
@classmethod
def expand(cls, filename):
# Perform the expansions we want to have done. Returns a
# tuple: (path, needs_processing) If the second value is true,
# further processing should be done (the path isn't fully
# resolved); if false, the path should be used as is
filename = filename.strip()
# expanding a ~ at the front should generally result
# in an absolute path.
filename = os.path.expanduser(filename)
filename = os.path.expandvars(filename)
if os.path.isabs(filename):
return os.path.normpath(filename), False
return filename, True
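# Illustrative examples (hypothetical values, not part of the original module):
# expand() returns the expanded path plus a flag saying whether the caller still
# needs to resolve it against the configuration context.
#     PathProcessor.expand('/etc/zope/site.zcml')  -> ('/etc/zope/site.zcml', False)
#     PathProcessor.expand('site.zcml')            -> ('site.zcml', True)
#     PathProcessor.expand('~/site.zcml')          -> (an absolute path under the
#                                                      user's home directory, False)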
@implementer_if_needed(IFromUnicode)
class Path(Text):
"""
A file path name, which may be input as a relative path
Input paths are converted to absolute paths and normalized.
"""
def fromUnicode(self, value):
r"""
Convert the input path to a normalized, absolute path.
Let's look at an example:
First, we need a "context" for the field that has a path
function for converting relative path to an absolute path.
We'll be careful to do this in an operating system independent fashion.
>>> from zope.configuration.fields import Path
>>> class FauxContext(object):
... def path(self, p):
... return os.path.join(os.sep, 'faux', 'context', p)
>>> context = FauxContext()
>>> field = Path().bind(context)
Let's try an absolute path first:
>>> import os
>>> p = os.path.join(os.sep, u'a', u'b')
>>> n = field.fromUnicode(p)
>>> n.split(os.sep)
['', 'a', 'b']
This should also work with extra spaces around the path:
>>> p = " \n %s \n\n " % p
>>> n = field.fromUnicode(p)
>>> n.split(os.sep)
['', 'a', 'b']
Environment variables are expanded:
>>> os.environ['path-test'] = '42'
>>> with_env = os.path.join(os.sep, u'a', u'${path-test}')
>>> n = field.fromUnicode(with_env)
>>> n.split(os.sep)
['', 'a', '42']
Now try a relative path:
>>> p = os.path.join(u'a', u'b')
>>> n = field.fromUnicode(p)
>>> n.split(os.sep)
['', 'faux', 'context', 'a', 'b']
The current user is expanded (these are implicitly relative paths):
>>> old_home = os.environ.get('HOME')
>>> os.environ['HOME'] = os.path.join(os.sep, 'HOME')
>>> n = field.fromUnicode('~')
>>> n.split(os.sep)
['', 'HOME']
>>> if old_home:
... os.environ['HOME'] = old_home
... else:
... del os.environ['HOME']
.. versionchanged:: 4.2.0
Start expanding home directories and environment variables.
"""
filename, needs_processing = PathProcessor.expand(value)
if needs_processing:
filename = self.context.path(filename)
return filename
@implementer_if_needed(IFromUnicode)
class Bool(schema_Bool):
"""
A boolean value.
Values may be input (in upper or lower case) as any of:
- yes / no
- y / n
- true / false
- t / f
.. caution::
Do not confuse this with :class:`zope.schema.Bool`.
That class will only parse ``"True"`` and ``"true"`` as
`True` values. Any other value will silently be accepted as
`False`. This class raises a validation error for unrecognized
input.
"""
def fromUnicode(self, value):
"""
Convert the input string to a boolean.
Example:
>>> from zope.configuration.fields import Bool
>>> Bool().fromUnicode(u"yes")
True
>>> Bool().fromUnicode(u"y")
True
>>> Bool().fromUnicode(u"true")
True
>>> Bool().fromUnicode(u"no")
False
>>> Bool().fromUnicode(u"surprise")
Traceback (most recent call last):
...
zope.schema._bootstrapinterfaces.InvalidValue
"""
value = value.lower()
if value in ('1', 'true', 'yes', 't', 'y'):
return True
if value in ('0', 'false', 'no', 'f', 'n'):
return False
# Anything not recognized above is rejected (see the class docstring).
raise InvalidValue(value).with_field_and_value(self, value)
# rqalpha_mod_vnpy/ctp/api.py
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import os
from rqalpha.const import ORDER_TYPE, SIDE, POSITION_EFFECT
from .data_dict import TickDict, PositionDict, AccountDict, InstrumentDict, OrderDict, TradeDict, CommissionDict
from ..vnpy import *
from ..utils import make_order_book_id
ORDER_TYPE_MAPPING = {
ORDER_TYPE.MARKET: defineDict["THOST_FTDC_OPT_AnyPrice"],
ORDER_TYPE.LIMIT: defineDict["THOST_FTDC_OPT_LimitPrice"],
}
SIDE_MAPPING = {
SIDE.BUY: defineDict['THOST_FTDC_D_Buy'],
SIDE.SELL: defineDict['THOST_FTDC_D_Sell'],
}
POSITION_EFFECT_MAPPING = {
POSITION_EFFECT.OPEN: defineDict['THOST_FTDC_OF_Open'],
POSITION_EFFECT.CLOSE: defineDict['THOST_FTDC_OF_Close'],
POSITION_EFFECT.CLOSE_TODAY: defineDict['THOST_FTDC_OF_CloseToday'],
}
def query_in_sync(func):
@wraps(func)
def wrapper(api, data, error, n, last):
api.req_id = max(api.req_id, n)
result = func(api, data, last)
if last:
api.gateway.on_query(api.api_name, n, result)
return wrapper
class CtpMdApi(MdApi):
def __init__(self, gateway, temp_path, user_id, password, broker_id, address, api_name='ctp_md'):
super(CtpMdApi, self).__init__()
self.gateway = gateway
self.temp_path = temp_path
self.req_id = 0
self.connected = False
self.logged_in = False
self.user_id = user_id
self.password = password
self.broker_id = broker_id
self.address = address
self.api_name = api_name
def onFrontConnected(self):
"""服务器连接"""
self.connected = True
self.login()
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connected = False
self.logged_in = False
def onHeartBeatWarning(self, n):
"""心跳报警"""
pass
def onRspError(self, error, n, last):
"""错误回报"""
self.gateway.on_err(error)
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
if error['ErrorID'] == 0:
self.logged_in = True
else:
self.gateway.on_err(error)
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
if error['ErrorID'] == 0:
self.logged_in = False
else:
self.gateway.on_err(error)
def onRspSubMarketData(self, data, error, n, last):
"""订阅合约回报"""
pass
def onRspUnSubMarketData(self, data, error, n, last):
"""退订合约回报"""
pass
def onRtnDepthMarketData(self, data):
"""行情推送"""
tick_dict = TickDict(data)
if tick_dict.is_valid:
self.gateway.on_tick(tick_dict)
def onRspSubForQuoteRsp(self, data, error, n, last):
"""订阅期权询价"""
pass
def onRspUnSubForQuoteRsp(self, data, error, n, last):
"""退订期权询价"""
pass
def onRtnForQuoteRsp(self, data):
"""期权询价推送"""
pass
def connect(self):
"""初始化连接"""
if not self.connected:
if not os.path.exists(self.temp_path):
os.makedirs(self.temp_path)
self.createFtdcMdApi(self.temp_path)
self.registerFront(self.address)
self.init()
else:
self.login()
def subscribe(self, order_book_id):
"""订阅合约"""
ins_dict = self.gateway.get_ins_dict(order_book_id)
if ins_dict is None:
return None
instrument_id = ins_dict.instrument_id
if instrument_id:
self.subscribeMarketData(str(instrument_id))
def login(self):
"""登录"""
if not self.logged_in:
req = {
'UserID': self.user_id,
'Password': self.password,
'BrokerID': self.broker_id,
}
self.req_id += 1
self.reqUserLogin(req, self.req_id)
return self.req_id
def close(self):
"""关闭"""
self.exit()
class CtpTdApi(TdApi):
def __init__(self, gateway, temp_path, user_id, password, broker_id, address, auth_code, user_production_info, api_name='ctp_td'):
super(CtpTdApi, self).__init__()
self.gateway = gateway
self.temp_path = temp_path
self.req_id = 0
self.connected = False
self.logged_in = False
self.authenticated = False
self.user_id = user_id
self.password = password
self.broker_id = broker_id
self.address = address
self.auth_code = auth_code
self.user_production_info = user_production_info
self.front_id = 0
self.session_id = 0
self.require_authentication = False
self.pos_cache = {}
self.ins_cache = {}
self.order_cache = {}
self.api_name = api_name
def onFrontConnected(self):
"""服务器连接"""
self.connected = True
if self.require_authentication:
self.authenticate()
else:
self.login()
def onFrontDisconnected(self, n):
"""服务器断开"""
self.connected = False
self.logged_in = False
def onHeartBeatWarning(self, n):
"""心跳报警"""
pass
def onRspAuthenticate(self, data, error, n, last):
"""验证客户端回报"""
if error['ErrorID'] == 0:
self.authenticated = True
self.login()
else:
self.gateway.on_err(error)
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
if error['ErrorID'] == 0:
self.front_id = str(data['FrontID'])
self.session_id = str(data['SessionID'])
self.logged_in = True
self.qrySettlementInfoConfirm()
else:
self.gateway.on_err(error)
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
if error['ErrorID'] == 0:
self.logged_in = False
else:
self.gateway.on_err(error)
def onRspUserPasswordUpdate(self, data, error, n, last):
""""""
pass
def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
""""""
pass
def onRspOrderInsert(self, data, error, n, last):
"""发单错误(柜台)"""
order_dict = OrderDict(data, rejected=True)
if order_dict.is_valid:
self.gateway.on_order(order_dict)
def onRspParkedOrderInsert(self, data, error, n, last):
""""""
pass
def onRspParkedOrderAction(self, data, error, n, last):
""""""
pass
def onRspOrderAction(self, data, error, n, last):
"""撤单错误(柜台)"""
self.gateway.on_err(error)
def onRspQueryMaxOrderVolume(self, data, error, n, last):
""""""
pass
def onRspSettlementInfoConfirm(self, data, error, n, last):
"""确认结算信息回报"""
pass
def onRspRemoveParkedOrder(self, data, error, n, last):
""""""
pass
def onRspRemoveParkedOrderAction(self, data, error, n, last):
""""""
pass
def onRspExecOrderInsert(self, data, error, n, last):
""""""
pass
def onRspExecOrderAction(self, data, error, n, last):
""""""
pass
def onRspForQuoteInsert(self, data, error, n, last):
""""""
pass
def onRspQuoteInsert(self, data, error, n, last):
""""""
pass
def onRspQuoteAction(self, data, error, n, last):
""""""
pass
def onRspLockInsert(self, data, error, n, last):
""""""
pass
def onRspCombActionInsert(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryOrder(self, data, last):
"""报单回报"""
order_dict = OrderDict(data)
if order_dict.is_valid:
self.order_cache[order_dict.order_id] = order_dict
if last:
return self.order_cache
def onRspQryTrade(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryInvestorPosition(self, data, last):
"""持仓查询回报"""
if data['InstrumentID']:
order_book_id = make_order_book_id(data['InstrumentID'])
if order_book_id not in self.pos_cache:
self.pos_cache[order_book_id] = PositionDict(data, self.gateway.get_ins_dict(order_book_id))
else:
self.pos_cache[order_book_id].update_data(data)
if last:
return self.pos_cache
@query_in_sync
def onRspQryTradingAccount(self, data, last):
"""资金账户查询回报"""
return AccountDict(data)
def onRspQryInvestor(self, data, error, n, last):
""""""
pass
def onRspQryTradingCode(self, data, error, n, last):
""""""
pass
def onRspQryInstrumentMarginRate(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryInstrumentCommissionRate(self, data, last):
"""请求查询合约手续费率响应"""
return CommissionDict(data)
def onRspQryExchange(self, data, error, n, last):
""""""
pass
def onRspQryProduct(self, data, error, n, last):
""""""
pass
@query_in_sync
def onRspQryInstrument(self, data, last):
"""合约查询回报"""
ins_dict = InstrumentDict(data)
if ins_dict.is_valid:
self.ins_cache[ins_dict.order_book_id] = ins_dict
if last:
return self.ins_cache
def onRspQryDepthMarketData(self, data, error, n, last):
""""""
pass
def onRspQrySettlementInfo(self, data, error, n, last):
""""""
pass
def onRspQryTransferBank(self, data, error, n, last):
""""""
pass
def onRspQryInvestorPositionDetail(self, data, error, n, last):
""""""
pass
def onRspQryNotice(self, data, error, n, last):
""""""
pass
def onRspQrySettlementInfoConfirm(self, data, error, n, last):
""""""
pass
def onRspQryInvestorPositionCombineDetail(self, data, error, n, last):
""""""
pass
def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
""""""
pass
def onRspQryEWarrantOffset(self, data, error, n, last):
""""""
pass
def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
""""""
pass
def onRspQryExchangeMarginRate(self, data, error, n, last):
""""""
pass
def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
""""""
pass
def onRspQryExchangeRate(self, data, error, n, last):
""""""
pass
def onRspQrySecAgentACIDMap(self, data, error, n, last):
""""""
pass
def onRspQryProductExchRate(self, data, error, n, last):
""""""
pass
def onRspQryProductGroup(self, data, error, n, last):
""""""
pass
def onRspQryOptionInstrTradeCost(self, data, error, n, last):
""""""
pass
def onRspQryOptionInstrCommRate(self, data, error, n, last):
""""""
pass
def onRspQryExecOrder(self, data, error, n, last):
""""""
pass
def onRspQryForQuote(self, data, error, n, last):
""""""
pass
def onRspQryQuote(self, data, error, n, last):
""""""
pass
def onRspQryLock(self, data, error, n, last):
""""""
pass
def onRspQryLockPosition(self, data, error, n, last):
""""""
pass
def onRspQryInvestorLevel(self, data, error, n, last):
""""""
pass
def onRspQryExecFreeze(self, data, error, n, last):
""""""
pass
def onRspQryCombInstrumentGuard(self, data, error, n, last):
""""""
pass
def onRspQryCombAction(self, data, error, n, last):
""""""
pass
def onRspQryTransferSerial(self, data, error, n, last):
""""""
pass
def onRspQryAccountregister(self, data, error, n, last):
""""""
pass
def onRspError(self, error, n, last):
"""错误回报"""
self.gateway.on_err(error)
def onRtnOrder(self, data):
"""报单回报"""
order_dict = OrderDict(data)
if order_dict.is_valid:
self.gateway.on_order(order_dict)
def onRtnTrade(self, data):
"""成交回报"""
trade_dict = TradeDict(data)
self.gateway.on_trade(trade_dict)
def onErrRtnOrderInsert(self, data, error):
"""发单错误回报(交易所)"""
self.gateway.on_err(error)
order_dict = OrderDict(data, rejected=True)
if order_dict.is_valid:
self.gateway.on_order(order_dict)
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
self.gateway.on_err(error)
def onRtnInstrumentStatus(self, data):
""""""
pass
def onRtnTradingNotice(self, data):
""""""
pass
def onRtnErrorConditionalOrder(self, data):
""""""
pass
def onRtnExecOrder(self, data):
""""""
pass
def onErrRtnExecOrderInsert(self, data, error):
""""""
pass
def onErrRtnExecOrderAction(self, data, error):
""""""
pass
def onErrRtnForQuoteInsert(self, data, error):
""""""
pass
def onRtnQuote(self, data):
""""""
pass
def onErrRtnQuoteInsert(self, data, error):
""""""
pass
def onErrRtnQuoteAction(self, data, error):
""""""
pass
def onRtnForQuoteRsp(self, data):
""""""
pass
def onRtnCFMMCTradingAccountToken(self, data):
""""""
pass
def onRtnLock(self, data):
""""""
pass
def onErrRtnLockInsert(self, data, error):
""""""
pass
def onRtnCombAction(self, data):
""""""
pass
def onErrRtnCombActionInsert(self, data, error):
""""""
pass
def onRspQryContractBank(self, data, error, n, last):
""""""
pass
def onRspQryParkedOrder(self, data, error, n, last):
""""""
pass
def onRspQryParkedOrderAction(self, data, error, n, last):
""""""
pass
def onRspQryTradingNotice(self, data, error, n, last):
""""""
pass
def onRspQryBrokerTradingParams(self, data, error, n, last):
""""""
pass
def onRspQryBrokerTradingAlgos(self, data, error, n, last):
""""""
pass
def onRspQueryCFMMCTradingAccountToken(self, data, error, n, last):
""""""
pass
def onRtnFromBankToFutureByBank(self, data):
""""""
pass
def onRtnFromFutureToBankByBank(self, data):
""""""
pass
def onRtnRepealFromBankToFutureByBank(self, data):
""""""
pass
def onRtnRepealFromFutureToBankByBank(self, data):
""""""
pass
def onRtnFromBankToFutureByFuture(self, data):
""""""
pass
def onRtnFromFutureToBankByFuture(self, data):
""""""
pass
def onRtnRepealFromBankToFutureByFutureManual(self, data):
""""""
pass
def onRtnRepealFromFutureToBankByFutureManual(self, data):
""""""
pass
def onRtnQueryBankBalanceByFuture(self, data):
""""""
pass
def onErrRtnBankToFutureByFuture(self, data, error):
""""""
pass
def onErrRtnFutureToBankByFuture(self, data, error):
""""""
pass
def onErrRtnRepealBankToFutureByFutureManual(self, data, error):
""""""
pass
def onErrRtnRepealFutureToBankByFutureManual(self, data, error):
""""""
pass
def onErrRtnQueryBankBalanceByFuture(self, data, error):
""""""
pass
m.x407 == 0)
m.c698 = Constraint(expr= m.x406 - m.x408 - m.x410 - m.x412 == 0)
m.c699 = Constraint(expr= m.x407 - m.x409 - m.x411 - m.x413 == 0)
m.c700 = Constraint(expr= m.x416 - m.x422 - m.x424 == 0)
m.c701 = Constraint(expr= m.x417 - m.x423 - m.x425 == 0)
m.c702 = Constraint(expr= m.x420 - m.x426 - m.x428 - m.x430 == 0)
m.c703 = Constraint(expr= m.x421 - m.x427 - m.x429 - m.x431 == 0)
m.c704 = Constraint(expr= m.x436 - m.x444 - m.x446 == 0)
m.c705 = Constraint(expr= m.x437 - m.x445 - m.x447 == 0)
m.c706 = Constraint(expr= - m.x438 - m.x450 + m.x452 == 0)
m.c707 = Constraint(expr= - m.x439 - m.x451 + m.x453 == 0)
m.c708 = Constraint(expr= m.x440 - m.x454 - m.x456 == 0)
m.c709 = Constraint(expr= m.x441 - m.x455 - m.x457 == 0)
m.c710 = Constraint(expr= m.x442 - m.x458 - m.x460 - m.x462 == 0)
m.c711 = Constraint(expr= m.x443 - m.x459 - m.x461 - m.x463 == 0)
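# Readability note (interpretation only, added to this listing): the blocks of
# constraints below pair an input/output relation for each unit -- e.g.
# -k*log(1 + x_in) + x_out + b <= 1, which reduces to x_out <= k*log(1 + x_in)
# when the binary b equals 1 -- with bound constraints of the form x - U*b <= 0.
# This is a big-M style on/off encoding: when b = 0 the unit's flows are forced
# to zero and the relation is slack; when b = 1 the relation is enforced.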
m.c712 = Constraint(expr=-log(1 + m.x304) + m.x308 + m.b482 <= 1)
m.c713 = Constraint(expr=-log(1 + m.x305) + m.x309 + m.b483 <= 1)
m.c714 = Constraint(expr= m.x304 - 10*m.b482 <= 0)
m.c715 = Constraint(expr= m.x305 - 10*m.b483 <= 0)
m.c716 = Constraint(expr= m.x308 - 2.39789527279837*m.b482 <= 0)
m.c717 = Constraint(expr= m.x309 - 2.39789527279837*m.b483 <= 0)
m.c718 = Constraint(expr=-1.2*log(1 + m.x306) + m.x310 + m.b484 <= 1)
m.c719 = Constraint(expr=-1.2*log(1 + m.x307) + m.x311 + m.b485 <= 1)
m.c720 = Constraint(expr= m.x306 - 10*m.b484 <= 0)
m.c721 = Constraint(expr= m.x307 - 10*m.b485 <= 0)
m.c722 = Constraint(expr= m.x310 - 2.87747432735804*m.b484 <= 0)
m.c723 = Constraint(expr= m.x311 - 2.87747432735804*m.b485 <= 0)
m.c724 = Constraint(expr= - 0.75*m.x318 + m.x326 + m.b486 <= 1)
m.c725 = Constraint(expr= - 0.75*m.x319 + m.x327 + m.b487 <= 1)
m.c726 = Constraint(expr= - 0.75*m.x318 + m.x326 - m.b486 >= -1)
m.c727 = Constraint(expr= - 0.75*m.x319 + m.x327 - m.b487 >= -1)
m.c728 = Constraint(expr= m.x318 - 2.87747432735804*m.b486 <= 0)
m.c729 = Constraint(expr= m.x319 - 2.87747432735804*m.b487 <= 0)
m.c730 = Constraint(expr= m.x326 - 2.15810574551853*m.b486 <= 0)
m.c731 = Constraint(expr= m.x327 - 2.15810574551853*m.b487 <= 0)
m.c732 = Constraint(expr=-1.5*log(1 + m.x320) + m.x328 + m.b488 <= 1)
m.c733 = Constraint(expr=-1.5*log(1 + m.x321) + m.x329 + m.b489 <= 1)
m.c734 = Constraint(expr= m.x320 - 2.87747432735804*m.b488 <= 0)
m.c735 = Constraint(expr= m.x321 - 2.87747432735804*m.b489 <= 0)
m.c736 = Constraint(expr= m.x328 - 2.03277599268042*m.b488 <= 0)
m.c737 = Constraint(expr= m.x329 - 2.03277599268042*m.b489 <= 0)
m.c738 = Constraint(expr= - m.x322 + m.x330 + m.b490 <= 1)
m.c739 = Constraint(expr= - m.x323 + m.x331 + m.b491 <= 1)
m.c740 = Constraint(expr= - m.x322 + m.x330 - m.b490 >= -1)
m.c741 = Constraint(expr= - m.x323 + m.x331 - m.b491 >= -1)
m.c742 = Constraint(expr= - 0.5*m.x324 + m.x330 + m.b490 <= 1)
m.c743 = Constraint(expr= - 0.5*m.x325 + m.x331 + m.b491 <= 1)
m.c744 = Constraint(expr= - 0.5*m.x324 + m.x330 - m.b490 >= -1)
m.c745 = Constraint(expr= - 0.5*m.x325 + m.x331 - m.b491 >= -1)
m.c746 = Constraint(expr= m.x322 - 2.87747432735804*m.b490 <= 0)
m.c747 = Constraint(expr= m.x323 - 2.87747432735804*m.b491 <= 0)
m.c748 = Constraint(expr= m.x324 - 7*m.b490 <= 0)
m.c749 = Constraint(expr= m.x325 - 7*m.b491 <= 0)
m.c750 = Constraint(expr= m.x330 - 3.5*m.b490 <= 0)
m.c751 = Constraint(expr= m.x331 - 3.5*m.b491 <= 0)
m.c752 = Constraint(expr=-1.25*log(1 + m.x332) + m.x342 + m.b492 <= 1)
m.c753 = Constraint(expr=-1.25*log(1 + m.x333) + m.x343 + m.b493 <= 1)
m.c754 = Constraint(expr= m.x332 - 2.15810574551853*m.b492 <= 0)
m.c755 = Constraint(expr= m.x333 - 2.15810574551853*m.b493 <= 0)
m.c756 = Constraint(expr= m.x342 - 1.43746550029693*m.b492 <= 0)
m.c757 = Constraint(expr= m.x343 - 1.43746550029693*m.b493 <= 0)
m.c758 = Constraint(expr=-0.9*log(1 + m.x334) + m.x344 + m.b494 <= 1)
m.c759 = Constraint(expr=-0.9*log(1 + m.x335) + m.x345 + m.b495 <= 1)
m.c760 = Constraint(expr= m.x334 - 2.15810574551853*m.b494 <= 0)
m.c761 = Constraint(expr= m.x335 - 2.15810574551853*m.b495 <= 0)
m.c762 = Constraint(expr= m.x344 - 1.03497516021379*m.b494 <= 0)
m.c763 = Constraint(expr= m.x345 - 1.03497516021379*m.b495 <= 0)
m.c764 = Constraint(expr=-log(1 + m.x328) + m.x346 + m.b496 <= 1)
m.c765 = Constraint(expr=-log(1 + m.x329) + m.x347 + m.b497 <= 1)
m.c766 = Constraint(expr= m.x328 - 2.03277599268042*m.b496 <= 0)
m.c767 = Constraint(expr= m.x329 - 2.03277599268042*m.b497 <= 0)
m.c768 = Constraint(expr= m.x346 - 1.10947836929589*m.b496 <= 0)
m.c769 = Constraint(expr= m.x347 - 1.10947836929589*m.b497 <= 0)
m.c770 = Constraint(expr= - 0.9*m.x336 + m.x348 + m.b498 <= 1)
m.c771 = Constraint(expr= - 0.9*m.x337 + m.x349 + m.b499 <= 1)
m.c772 = Constraint(expr= - 0.9*m.x336 + m.x348 - m.b498 >= -1)
m.c773 = Constraint(expr= - 0.9*m.x337 + m.x349 - m.b499 >= -1)
m.c774 = Constraint(expr= m.x336 - 3.5*m.b498 <= 0)
m.c775 = Constraint(expr= m.x337 - 3.5*m.b499 <= 0)
m.c776 = Constraint(expr= m.x348 - 3.15*m.b498 <= 0)
m.c777 = Constraint(expr= m.x349 - 3.15*m.b499 <= 0)
m.c778 = Constraint(expr= - 0.6*m.x338 + m.x350 + m.b500 <= 1)
m.c779 = Constraint(expr= - 0.6*m.x339 + m.x351 + m.b501 <= 1)
m.c780 = Constraint(expr= - 0.6*m.x338 + m.x350 - m.b500 >= -1)
m.c781 = Constraint(expr= - 0.6*m.x339 + m.x351 - m.b501 >= -1)
m.c782 = Constraint(expr= m.x338 - 3.5*m.b500 <= 0)
m.c783 = Constraint(expr= m.x339 - 3.5*m.b501 <= 0)
m.c784 = Constraint(expr= m.x350 - 2.1*m.b500 <= 0)
m.c785 = Constraint(expr= m.x351 - 2.1*m.b501 <= 0)
m.c786 = Constraint(expr=-1.1*log(1 + m.x340) + m.x352 + m.b502 <= 1)
m.c787 = Constraint(expr=-1.1*log(1 + m.x341) + m.x353 + m.b503 <= 1)
m.c788 = Constraint(expr= m.x340 - 3.5*m.b502 <= 0)
m.c789 = Constraint(expr= m.x341 - 3.5*m.b503 <= 0)
m.c790 = Constraint(expr= m.x352 - 1.6544851364539*m.b502 <= 0)
m.c791 = Constraint(expr= m.x353 - 1.6544851364539*m.b503 <= 0)
m.c792 = Constraint(expr= - 0.9*m.x342 + m.x374 + m.b504 <= 1)
m.c793 = Constraint(expr= - 0.9*m.x343 + m.x375 + m.b505 <= 1)
m.c794 = Constraint(expr= - 0.9*m.x342 + m.x374 - m.b504 >= -1)
m.c795 = Constraint(expr= - 0.9*m.x343 + m.x375 - m.b505 >= -1)
m.c796 = Constraint(expr= - m.x358 + m.x374 + m.b504 <= 1)
m.c797 = Constraint(expr= - m.x359 + m.x375 + m.b505 <= 1)
m.c798 = Constraint(expr= - m.x358 + m.x374 - m.b504 >= -1)
m.c799 = Constraint(expr= - m.x359 + m.x375 - m.b505 >= -1)
m.c800 = Constraint(expr= m.x342 - 1.43746550029693*m.b504 <= 0)
m.c801 = Constraint(expr= m.x343 - 1.43746550029693*m.b505 <= 0)
m.c802 = Constraint(expr= m.x358 - 7*m.b504 <= 0)
m.c803 = Constraint(expr= m.x359 - 7*m.b505 <= 0)
m.c804 = Constraint(expr= m.x374 - 7*m.b504 <= 0)
m.c805 = Constraint(expr= m.x375 - 7*m.b505 <= 0)
m.c806 = Constraint(expr=-log(1 + m.x344) + m.x376 + m.b506 <= 1)
m.c807 = Constraint(expr=-log(1 + m.x345) + m.x377 + m.b507 <= 1)
m.c808 = Constraint(expr= m.x344 - 1.03497516021379*m.b506 <= 0)
m.c809 = Constraint(expr= m.x345 - 1.03497516021379*m.b507 <= 0)
m.c810 = Constraint(expr= m.x376 - 0.710483612536911*m.b506 <= 0)
m.c811 = Constraint(expr= m.x377 - 0.710483612536911*m.b507 <= 0)
m.c812 = Constraint(expr=-0.7*log(1 + m.x354) + m.x378 + m.b508 <= 1)
m.c813 = Constraint(expr=-0.7*log(1 + m.x355) + m.x379 + m.b509 <= 1)
m.c814 = Constraint(expr= m.x354 - 1.10947836929589*m.b508 <= 0)
m.c815 = Constraint(expr= m.x355 - 1.10947836929589*m.b509 <= 0)
m.c816 = Constraint(expr= m.x378 - 0.522508489006913*m.b508 <= 0)
m.c817 = Constraint(expr= m.x379 - 0.522508489006913*m.b509 <= 0)
m.c818 = Constraint(expr=-0.65*log(1 + m.x356) + m.x380 + m.b510 <= 1)
m.c819 = Constraint(expr=-0.65*log(1 + m.x357) + m.x381 + m.b511 <= 1)
m.c820 = Constraint(expr=-0.65*log(1 + m.x362) + m.x380 + m.b510 <= 1)
m.c821 = Constraint(expr=-0.65*log(1 + m.x363) + m.x381 + m.b511 <= 1)
m.c822 = Constraint(expr= m.x356 - 1.10947836929589*m.b510 <= 0)
m.c823 = Constraint(expr= m.x357 - 1.10947836929589*m.b511 <= 0)
m.c824 = Constraint(expr= m.x362 - 8.15*m.b510 <= 0)
m.c825 = Constraint(expr= m.x363 - 8.15*m.b511 <= 0)
m.c826 = Constraint(expr= m.x380 - 1.43894002153683*m.b510 <= 0)
m.c827 = Constraint(expr= m.x381 - 1.43894002153683*m.b511 <= 0)
m.c828 = Constraint(expr= - m.x364 + m.x382 + m.b512 <= 1)
m.c829 = Constraint(expr= - m.x365 + m.x383 + m.b513 <= 1)
m.c830 = Constraint(expr= - m.x364 + m.x382 - m.b512 >= -1)
m.c831 = Constraint(expr= - m.x365 + m.x383 - m.b513 >= -1)
m.c832 = Constraint(expr= m.x364 - 2.1*m.b512 <= 0)
m.c833 = Constraint(expr= m.x365 - 2.1*m.b513 <= 0)
m.c834 = Constraint(expr= m.x382 - 2.1*m.b512 <= 0)
m.c835 = Constraint(expr= m.x383 - 2.1*m.b513 <= 0)
m.c836 = Constraint(expr= - m.x366 + m.x384 + m.b514 <= 1)
m.c837 = Constraint(expr= - m.x367 + m.x385 + m.b515 <= 1)
m.c838 = Constraint(expr= - m.x366 + m.x384 - m.b514 >= -1)
m.c839 = Constraint(expr= - m.x367 + m.x385 - m.b515 >= -1)
m.c840 = Constraint(expr= m.x366 - 2.1*m.b514 <= 0)
m.c841 = Constraint(expr= m.x367 - 2.1*m.b515 <= 0)
m.c842 = Constraint(expr= m.x384 - 2.1*m.b514 <= 0)
m.c843 = Constraint(expr= m.x385 - 2.1*m.b515 <= 0)
m.c844 = Constraint(expr=-0.75*log(1 + m.x368) + m.x386 + m.b516 <= 1)
m.c845 = Constraint(expr=-0.75*log(1 + m.x369) + m.x387 + m.b517 <= 1)
m.c846 = Constraint(expr= m.x368 - 1.6544851364539*m.b516 <= 0)
m.c847 = Constraint(expr= m.x369 - 1.6544851364539*m.b517 <= 0)
m.c848 = Constraint(expr= m.x386 - 0.732188035236726*m.b516 <= 0)
#!/usr/bin/env python
import setpath
import unittest
import os
from bike import testdata
from bike.query.findDefinition import findAllPossibleDefinitionsByCoords
from bike.query.getTypeOf import getTypeOf,resolveImportedModuleOrPackage
from bike.parsing.newstuff import getModuleOrPackageUsingFQN
from bike.parsing.fastparserast import getRoot
from bike.testutils import *
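# Each test builds source (in memory via createSourceNodeAt, or on disk), asks
# findAllPossibleDefinitionsByCoords for the definition at a (line, column)
# position, and checks the returned filename, line, column and confidence.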
class TestFindDefinitionByCoords(BRMTestCase):
def test_findsClassRef(self):
src=trimLines("""
class TheClass:
pass
a = TheClass()
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),3,6)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
    def test_findsMethodRef(self):
src=trimLines("""
class TheClass:
def theMethod(self):
pass
a = TheClass()
a.theMethod()
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),5,3)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 2
assert defn[0].colno == 8
assert defn[0].confidence == 100
def test_returnsOtherMethodsWithSameName(self):
src=trimLines("""
class TheClass:
def theMethod(self):
pass
a = SomeOtherClass()
a.theMethod()
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),5,3)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 2
assert defn[0].colno == 8
assert defn[0].confidence == 50
def test_findsTemporaryDefinition(self):
src=trimLines("""
a = 3
b = a + 1
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),2,4)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsArgumentDefinition(self):
src=trimLines("""
def someFunction(a):
b = a + 1
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),2,8)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 1
assert defn[0].colno == 17
assert defn[0].confidence == 100
def test_findsClassInstanceDefinition(self):
src=trimLines("""
class TheClass():
pass
a = TheClass()
print a
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),4,6)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 3
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsDefinitionInParentScope(self):
src=trimLines("""
a = 3
def foo(self):
b = a + 1
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),3,8)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsDefinitionWithinFunction(self):
src=trimLines("""
def foo(yadda):
a = someFunction()
print a
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),3,10)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 2
assert defn[0].colno == 4
assert defn[0].confidence == 100
def test_findsDefinitionFromSubsequentAssignment(self):
src=trimLines("""
def foo(yadda):
a = 3
print a
a = 5
""")
createSourceNodeAt(src,"mymodule")
defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),4,4)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 2
assert defn[0].colno == 4
assert defn[0].confidence == 100
def test_findsDefinitionFromDefinition(self):
src=trimLines("""
def foo(yadda):
a = 3
print a
a = 5
""")
createSourceNodeAt(src,"mymodule")
        defn = [x for x in findAllPossibleDefinitionsByCoords(os.path.abspath("mymodule.py"),2,4)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 2
assert defn[0].colno == 4
assert defn[0].confidence == 100
def test_findsClassRefUsingFromImportStatement(self):
src=trimLines("""
from a.b.bah import TheClass
""")
classsrc=trimLines("""
class TheClass:
pass
""")
root = createSourceNodeAt(src,"a.foo")
root = createSourceNodeAt(classsrc, "a.b.bah")
module = getModuleOrPackageUsingFQN("a.foo")
filename = os.path.abspath(os.path.join("a","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,1,21)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
def test_findsVariableRefUsingFromImportStatement(self):
importsrc=trimLines("""
from a.b.bah import mytext
print mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,6)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefUsingImportStatement(self):
importsrc=trimLines("""
import a.b.bah
print a.b.bah.mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,14)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefUsingFromImportStarStatement(self):
importsrc=trimLines("""
from a.b.bah import *
print mytext
""")
src=trimLines("""
mytext = 'hello'
""")
createSourceNodeAt(importsrc,"a.foo")
createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,6)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefUsingFromPackageImportModuleStatement(self):
importsrc=trimLines("""
from a.b import bah
print bah.mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.b.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","b","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,10)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsImportedVariableRefInAFunctionArg(self):
importsrc=trimLines("""
from a.b import bah
someFunction(bah.mytext)
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.b.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","b","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,17)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefUsingFromImportStatementInFunction(self):
importsrc=trimLines("""
        def foo():
from a.b.bah import mytext
print mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,3,10)]
assert defn[0].filename == os.path.abspath(os.path.join("a","b","bah.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefByImportingModule(self):
importsrc=trimLines("""
import a.b.bah
print a.b.bah.mytext
""")
src=trimLines("""
mytext = 'hello'
""")
defn = self.helper(importsrc, src, 2, 14)
assert defn[0].filename == pkgstructureFile2
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def test_findsVariableRefByImportingModuleWithFrom(self):
importsrc=trimLines("""
from a.b import bah
someFunction(bah.mytext)
""")
src=trimLines("""
mytext = 'hello'
""")
defn = self.helper(importsrc, src, 2, 17)
assert defn[0].filename == pkgstructureFile2
assert defn[0].lineno == 1
assert defn[0].colno == 0
assert defn[0].confidence == 100
def helper(self, src, classsrc, line, col):
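        # Write the two-module package structure to disk, run the query, and
        # always remove the temporary files again, even if the query raises.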
try:
createPackageStructure(src,classsrc)
filename = pkgstructureFile1
#Root(None,None,[pkgstructureRootDir])
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,line,col)]
finally:
removePackageStructure()
return defn
def test_doesntfindVariableRefOfUnimportedModule(self):
importsrc=trimLines("""
# a.b.bah not imported
print a.b.bah.mytext
""")
src=trimLines("""
mytext = 'hello'
""")
root = createSourceNodeAt(importsrc,"a.b.foo")
root = createSourceNodeAt(src, "a.b.bah")
filename = os.path.abspath(os.path.join("a","b","foo.py"))
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,14)]
self.assertEqual(defn,[])
def test_findsSelfAttributeDefinition(self):
src=trimLines("""
class MyClass:
def __init__(self):
self.a = 'hello'
def myMethod(self):
print self.a
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,5,18)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 3
assert defn[0].colno == 12
assert defn[0].confidence == 100
def test_findsSelfAttributeDefinitionFromSamePlace(self):
src=trimLines("""
class MyClass:
def __init__(self):
self.a = 'hello'
def myMethod(self):
print self.a
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,3,12)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 3
assert defn[0].colno == 12
assert defn[0].confidence == 100
    def test_findsSelfAttributeDefinitionAtPointOfAssignment(self):
src=trimLines("""
class MyClass:
def someOtherFn(self):
pass
def load(self, source):
# fastparser ast
self.fastparseroot = fastparser(source,self.modulename)
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,6,14)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 6
assert defn[0].colno == 13
assert defn[0].confidence == 100
def test_findsDefnOfInnerClass(self):
src = trimLines("""
class TheClass:
class TheClass:
pass
a = TheClass.TheClass()
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,4,14)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 2
assert defn[0].colno == 10
assert defn[0].confidence == 100
def test_findsDefnOfOuterClass(self):
src = trimLines("""
class TheClass:
class TheClass:
pass
a = TheClass.TheClass()
""")
root = createSourceNodeAt(src,"mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,4,4)]
assert defn[0].filename == os.path.abspath("mymodule.py")
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
def test_findsClassDeclaredIn__init__Module(self):
importsrc=trimLines("""
class TheClass:
pass
""")
src=trimLines("""
from a import TheClass
c = TheClass()
""")
root = createSourceNodeAt(importsrc,"a.__init__")
root = createSourceNodeAt(src, "mymodule")
filename = os.path.abspath("mymodule.py")
defn = [x for x in findAllPossibleDefinitionsByCoords(filename,2,6)]
assert defn[0].filename == os.path.abspath(os.path.join("a",
"__init__.py"))
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
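# The same kind of definition queries, but exercised against real files written
# to disk (writeTmpTestFile / createPackageStructure) rather than in-memory
# source nodes.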
class TestFindDefinitionUsingFiles(BRMTestCase):
def test_findsASimpleDefinitionUsingFiles(self):
src=trimLines("""
class TheClass:
pass
a = TheClass()
""")
writeTmpTestFile(src)
defn = [x for x in findAllPossibleDefinitionsByCoords(tmpfile,3,6)]
assert defn[0].filename == tmpfile
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
def test_findsDefinitionInAnotherModuleUsingFiles(self):
src=trimLines("""
from a.b.bah import TheClass
""")
classsrc=trimLines("""
class TheClass:
pass
""")
defn = self.helper(src, classsrc, 1, 21)
assert defn[0].filename == pkgstructureFile2
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
def test_findsDefinitionInAnotherRelativeModuleUsingFiles(self):
src=trimLines("""
from b.bah import TheClass
""")
classsrc=trimLines("""
class TheClass:
pass
""")
defn = self.helper(src, classsrc,1,21)
assert defn[0].filename == pkgstructureFile2
assert defn[0].lineno == 1
assert defn[0].colno == 6
assert defn[0].confidence == 100
def test_findsMethodDefinitionInAnotherModuleUsingFiles(self):
src=trimLines("""
from b.bah import TheClass
a = TheClass()
a.theMethod()
""")
classsrc=trimLines("""
class TheClass:
def theMethod(self):
pass
""")
defn = self.helper(src, classsrc, 3, 2)
assert defn[0].filename == pkgstructureFile2
assert defn[0].lineno == 2
assert defn[0].colno == 8
assert defn[0].confidence == 100
    def test_findsDefinitionOfMethodWhenUseIsOnAMultiLine(self):
classsrc=trimLines("""
class TheClass:
def theMethod(self):
pass
| |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.411735,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.47859,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.144,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.315792,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.828175,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.300609,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.484871,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.244747,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.03023,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.216839,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.67756,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.15646,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0126089,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.143035,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0932505,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.299495,
'Execution Unit/Register Files/Runtime Dynamic': 0.105859,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.337343,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.759655,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.61691,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000561368,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000492564,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000192656,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00133955,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00295485,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00525326,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0896441,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.70213,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.185252,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.304472,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.19738,
'Instruction Fetch Unit/Runtime Dynamic': 0.587575,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0331415,
'L2/Runtime Dynamic': 0.00690054,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.74011,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.20486,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0809777,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1225,
'Load Store Unit/Runtime Dynamic': 1.6852,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.199677,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.399355,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0708661,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0713609,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.354538,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0303777,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.632382,
'Memory Management Unit/Runtime Dynamic': 0.101739,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.2524,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.411575,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0185714,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.145597,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
 'Renaming
import numpy as np
from utils import *
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, AvgPool2D, MaxPool2D
from tensorflow.keras.layers import Dense, Flatten, Dropout
from tensorflow.keras.losses import CategoricalCrossentropy, SparseCategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy, SparseCategoricalAccuracy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# ResNet variants built with the Keras functional API; the Resblock and
# Resblock_bn helpers come from the star import of utils above.
class ResNet34:
def __init__(self, input_shape, num_classes):
self.input_shape = input_shape
self.num_classes = num_classes
def build_net(self):
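        # 16 basic residual blocks in the usual 3-4-6-3 stage layout; verbose=True
        # marks the first block of each new stage, where the filter count doubles
        # (presumably the point at which utils.Resblock also downsamples).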
inputs = keras.Input(shape=self.input_shape)
# HEAD
Conv = Conv2D(filters=64, kernel_size=7, strides=2, activation="relu", padding="same", name="Conv")(inputs)
Pool = MaxPool2D(pool_size=3, strides=2, padding="same")(Conv)
# BODY
RB1 = Resblock(Pool, knum=64, layer_name="RB1")
RB2 = Resblock(RB1, knum=64, layer_name="RB2")
RB3 = Resblock(RB2, knum=64, layer_name="RB3")
RB4 = Resblock(RB3, knum=128, layer_name="RB4", verbose=True)
RB5 = Resblock(RB4, knum=128, layer_name="RB5")
RB6 = Resblock(RB5, knum=128, layer_name="RB6")
RB7 = Resblock(RB6, knum=128, layer_name="RB7")
RB8 = Resblock(RB7, knum=256, layer_name="RB8", verbose=True)
RB9 = Resblock(RB8, knum=256, layer_name="RB9")
RB10 = Resblock(RB9, knum=256, layer_name="RB10")
RB11 = Resblock(RB10, knum=256, layer_name="RB11")
RB12 = Resblock(RB11, knum=256, layer_name="RB12")
RB13 = Resblock(RB12, knum=256, layer_name="RB13")
RB14 = Resblock(RB13, knum=512, layer_name="RB14", verbose=True)
RB15 = Resblock(RB14, knum=512, layer_name="RB15")
RB16 = Resblock(RB15, knum=512, layer_name="RB16")
#TAIL
TAIL_AVG = AvgPool2D(pool_size=3, strides=1, padding="same")(RB16)
TAIL_FLT = Flatten()(TAIL_AVG)
outputs = Dense(units=self.num_classes, activation="softmax")(TAIL_FLT)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer = keras.optimizers.Adam(0.002),
loss = SparseCategoricalCrossentropy(),
metrics = SparseCategoricalAccuracy()
)
return model
class ResNet50:
"""
    ResNet-50 built from the bottleneck version of the residual block (Resblock_bn).
"""
def __init__(self, input_shape, num_classes):
self.input_shape = input_shape
self.num_classes = num_classes
def build_net(self):
inputs = keras.Input(shape=self.input_shape)
# FRONT
Conv = Conv2D(filters=64, kernel_size=7, strides=2, padding="same", activation="relu", name="Conv")(inputs)
Pool = MaxPool2D(pool_size=3, strides=2, padding="same")(Conv)
# BODY
RB1 = Resblock_bn(input=Pool, knum_in=64, knum_out=256, layer_name="RB1")
RB2 = Resblock_bn(input=RB1, knum_in=64, knum_out=256, layer_name="RB2")
RB3 = Resblock_bn(input=RB2, knum_in=64, knum_out=256, layer_name="RB3")
RB4 = Resblock_bn(input=RB3, knum_in=128, knum_out=512, layer_name="RB4", verbose=True)
RB5 = Resblock_bn(input=RB4, knum_in=128, knum_out=512, layer_name="RB5")
RB6 = Resblock_bn(input=RB5, knum_in=128, knum_out=512, layer_name="RB6")
RB7 = Resblock_bn(input=RB6, knum_in=128, knum_out=512, layer_name="RB7")
RB8 = Resblock_bn(input=RB7, knum_in=256, knum_out=1024, layer_name="RB8", verbose=True)
RB9 = Resblock_bn(input=RB8, knum_in=256, knum_out=1024, layer_name="RB9")
RB10 = Resblock_bn(input=RB9, knum_in=256, knum_out=1024, layer_name="RB10")
RB11 = Resblock_bn(input=RB10,knum_in=256, knum_out=1024, layer_name="RB11")
RB12 = Resblock_bn(input=RB11, knum_in=256, knum_out=1024, layer_name="RB12")
RB13 = Resblock_bn(input=RB12, knum_in=256, knum_out=1024, layer_name="RB13")
RB14 = Resblock_bn(input=RB13, knum_in=512, knum_out=2048, layer_name="RB14", verbose=True)
RB15 = Resblock_bn(input=RB14, knum_in=512, knum_out=2048, layer_name="RB15")
RB16 = Resblock_bn(input=RB15, knum_in=512, knum_out=2048, layer_name="RB16")
# TAIL
TAIL_AVG = AvgPool2D(pool_size=3, strides=1, padding="same")(RB16)
TAIL_FLT = Flatten()(TAIL_AVG)
outputs = Dense(units=self.num_classes, activation="softmax")(TAIL_FLT)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(0.002),
loss = SparseCategoricalCrossentropy(),
metrics=SparseCategoricalAccuracy()
)
return model
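# utils' Resblock_bn is not shown in this file. Purely as an illustrative sketch
# (not the actual utils implementation; the name, arguments and the absence of
# batch norm here are assumptions), a bottleneck residual block of the kind
# ResNet50 describes could look like this:
def _bottleneck_block_sketch(x, knum_in, knum_out, layer_name, downsample=False):
    strides = 2 if downsample else 1
    # 1x1 reduce -> 3x3 -> 1x1 expand
    y = Conv2D(knum_in, 1, strides=strides, padding="same", activation="relu",
               name=layer_name + "_1x1a")(x)
    y = Conv2D(knum_in, 3, strides=1, padding="same", activation="relu",
               name=layer_name + "_3x3")(y)
    y = Conv2D(knum_out, 1, strides=1, padding="same",
               name=layer_name + "_1x1b")(y)
    # Project the shortcut whenever the spatial size or channel count changes.
    if downsample or x.shape[-1] != knum_out:
        x = Conv2D(knum_out, 1, strides=strides, padding="same",
                   name=layer_name + "_proj")(x)
    out = keras.layers.Add(name=layer_name + "_add")([x, y])
    return keras.layers.Activation("relu", name=layer_name + "_out")(out)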
class ResNet101:
def __init__(self, image_shape, num_classes):
self.image_shape = image_shape
self.num_classes = num_classes
def build_net(self):
input = keras.Input(self.image_shape)
# HEAD
        Conv = Conv2D(filters=64, kernel_size=7, activation="relu",
                      strides=2, padding="same", name="HEAD_conv")(input)
MaxP = MaxPool2D(pool_size=3, strides=2, padding="same")(Conv)
# BODY
# layers : 3
RB1 = Resblock(input=MaxP, knum=64, layer_name="RB1")
RB2 = Resblock(input=RB1, knum=64, layer_name="RB2")
RB3 = Resblock(input=RB2, knum=64, layer_name="RB3")
# layers : 4
        RB4 = Resblock(input=RB3, knum=128, layer_name="RB4", verbose=True)
RB5 = Resblock(input=RB4, knum=128, layer_name="RB5")
RB6 = Resblock(input=RB5, knum=128, layer_name="RB6")
RB7 = Resblock(input=RB6, knum=128, layer_name="RB7")
# layers : 23
        RB8 = Resblock(input=RB7, knum=256, layer_name="RB8", verbose=True)
RB9 = Resblock(input=RB8, knum=256, layer_name="RB9")
RB10 = Resblock(input=RB9, knum=256, layer_name="RB10")
RB11 = Resblock(input=RB10, knum=256, layer_name="RB11")
RB12 = Resblock(input=RB11, knum=256, layer_name="RB12")
RB13 = Resblock(input=RB12, knum=256, layer_name="RB13")
RB14 = Resblock(input=RB13, knum=256, layer_name="RB14")
RB15 = Resblock(input=RB14, knum=256, layer_name="RB15")
RB16 = Resblock(input=RB15, knum=256, layer_name="RB16")
RB17 = Resblock(input=RB16, knum=256, layer_name="RB17")
RB18 = Resblock(input=RB17, knum=256, layer_name="RB18")
RB19 = Resblock(input=RB18, knum=256, layer_name="RB19")
RB20 = Resblock(input=RB19, knum=256, layer_name="RB20")
RB21 = Resblock(input=RB20, knum=256, layer_name="RB21")
RB22 = Resblock(input=RB21, knum=256, layer_name="RB22")
RB23 = Resblock(input=RB22, knum=256, layer_name="RB23")
RB24 = Resblock(input=RB23, knum=256, layer_name="RB24")
RB25 = Resblock(input=RB24, knum=256, layer_name="RB25")
RB26 = Resblock(input=RB25, knum=256, layer_name="RB26")
RB27 = Resblock(input=RB26, knum=256, layer_name="RB27")
RB28 = Resblock(input=RB27, knum=256, layer_name="RB28")
RB29 = Resblock(input=RB28, knum=256, layer_name="RB29")
RB30 = Resblock(input=RB29, knum=256, layer_name="RB30")
# layers : 3
        RB31 = Resblock(input=RB30, knum=512, layer_name="RB31", verbose=True)
RB32 = Resblock(input=RB31, knum=512, layer_name="RB32")
RB33 = Resblock(input=RB32, knum=512, layer_name="RB33")
# TAIL
TAIL_AVG = AvgPool2D(pool_size=3, strides=1, padding="same")(RB33)
TAIL_FLT = Flatten()(TAIL_AVG)
outputs = Dense(units=self.num_classes, activation="softmax")(TAIL_FLT)
model = keras.Model(inputs=input, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(0.002),
loss=SparseCategoricalCrossentropy(),
metrics=SparseCategoricalAccuracy()
)
return model
class Resnet20_cifar(ResNet34):
def build_net(self):
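        # Three stages (16 -> 32 -> 64 filters) of six basic blocks each: 6*6+2 = 38
        # weight layers, i.e. deeper than the canonical CIFAR ResNet-20, which uses
        # three blocks per stage under the paper's 6n+2 scheme.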
inputs = keras.Input(shape=self.input_shape)
#HEAD(NO-Pool)
Conv = Conv2D(filters=16, kernel_size=3, kernel_initializer="he_normal",
strides=1, activation="relu", padding="same", name="Conv")(inputs)
#BODY
RB1 = Resblock(input=Conv, knum=16, layer_name="RB1")
RB2 = Resblock(input=RB1, knum=16, layer_name="RB2")
RB3 = Resblock(input=RB2, knum=16, layer_name="RB3")
RB4 = Resblock(input=RB3, knum=16, layer_name="RB4")
RB5 = Resblock(input=RB4, knum=16, layer_name="RB5")
RB6 = Resblock(input=RB5, knum=16, layer_name="RB6")
RB7 = Resblock(input=RB6, knum=32, layer_name="RB7", verbose=True)
RB8 = Resblock(input=RB7, knum=32, layer_name="RB8")
RB9 = Resblock(input=RB8, knum=32, layer_name="RB9")
RB10 = Resblock(input=RB9, knum=32, layer_name="RB10")
RB11 = Resblock(input=RB10, knum=32, layer_name="RB11")
RB12 = Resblock(input=RB11, knum=32, layer_name="RB12")
RB13 = Resblock(input=RB12, knum=64, layer_name="RB13", verbose=True)
RB14 = Resblock(input=RB13, knum=64, layer_name="RB14")
RB15 = Resblock(input=RB14, knum=64, layer_name="RB15")
RB16 = Resblock(input=RB15, knum=64, layer_name="RB16")
RB17 = Resblock(input=RB16, knum=64, layer_name="RB17")
RB18 = Resblock(input=RB17, knum=64, layer_name="RB18")
#TAIL
TAIL_AVG = AvgPool2D(pool_size=3, strides=1, padding="same")(RB18)
TAIL_FLT = Flatten()(TAIL_AVG)
outputs = Dense(units=self.num_classes, activation="softmax")(TAIL_FLT)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(0.001),
loss=SparseCategoricalCrossentropy(),
metrics=SparseCategoricalAccuracy()
)
return model
class Resnet56_cifar(ResNet34):
def build_net(self):
inputs = keras.Input(shape=self.input_shape)
# HEAD(NO-Pool)
Conv = Conv2D(filters=16, kernel_size=3, kernel_initializer="he_normal",
strides=1, activation="relu", padding="same", name="Conv")(inputs)
# BODY
RB1 = Resblock(input=Conv, knum=16, layer_name="RB1")
RB2 = Resblock(input=RB1, knum=16, layer_name="RB2")
RB3 = Resblock(input=RB2, knum=16, layer_name="RB3")
RB4 = Resblock(input=RB3, knum=16, layer_name="RB4")
RB5 = Resblock(input=RB4, knum=16, layer_name="RB5")
RB6 = Resblock(input=RB5, knum=16, layer_name="RB6")
RB7 = Resblock(input=RB6, knum=16, layer_name="RB7")
RB8 = Resblock(input=RB7, knum=16, layer_name="RB8")
RB9 = Resblock(input=RB8, knum=16, layer_name="RB9")
RB10 = Resblock(input=RB9, knum=32, layer_name="RB10", verbose=True)
RB11 = Resblock(input=RB10, knum=32, layer_name="RB11")
RB12 = Resblock(input=RB11, knum=32, layer_name="RB12")
RB13 = Resblock(input=RB12, knum=32, layer_name="RB13")
RB14 = Resblock(input=RB13, knum=32, layer_name="RB14")
RB15 = Resblock(input=RB14, knum=32, layer_name="RB15")
RB16 = Resblock(input=RB15, knum=32, layer_name="RB16")
RB17 = Resblock(input=RB16, knum=32, layer_name="RB17")
RB18 = Resblock(input=RB17, knum=32, layer_name="RB18")
RB19 = Resblock(input=RB18, knum=64, layer_name="RB19", verbose=True)
RB20 = Resblock(input=RB19, knum=64, layer_name="RB20")
RB21 = Resblock(input=RB20, knum=64, layer_name="RB21")
RB22 = Resblock(input=RB21, knum=64, layer_name="RB22")
RB23 = Resblock(input=RB22, knum=64, layer_name="RB23")
RB24 = Resblock(input=RB23, knum=64, layer_name="RB24")
RB25 = Resblock(input=RB24, knum=64, layer_name="RB25")
RB26 = Resblock(input=RB25, knum=64, layer_name="RB26")
RB27 = Resblock(input=RB26, knum=64, layer_name="RB27")
# TAIL
TAIL_AVG = AvgPool2D(pool_size=3, strides=1, padding="same")(RB27)
TAIL_FLT = Flatten()(TAIL_AVG)
outputs = Dense(units=self.num_classes, activation="softmax")(TAIL_FLT)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(0.001),
loss=SparseCategoricalCrossentropy(),
metrics=SparseCategoricalAccuracy()
)
return model
class Resnet110_cifar(ResNet34):
def build_net(self):
inputs = keras.Input(shape=self.input_shape)
# HEAD(NO-Pool)
Conv = Conv2D(filters=16, kernel_size=3, kernel_initializer="he_normal",
strides=1, activation="relu", padding="same", name="Conv")(inputs)
# BODY
RB1 = Resblock(input=Conv, knum=16, layer_name="RB1")
RB2 = Resblock(input=RB1, knum=16, layer_name="RB2")
RB3 = Resblock(input=RB2, knum=16, layer_name="RB3")
RB4 = Resblock(input=RB3, knum=16, layer_name="RB4")
RB5 = Resblock(input=RB4, knum=16, layer_name="RB5")
RB6 = Resblock(input=RB5, knum=16, layer_name="RB6")
RB7 = Resblock(input=RB6, knum=16, layer_name="RB7")
RB8 = Resblock(input=RB7, knum=16, layer_name="RB8")
RB9 = Resblock(input=RB8, knum=16, layer_name="RB9")
RB10 = Resblock(input=RB9, knum=16, layer_name="RB10")
RB11 = Resblock(input=RB10, knum=16, layer_name="RB11")
RB12 = Resblock(input=RB11, knum=16, layer_name="RB12")
RB13 = Resblock(input=RB12, knum=16, layer_name="RB13")
RB14 = Resblock(input=RB13, knum=16, layer_name="RB14")
RB15 = Resblock(input=RB14, knum=16, layer_name="RB15")
RB16 = Resblock(input=RB15, knum=16, layer_name="RB16")
RB17 = Resblock(input=RB16, knum=16, layer_name="RB17")
RB18 = Resblock(input=RB17, knum=16, layer_name="RB18")
RB19 = Resblock(input=RB18, knum=32, layer_name="RB19", verbose=True)
RB20 = Resblock(input=RB19, knum=32, layer_name="RB20")
RB21 = Resblock(input=RB20, knum=32, layer_name="RB21")
RB22 = Resblock(input=RB21, knum=32, layer_name="RB22")
RB23 = Resblock(input=RB22, knum=32, layer_name="RB23")
RB24 = Resblock(input=RB23, knum=32, layer_name="RB24")
RB25 = Resblock(input=RB24, knum=32, layer_name="RB25")
RB26 = Resblock(input=RB25, knum=32, layer_name="RB26")
RB27 = Resblock(input=RB26, knum=32, layer_name="RB27")
RB28 = Resblock(input=RB27, knum=32, layer_name="RB28")
RB29 = Resblock(input=RB28, knum=32, layer_name="RB29")
RB30 = Resblock(input=RB29, knum=32, layer_name="RB30")
RB31 = Resblock(input=RB30, knum=32, layer_name="RB31")
RB32 = Resblock(input=RB31, knum=32, layer_name="RB32")
RB33 = Resblock(input=RB32, knum=32, layer_name="RB33")
RB34 = Resblock(input=RB33, knum=32, layer_name="RB34")
RB35 = Resblock(input=RB34, knum=32, layer_name="RB35")
RB36 = Resblock(input=RB35, knum=32, layer_name="RB36")
RB37 = Resblock(input=RB36, knum=64, layer_name="RB37", verbose=True)
RB38 = Resblock(input=RB37, knum=64, layer_name="RB38")
RB39 = Resblock(input=RB38, knum=64, layer_name="RB39")
RB40 = Resblock(input=RB39, knum=64, layer_name="RB40")
RB41 = Resblock(input=RB40, knum=64, layer_name="RB41")
RB42 = Resblock(input=RB41, knum=64, layer_name="RB42")
RB43 = Resblock(input=RB42, knum=64, layer_name="RB43")
RB44 = Resblock(input=RB43, knum=64, layer_name="RB44")
RB45 = Resblock(input=RB44, knum=64, layer_name="RB45")
RB46 = Resblock(input=RB45, knum=64, layer_name="RB46")
RB47 = Resblock(input=RB46, knum=64, layer_name="RB47")
RB48 = Resblock(input=RB47, knum=64, layer_name="RB48")
RB49 = Resblock(input=RB48, knum=64, layer_name="RB49")
RB50 = Resblock(input=RB49, knum=64, layer_name="RB50")
RB51 = Resblock(input=RB50, knum=64, layer_name="RB51")
RB52 = Resblock(input=RB51, knum=64, layer_name="RB52")
RB53 = Resblock(input=RB52, knum=64, layer_name="RB53")
RB54 = Resblock(input=RB53, knum=64, layer_name="RB54")
# TAIL
TAIL_AVG = AvgPool2D(pool_size=3, strides=1, padding="same")(RB54)
TAIL_FLT = Flatten()(TAIL_AVG)
outputs = Dense(units=self.num_classes, activation="softmax")(TAIL_FLT)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(0.001),
loss=SparseCategoricalCrossentropy(),
metrics=SparseCategoricalAccuracy()
)
return model
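# Quick CIFAR-10 training run: the last 10,000 training images are held out for
# validation, and checkpoints / TensorBoard logs are written during training.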
if __name__ == "__main__" :
EPOCH_ITER = 30
BATCH_SIZE = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
datagen = ImageDataGenerator()
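    # ImageDataGenerator.fit only computes featurewise statistics (mean/std, ZCA);
    # with the default generator above it does not change the generated batches.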
datagen.fit(x_train)
Resnet = Resnet20_cifar(input_shape=(32,32,3), num_classes=10)
model = Resnet.build_net()
# Load Weights
# model.load_weights(filepath="./resnet_weights/resnet20_02.hdf5", by_name=True)
# model.trainable = False
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint(filepath='./resnet_weights/resnet20_{epoch:02d}.hdf5'),
keras.callbacks.TensorBoard(log_dir="./logs/resnet20_cifar",
update_freq="batch")
]
history = model.fit(datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
                        steps_per_epoch=len(x_train)//BATCH_SIZE,
epochs=EPOCH_ITER,
                        callbacks=callbacks,
                        validation_data=(x_val, y_val))
# ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
import os
import django
from django import forms, template
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db import models
from django.http import Http404, HttpResponseBadRequest
from django.template import TemplateDoesNotExist
from django.template.defaultfilters import slugify
from django.test import TestCase
from django.utils import timezone
from django.utils.encoding import force_text
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from mptt.exceptions import InvalidMove
from feincms import settings as feincms_settings
from feincms.content.application.models import app_reverse
from feincms.contents import RawContent, RichTextContent
from feincms.context_processors import add_page_if_missing
from feincms.models import ContentProxy
from feincms.module.medialibrary.models import Category, MediaFile
from feincms.module.page.extensions.navigation import PagePretender
from feincms.module.page.models import Page
from feincms.templatetags import feincms_page_tags
from feincms.translations import short_language_code
from .test_stuff import Empty
# ------------------------------------------------------------------------
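# Integration tests for the feincms page module: tree editor AJAX commands,
# admin add/change forms, publication windows and the built-in content types.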
class PagesTestCase(TestCase):
def setUp(self):
u = User(username="test", is_active=True, is_staff=True, is_superuser=True)
u.set_password("<PASSWORD>")
u.save()
self.site_1 = Site.objects.all()[0]
Page.register_templates(
{
"key": "base",
"title": "Standard template",
"path": "feincms_base.html",
"regions": (
("main", "Main content area"),
("sidebar", "Sidebar", "inherited"),
),
},
{
"key": "theother",
"title": "This actually exists",
"path": "base.html",
"regions": (
("main", "Main content area"),
("sidebar", "Sidebar", "inherited"),
),
},
)
def login(self):
self.assertTrue(self.client.login(username="test", password="<PASSWORD>"))
def create_page_through_admin(self, title="Test page", parent="", **kwargs):
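        # Minimal POST payload for the admin add view; the *_set-TOTAL_FORMS /
        # INITIAL_FORMS / MAX_NUM_FORMS entries are the management-form fields
        # Django requires for each inline content formset, even when empty.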
dic = {
"title": title,
"slug": kwargs.get("slug", slugify(title)),
"parent": parent,
"template_key": "base",
"publication_date_0": "2009-01-01",
"publication_date_1": "00:00:00",
"initial-publication_date_0": "2009-01-01",
"initial-publication_date_1": "00:00:00",
"language": "en",
"navigation_group": "default",
"site": self.site_1.id,
"rawcontent_set-TOTAL_FORMS": 0,
"rawcontent_set-INITIAL_FORMS": 0,
"rawcontent_set-MAX_NUM_FORMS": 10,
"mediafilecontent_set-TOTAL_FORMS": 0,
"mediafilecontent_set-INITIAL_FORMS": 0,
"mediafilecontent_set-MAX_NUM_FORMS": 10,
"imagecontent_set-TOTAL_FORMS": 0,
"imagecontent_set-INITIAL_FORMS": 0,
"imagecontent_set-MAX_NUM_FORMS": 10,
"contactformcontent_set-TOTAL_FORMS": 0,
"contactformcontent_set-INITIAL_FORMS": 0,
"contactformcontent_set-MAX_NUM_FORMS": 10,
"filecontent_set-TOTAL_FORMS": 0,
"filecontent_set-INITIAL_FORMS": 0,
"filecontent_set-MAX_NUM_FORMS": 10,
"templatecontent_set-TOTAL_FORMS": 0,
"templatecontent_set-INITIAL_FORMS": 0,
"templatecontent_set-MAX_NUM_FORMS": 10,
"applicationcontent_set-TOTAL_FORMS": 0,
"applicationcontent_set-INITIAL_FORMS": 0,
"applicationcontent_set-MAX_NUM_FORMS": 10,
}
dic.update(kwargs)
return self.client.post("/admin/page/page/add/", dic)
def create_default_page_set_through_admin(self):
self.login()
self.create_page_through_admin()
return self.create_page_through_admin("Test child page", 1)
def create_page(self, title="Test page", parent=None, **kwargs):
defaults = {
"template_key": "base",
"site": self.site_1,
"in_navigation": False,
"active": False,
"navigation_group": "default",
}
defaults.update(kwargs)
return Page.objects.create(
title=title,
slug=kwargs.get("slug", slugify(title)),
parent=parent,
**defaults
)
def create_default_page_set(self):
self.create_page("Test child page", parent=self.create_page())
def is_published(self, url, should_be=True):
try:
self.client.get(url)
except TemplateDoesNotExist as e:
if should_be:
if e.args != ("feincms_base.html",):
raise
else:
if e.args != ("404.html",):
raise
def test_01_tree_editor(self):
self.login()
self.assertEqual(self.client.get("/admin/page/page/").status_code, 200)
self.assertRedirects(
self.client.get("/admin/page/page/?anything=anything"),
"/admin/page/page/?e=1",
)
def test_02_add_page(self):
self.login()
self.assertRedirects(
self.create_page_through_admin(title="Test page " * 10, slug="test-page"),
"/admin/page/page/",
)
self.assertEqual(Page.objects.count(), 1)
self.assertContains(self.client.get("/admin/page/page/"), "…")
def test_03_item_editor(self):
self.login()
self.assertRedirects(
self.create_page_through_admin(_continue=1),
reverse("admin:page_page_change", args=(1,)),
)
self.assertEqual(
self.client.get(reverse("admin:page_page_change", args=(1,))).status_code,
200,
)
self.is_published(
reverse("admin:page_page_change", args=(42,)), should_be=False
)
def test_03_add_another(self):
self.login()
self.assertRedirects(
self.create_page_through_admin(_addanother=1), "/admin/page/page/add/"
)
def test_04_add_child(self):
response = self.create_default_page_set_through_admin()
self.assertRedirects(response, "/admin/page/page/")
self.assertEqual(Page.objects.count(), 2)
page = Page.objects.get(pk=2)
self.assertEqual(page.get_absolute_url(), "/test-page/test-child-page/")
page.active = True
page.in_navigation = True
page.save()
# page2 inherited the inactive flag from the toplevel page
self.assertContains(self.client.get("/admin/page/page/"), "inherited")
page1 = Page.objects.get(pk=1)
page1.active = True
page1.save()
content = self.client.get("/admin/page/page/").content.decode("utf-8")
self.assertEqual(len(content.split('checked="checked"')), 4)
def test_05_override_url(self):
self.create_default_page_set()
page = Page.objects.get(pk=1)
page.override_url = "/something/"
page.save()
page2 = Page.objects.get(pk=2)
self.assertEqual(page2.get_absolute_url(), "/something/test-child-page/")
page.override_url = "/"
page.save()
page2 = Page.objects.get(pk=2)
self.assertEqual(page2.get_absolute_url(), "/test-child-page/")
self.is_published("/", False)
page.active = True
        page.template_key = "theother"
page.save()
self.is_published("/", True)
def test_06_tree_editor_save(self):
self.create_default_page_set()
page1 = Page.objects.get(pk=1)
page2 = Page.objects.get(pk=2)
page3 = Page.objects.create(title="page3", slug="page3", parent=page2)
page4 = Page.objects.create(title="page4", slug="page4", parent=page1)
page5 = Page.objects.create(title="page5", slug="page5", parent=None)
self.assertEqual(page3.get_absolute_url(), "/test-page/test-child-page/page3/")
self.assertEqual(page4.get_absolute_url(), "/test-page/page4/")
self.assertEqual(page5.get_absolute_url(), "/page5/")
self.login()
self.client.post(
"/admin/page/page/",
{
"__cmd": "move_node",
"position": "last-child",
"cut_item": "1",
"pasted_on": "5",
},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(Page.objects.get(pk=1).get_absolute_url(), "/page5/test-page/")
self.assertEqual(Page.objects.get(pk=5).get_absolute_url(), "/page5/")
self.assertEqual(
Page.objects.get(pk=3).get_absolute_url(),
"/page5/test-page/test-child-page/page3/",
)
def test_07_tree_editor_toggle_boolean(self):
self.create_default_page_set()
self.assertEqual(Page.objects.get(pk=1).in_navigation, False)
self.login()
self.assertContains(
self.client.post(
"/admin/page/page/",
{"__cmd": "toggle_boolean", "item_id": 1, "attr": "in_navigation"},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
),
r"checked=\"checked\"",
)
self.assertEqual(Page.objects.get(pk=1).in_navigation, True)
self.assertNotContains(
self.client.post(
"/admin/page/page/",
{"__cmd": "toggle_boolean", "item_id": 1, "attr": "in_navigation"},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
),
'checked="checked"',
)
self.assertEqual(Page.objects.get(pk=1).in_navigation, False)
self.assertTrue(
isinstance(
self.client.post(
"/admin/page/page/",
{"__cmd": "toggle_boolean", "item_id": 1, "attr": "notexists"},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
),
HttpResponseBadRequest,
)
)
def test_07_tree_editor_invalid_ajax(self):
self.login()
self.assertContains(
self.client.post(
"/admin/page/page/",
{"__cmd": "notexists"},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
),
"Oops. AJAX request not understood.",
status_code=400,
)
def test_08_publishing(self):
self.create_default_page_set()
page = Page.objects.get(pk=1)
page2 = Page.objects.get(pk=2)
self.is_published(page.get_absolute_url(), should_be=False)
self.is_published(page2.get_absolute_url(), should_be=False)
page.active = True
page.save()
page2.active = True
page2.save()
self.is_published(page.get_absolute_url(), should_be=True)
self.is_published(page2.get_absolute_url(), should_be=True)
old_publication = page.publication_date
page.publication_date = timezone.now() + timedelta(days=1)
page.save()
self.is_published(page.get_absolute_url(), should_be=False)
# Should be not accessible because of its parent's inactivity
self.is_published(page2.get_absolute_url(), should_be=False)
page.publication_date = old_publication
page.publication_end_date = timezone.now() - timedelta(days=1)
page.save()
self.is_published(page.get_absolute_url(), should_be=False)
# Should be not accessible because of its parent's inactivity
self.is_published(page2.get_absolute_url(), should_be=False)
page.publication_end_date = timezone.now() + timedelta(days=1)
page.save()
self.is_published(page.get_absolute_url(), should_be=True)
self.is_published(page2.get_absolute_url(), should_be=True)
def create_page_through_admincontent(self, page, **kwargs):
data = {
"title": page.title,
"slug": page.slug,
# 'parent': page.parent_id, # this field is excluded from the form
"template_key": page.template_key,
"publication_date_0": "2009-01-01",
"publication_date_1": "00:00:00",
"initial-publication_date_0": "2009-01-01",
"initial-publication_date_1": "00:00:00",
"language": "en",
"navigation_group": "default",
"site": self.site_1.id,
"rawcontent_set-TOTAL_FORMS": 1,
"rawcontent_set-INITIAL_FORMS": 0,
"rawcontent_set-MAX_NUM_FORMS": 10,
"rawcontent_set-0-parent": 1,
"rawcontent_set-0-region": "main",
"rawcontent_set-0-ordering": 0,
"rawcontent_set-0-text": "This is some example content",
"mediafilecontent_set-TOTAL_FORMS": 1,
"mediafilecontent_set-INITIAL_FORMS": 0,
"mediafilecontent_set-MAX_NUM_FORMS": 10,
"mediafilecontent_set-0-parent": 1,
"mediafilecontent_set-0-type": "default",
"templatecontent_set-TOTAL_FORMS": 1,
"templatecontent_set-INITIAL_FORMS": 0,
"templatecontent_set-MAX_NUM_FORMS": 10,
"applicationcontent_set-TOTAL_FORMS": 1,
"applicationcontent_set-INITIAL_FORMS": 0,
"applicationcontent_set-MAX_NUM_FORMS": 10,
}
data.update(kwargs)
return self.client.post(
reverse("admin:page_page_change", args=(page.pk,)), data
)
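# Page content is grouped into regions (main, sidebar, ...) reachable through
# page.content; a symlinked page exposes the content of the page it links to.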
def test_09_pagecontent(self):
self.create_default_page_set()
page = Page.objects.get(pk=1)
self.login()
response = self.create_page_through_admincontent(page)
self.assertRedirects(response, "/admin/page/page/")
self.assertEqual(page.content.main[0].__class__.__name__, "RawContent")
page2 = Page.objects.get(pk=2)
page2.symlinked_page = page
# Test that all_of_type works correctly even before accessing
# other content methods
self.assertEqual(len(page2.content.all_of_type(RawContent)), 1)
self.assertEqual(page2.content.main[0].__class__.__name__, "RawContent")
self.assertEqual(
force_text(page2.content.main[0]),
"RawContent<pk=1, parent=Page<pk=1, Test page>, region=main,"
" ordering=0>",
)
self.assertEqual(len(page2.content.main), 1)
self.assertEqual(len(page2.content.sidebar), 0)
self.assertEqual(len(page2.content.nonexistant_region), 0)
self.assertTrue(isinstance(page2.content.media, forms.Media))
self.assertEqual(len(page2.content.all_of_type(RawContent)), 1)
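# Media files can be organised into nested categories and carry per-language captions.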
def test_10_mediafile_and_imagecontent(self):
self.create_default_page_set()
self.login()
page = Page.objects.get(pk=1)
self.create_page_through_admincontent(page)
category = Category.objects.create(title="Category", parent=None)
category2 = Category.objects.create(title="Something", parent=category)
self.assertEqual(force_text(category2), "Category - Something")
self.assertEqual(force_text(category), "Category")
mediafile = MediaFile.objects.create(file="somefile.jpg")
if django.VERSION < (2, 0):
mediafile.categories = [category]
else:
mediafile.categories.set([category])
page.mediafilecontent_set.create(
mediafile=mediafile, region="main", type="default", ordering=1
)
self.assertEqual(force_text(mediafile), "somefile.jpg")
mediafile.translations.create(
caption="something", language_code="%s-ha" % short_language_code()
)
mediafile.purge_translation_cache()
self.assertTrue("something" in force_text(mediafile))
mf = page.content.main[1].mediafile
self.assertEqual(mf.translation.caption, "something")
self.assertEqual(mf.translation.short_language_code(), short_language_code())
self.assertNotEqual(mf.get_absolute_url(), "")
self.assertEqual(force_text(mf), "something")
self.assertEqual(mf.type, "image")
self.assertEqual(MediaFile.objects.only_language("de").count(), 0)
self.assertEqual(MediaFile.objects.only_language("en").count(), 0)
self.assertEqual(
MediaFile.objects.only_language(
lambda: "%s-ha" % short_language_code()
).count(),
1,
)
self.assertTrue("%s-ha" % short_language_code() in mf.available_translations)
# this should not raise
self.client.get(reverse("admin:page_page_change", args=(1,)))
page.mediafilecontent_set.update(mediafile=3)  # Point the content at a different (possibly nonexistent) media file PK
# this should not raise
self.client.get("/admin/page/page/1/")
page.mediafilecontent_set.update(mediafile=mf.id) # Revert changes
field = MediaFile._meta.get_field("file")
old = (field.upload_to, field.storage, field.generate_filename)
from django.core.files.storage import FileSystemStorage
MediaFile.reconfigure(
upload_to=lambda: "anywhere",
storage=FileSystemStorage(location="/wha/", base_url="/whe/"),
)
mediafile = MediaFile.objects.get(pk=1)
self.assertEqual(mediafile.file.url, "/whe/somefile.jpg")
# restore settings
(field.upload_to, field.storage, field.generate_filename) = old
mediafile = MediaFile.objects.get(pk=1)
self.assertEqual(mediafile.file.url, "/media/somefile.jpg")
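# Linking a page via translation_of makes each side report the other as an
# available translation.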
def test_11_translations(self):
self.create_default_page_set()
page1 = Page.objects.get(pk=1)
self.assertEqual(len(page1.available_translations()), 0)
page1 = Page.objects.get(pk=1)
page2 = Page.objects.get(pk=2)
page1.active = True
page1.save()
page2.active = True
page2.language = "de"
page2.save()
self.assertEqual(len(page2.available_translations()), 0)
page2.translation_of = page1
page2.save()
self.assertEqual(len(page2.available_translations()), 1)
self.assertEqual(len(page1.available_translations()), 1)
self.assertEqual(page1, page1.original_translation)
self.assertEqual(page1, page2.original_translation)
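# page_title and content_title fall back to the page title; a newline in
# _content_title splits it into content title and subtitle.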
def test_12_titles(self):
self.create_default_page_set()
page = Page.objects.get(pk=1)
self.assertEqual(page.page_title, page.title)
self.assertEqual(page.content_title, page.title)
page._content_title = "Something\nawful"
page._page_title = "Hello world"
page.save()
self.assertEqual(page.page_title, "Hello world")
self.assertEqual(page.content_title, "Something")
self.assertEqual(page.content_subtitle, "awful")
page._content_title = "Only one line"
self.assertEqual(page.content_title, "Only one line")
self.assertEqual(page.content_subtitle, "")
page._content_title = ""
self.assertEqual(page.content_title, page.title)
self.assertEqual(page.content_subtitle, "")
def test_13_inheritance_and_ct_tracker(self):
self.create_default_page_set()
page = Page.objects.get(pk=1)
page.rawcontent_set.create(region="sidebar", ordering=0, text="Something")
page.rawcontent_set.create(region="main", ordering=0, text="Anything")
page2 = Page.objects.get(pk=2)
page2.rawcontent_set.create(region="main", ordering=0, text="Something else")
page2.rawcontent_set.create(region="main", ordering=1, text="Whatever")
# Set default, non-caching content proxy
page2.content_proxy_class = ContentProxy
if hasattr(self, "assertNumQueries"):
# 4 queries: Two to get the content types of page and page2, one to
# fetch all ancestor PKs of page2 and one to materialize the
# RawContent instances belonging to page's sidebar and page2's
# main.
self.assertNumQueries(
4, lambda: [page2.content.main, page2.content.sidebar]
)
self.assertNumQueries(0, lambda: page2.content.sidebar[0].render())
self.assertEqual(
"".join(c.render() for c in page2.content.main), "Something elseWhatever"
)
self.assertEqual(page2.content.sidebar[0].render(), "Something")
page2 = Page.objects.get(pk=2)
self.assertEqual(page2._ct_inventory, {})
# Prime Django content type cache
for ct in Page._feincms_content_types:
ContentType.objects.get_for_model(ct)
if hasattr(self, "assertNumQueries"):
# 5 queries: Two to get the content types of page and page2, one to
# fetch all ancestor PKs of page2 and one to materialize the
# RawContent instances belonging to page's sidebar and page2's main
# and a few queries to update the pages' _ct_inventory attributes:
# - one update to update page2
# - one update to clobber the _ct_inventory attribute of all
# descendants of page2
self.assertNumQueries(
5, lambda: [page2.content.main, page2.content.sidebar]
)
self.assertNumQueries(0, lambda: page2.content.sidebar[0].render())
self.assertEqual(page2.content.sidebar[0].render(), "Something")
# Reload, again, to test ct_tracker extension
page2 = Page.objects.get(pk=2)
if hasattr(self, "assertNumQueries"):
self.assertNumQueries(
1, lambda: [page2.content.main, page2.content.sidebar]
)
self.assertNotEqual(page2._ct_inventory, {})
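# RichTextContent.render() must return an HTML-safe (SafeData) string.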
def test_14_richtext(self):
# only create the content type to test the item editor
# customization hooks
tmp = Page._feincms_content_types[:]
content_type = Page.create_content_type(RichTextContent, regions=("notexists",))
Page._feincms_content_types = tmp
from django.utils.safestring import SafeData
obj = content_type()
obj.text = "Something"
self.assertTrue(isinstance(obj.render(), SafeData))
def test_17_page_template_tags(self):
self.create_default_page_set()
page1 = Page.objects.get(pk=1)
page2 = Page.objects.get(pk=2)
page2.language = "de"
page2.translation_of = page1
page2.active = True
page2.in_navigation = True